Merge branch 'master' of github.com:open-cloud/xos
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 2b55526..0000000
--- a/Makefile
+++ /dev/null
@@ -1,55 +0,0 @@
-NAME = xos
-SPECFILE = $(NAME).spec
-VERSION = $(shell rpm -q --specfile $(SPECFILE) --qf '%{VERSION}\n' | head -n 1)
-RELEASE = $(shell rpm -q --specfile $(SPECFILE) --qf '%{RELEASE}\n' | head -n 1)
-
-UPLOAD_SLICE=princeton_planetstack
-
-PWD = $(shell pwd)
-
-dist rpm: $(NAME)-$(VERSION)-$(RELEASE).rpm
-
-$(NAME)-$(VERSION).tar.gz:
-	mkdir -p $(NAME)-$(VERSION)
-	rsync -av --exclude=.svn --exclude=.git --exclude=*.tar.gz --exclude=*.rpm --exclude=__history --exclude=$(NAME)-$(VERSION)/ ./ $(NAME)-$(VERSION)
-	tar -czf $@ $(NAME)-$(VERSION)
-	rm -fr $(NAME)-$(VERSION)
-
-$(NAME)-$(VERSION)-$(RELEASE).rpm: $(NAME)-$(VERSION).tar.gz
-	mkdir -p build
-	rpmbuild -bb --define '_sourcedir $(PWD)' \
-                --define '_builddir $(PWD)/build' \
-                --define '_srcrpmdir $(PWD)' \
-                --define '_rpmdir $(PWD)' \
-                --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm' \
-                $(SPECFILE)
-
-srpm: $(NAME)-$(VERSION)-$(RELEASE).src.rpm
-$(NAME)-$(VERSION)-$(RELEASE).src.rpm: $(NAME)-$(VERSION).tar.gz
-	rpmbuild -bs --define "_sourcedir $$(pwd)" \
-                --define "_srcrpmdir $$(pwd)" \
-                $(SPECFILE)
-
-clean:
-	rm -f $(NAME)-$(VERSION).tar.gz $(NAME)-$(VERSION)-$(RELEASE).src.rpm $(NAME)-$(VERSION)-$(RELEASE).noarch.rpm
-	rm -rf build
-
-upload: $(NAME)-$(VERSION)-$(RELEASE).rpm
-ifndef UPLOAD_HOST
-	$(error please specify UPLOAD_HOST=<hostname> on make command line)
-endif
-	scp $(NAME)-$(VERSION)-$(RELEASE).x86_64.rpm $(UPLOAD_SLICE)@$(UPLOAD_HOST):/root/
-
-install: upload
-	ssh $(UPLOAD_SLICE)@$(UPLOAD_HOST) yum -y install gcc graphviz-devel graphviz-python postgresql postgresql-server python-pip python-psycopg2 libxslt-devel python-httplib2 GeoIP
-	ssh $(UPLOAD_SLICE)@$(UPLOAD_HOST) rpm --install --upgrade --replacefiles --replacepkgs /root/$(NAME)-$(VERSION)-$(RELEASE).x86_64.rpm   
-	scp /opt/planetstack/hpc_wizard/bigquery_credentials.dat /opt/planetstack/hpc_wizard/client_secrets.json $(UPLOAD_SLICE)@$(UPLOAD_HOST):/opt/planetstack/hpc_wizard/ 
-
-install-keys:
-ifndef UPLOAD_HOST
-	$(error please specify UPLOAD_HOST=<hostname> on make command line)
-endif
-	scp /opt/planetstack/hpc_wizard/bigquery_credentials.dat /opt/planetstack/hpc_wizard/client_secrets.json $(UPLOAD_SLICE)@$(UPLOAD_HOST):/opt/planetstack/hpc_wizard/
-
-.PHONY: dist
-
diff --git a/ansible-hosts b/ansible-hosts
deleted file mode 100644
index 0dd74f1..0000000
--- a/ansible-hosts
+++ /dev/null
@@ -1,2 +0,0 @@
-[localhost]
-127.0.0.1
diff --git a/containers/xos/Makefile b/containers/xos/Makefile
index 4e0d40a..8efcf60 100644
--- a/containers/xos/Makefile
+++ b/containers/xos/Makefile
@@ -6,7 +6,7 @@
 NO_DOCKER_CACHE?=false
 
 .PHONY: build
-build: ; make -C ../../xos/configurations/common -f Makefile.cloudlab; docker build --no-cache=${NO_DOCKER_CACHE} --rm -t ${IMAGE_NAME} .
+build: ; docker build --no-cache=${NO_DOCKER_CACHE} --rm -t ${IMAGE_NAME} .
 
 .PHONY: custom
 custom: ; cat Dockerfile.templ | sed -e "s|XOS_GIT_REPO|${XOS_GIT_REPO}|g" -e "s|XOS_GIT_BRANCH|${XOS_GIT_BRANCH}|g" | docker build --no-cache=${NO_DOCKER_CACHE} --rm -t ${IMAGE_NAME} -
diff --git a/docker-cp.sh b/docker-cp.sh
deleted file mode 100755
index bec511d..0000000
--- a/docker-cp.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#! /bin/bash                                                                                                             
-# script for copying stuff into running Docker container
-# usage: docker-cp.sh <src> <dest>
-# example: docker-cp.sh foo /tmp/foo
-
-XOS=$( docker ps|grep "xos"|awk '{print $NF}' )
-FOLDER=`docker inspect -f   '{{.Id}}' $XOS`
-#cp $1 /var/lib/docker/aufs/mnt/$FOLDER/$2
-docker cp $1 $XOS:$2
diff --git a/external/keystone_user.py b/external/keystone_user.py
deleted file mode 100644
index 1245eec..0000000
--- a/external/keystone_user.py
+++ /dev/null
@@ -1,407 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Based on Jimmy Tang's implementation
-
-DOCUMENTATION = '''
----
-module: keystone_user
-version_added: "1.2"
-short_description: Manage OpenStack Identity (keystone) users, tenants and roles
-description:
-   - Manage users,tenants, roles from OpenStack.
-options:
-   login_user:
-     description:
-        - login username to authenticate to keystone
-     required: false
-     default: admin
-   login_password:
-     description:
-        - Password of login user
-     required: false
-     default: 'yes'
-   login_tenant_name:
-     description:
-        - The tenant login_user belongs to
-     required: false
-     default: None
-     version_added: "1.3"
-   token:
-     description:
-        - The token to be uses in case the password is not specified
-     required: false
-     default: None
-   endpoint:
-     description:
-        - The keystone url for authentication
-     required: false
-     default: 'http://127.0.0.1:35357/v2.0/'
-   user:
-     description:
-        - The name of the user that has to added/removed from OpenStack
-     required: false
-     default: None
-   password:
-     description:
-        - The password to be assigned to the user
-     required: false
-     default: None
-   tenant:
-     description:
-        - The tenant name that has be added/removed
-     required: false
-     default: None
-   tenant_description:
-     description:
-        - A description for the tenant
-     required: false
-     default: None
-   email:
-     description:
-        - An email address for the user
-     required: false
-     default: None
-   role:
-     description:
-        - The name of the role to be assigned or created
-     required: false
-     default: None
-   state:
-     description:
-        - Indicate desired state of the resource
-     choices: ['present', 'absent']
-     default: present
-requirements: [ python-keystoneclient ]
-author: Lorin Hochstein
-'''
-
-EXAMPLES = '''
-# Create a tenant
-- keystone_user: tenant=demo tenant_description="Default Tenant"
-
-# Create a user
-- keystone_user: user=john tenant=demo password=secrete
-
-# Apply the admin role to the john user in the demo tenant
-- keystone_user: role=admin user=john tenant=demo
-'''
-
-try:
-    from keystoneclient.v2_0 import client
-except ImportError:
-    keystoneclient_found = False
-else:
-    keystoneclient_found = True
-
-
-def authenticate(endpoint, token, login_user, login_password, login_tenant_name):
-    """Return a keystone client object"""
-
-    if token:
-        return client.Client(endpoint=endpoint, token=token)
-    else:
-        return client.Client(auth_url=endpoint, username=login_user,
-                             password=login_password, tenant_name=login_tenant_name)
-
-
-def tenant_exists(keystone, tenant):
-    """ Return True if tenant already exists"""
-    return tenant in [x.name for x in keystone.tenants.list()]
-
-
-def user_exists(keystone, user):
-    """" Return True if user already exists"""
-    return user in [x.name for x in keystone.users.list()]
-
-
-def get_tenant(keystone, name):
-    """ Retrieve a tenant by name"""
-    tenants = [x for x in keystone.tenants.list() if x.name == name]
-    count = len(tenants)
-    if count == 0:
-        raise KeyError("No keystone tenants with name %s" % name)
-    elif count > 1:
-        raise ValueError("%d tenants with name %s" % (count, name))
-    else:
-        return tenants[0]
-
-
-def get_user(keystone, name):
-    """ Retrieve a user by name"""
-    users = [x for x in keystone.users.list() if x.name == name]
-    count = len(users)
-    if count == 0:
-        raise KeyError("No keystone users with name %s" % name)
-    elif count > 1:
-        raise ValueError("%d users with name %s" % (count, name))
-    else:
-        return users[0]
-
-
-def get_role(keystone, name):
-    """ Retrieve a role by name"""
-    roles = [x for x in keystone.roles.list() if x.name == name]
-    count = len(roles)
-    if count == 0:
-        raise KeyError("No keystone roles with name %s" % name)
-    elif count > 1:
-        raise ValueError("%d roles with name %s" % (count, name))
-    else:
-        return roles[0]
-
-
-def get_tenant_id(keystone, name):
-    return get_tenant(keystone, name).id
-
-
-def get_user_id(keystone, name):
-    return get_user(keystone, name).id
-
-
-def ensure_tenant_exists(keystone, tenant_name, tenant_description,
-                         check_mode):
-    """ Ensure that a tenant exists.
-
-        Return (True, id) if a new tenant was created, (False, None) if it
-        already existed.
-    """
-
-    # Check if tenant already exists
-    try:
-        tenant = get_tenant(keystone, tenant_name)
-    except KeyError:
-        # Tenant doesn't exist yet
-        pass
-    else:
-        if tenant.description == tenant_description:
-            return (False, tenant.id)
-        else:
-            # We need to update the tenant description
-            if check_mode:
-                return (True, tenant.id)
-            else:
-                tenant.update(description=tenant_description)
-                return (True, tenant.id)
-
-    # We now know we will have to create a new tenant
-    if check_mode:
-        return (True, None)
-
-    ks_tenant = keystone.tenants.create(tenant_name=tenant_name,
-                                        description=tenant_description,
-                                        enabled=True)
-    return (True, ks_tenant.id)
-    
-
-def ensure_tenant_absent(keystone, tenant, check_mode):
-    """ Ensure that a tenant does not exist
-
-         Return True if the tenant was removed, False if it didn't exist
-         in the first place
-    """
-    if not tenant_exists(keystone, tenant):
-        return False
-
-    # We now know we will have to delete the tenant
-    if check_mode:
-        return True
-
-def ensure_user_exists_and_is_current(keystone, endpoint, user_name, password, email, tenant_name,
-                       check_mode):
-    """ Check if user exists and has the same email and password
-
-        Return (True, id) if a new user was created or one was updated, (False, id) if the user is 
-        up to date
-    """
-    
-    # Check if tenant already exists
-    try:
-        user = get_user(keystone, user_name)
-    except KeyError:
-        # Tenant doesn't exist yet
-        user = None
-        pass
-    else:
-        # User does exist, check if it's current
-        try:
-            authenticate(endpoint, None, user_name, password, tenant_name)
-        except: 
-            pass
-        else:
-            # It's current, we're done
-            return (False, user.id)
-
-    # We now know we will have to create a new user
-    if check_mode:
-        return (True, None)
-
-    tenant = get_tenant(keystone, tenant_name)
-
-    if (not user):
-        user = keystone.users.create(name=user_name, password=password,
-                                 email=email, tenant_id=tenant.id)
-    else:
-        user = keystone.users.update_password(user.id, password)
-        
-    return (True, user.id)
-
-
-def ensure_role_exists(keystone, user_name, tenant_name, role_name,
-                       check_mode):
-    """ Check if role exists
-
-        Return (True, id) if a new role was created or if the role was newly
-        assigned to the user for the tenant. (False, id) if the role already
-        exists and was already assigned to the user ofr the tenant.
-
-    """
-    # Check if the user has the role in the tenant
-    user = get_user(keystone, user_name)
-    tenant = get_tenant(keystone, tenant_name)
-    roles = [x for x in keystone.roles.roles_for_user(user, tenant)
-                     if x.name == role_name]
-    count = len(roles)
-
-    if count == 1:
-        # If the role is in there, we are done
-        role = roles[0]
-        return (False, role.id)
-    elif count > 1:
-        # Too many roles with the same name, throw an error
-        raise ValueError("%d roles with name %s" % (count, role_name))
-
-    # At this point, we know we will need to make changes
-    if check_mode:
-        return (True, None)
-
-    # Get the role if it exists
-    try:
-        role = get_role(keystone, role_name)
-    except KeyError:
-        # Role doesn't exist yet
-        role = keystone.roles.create(role_name)
-
-    # Associate the role with the user in the admin
-    keystone.roles.add_user_role(user, role, tenant)
-    return (True, role.id)
-
-
-def ensure_user_absent(keystone, user, check_mode):
-    raise NotImplementedError("Not yet implemented")
-
-
-def ensure_role_absent(keystone, uesr, tenant, role, check_mode):
-    raise NotImplementedError("Not yet implemented")
-
-
-def main():
-
-    argument_spec = openstack_argument_spec()
-    argument_spec.update(dict(
-            tenant_description=dict(required=False),
-            email=dict(required=False),
-            user=dict(required=False),
-            tenant=dict(required=False),
-            password=dict(required=False),
-            role=dict(required=False),
-            state=dict(default='present', choices=['present', 'absent']),
-            endpoint=dict(required=False,
-                          default="http://127.0.0.1:35357/v2.0"),
-            token=dict(required=False),
-            login_user=dict(required=False),
-            login_password=dict(required=False),
-            login_tenant_name=dict(required=False)
-    ))
-    # keystone operations themselves take an endpoint, not a keystone auth_url
-    del(argument_spec['auth_url'])
-    module = AnsibleModule(
-        argument_spec=argument_spec,
-        supports_check_mode=True,
-        mutually_exclusive=[['token', 'login_user'],
-                            ['token', 'login_password'],
-                            ['token', 'login_tenant_name']]
-    )
-
-    if not keystoneclient_found:
-        module.fail_json(msg="the python-keystoneclient module is required")
-
-    user = module.params['user']
-    password = module.params['password']
-    tenant = module.params['tenant']
-    tenant_description = module.params['tenant_description']
-    email = module.params['email']
-    role = module.params['role']
-    state = module.params['state']
-    endpoint = module.params['endpoint']
-    token = module.params['token']
-    login_user = module.params['login_user']
-    login_password = module.params['login_password']
-    login_tenant_name = module.params['login_tenant_name']
-
-    keystone = authenticate(endpoint, token, login_user, login_password, login_tenant_name)
-
-    check_mode = module.check_mode
-
-    try:
-        d = dispatch(keystone, user, password, tenant, tenant_description,
-                     email, role, state, endpoint, token, login_user,
-                     login_password, check_mode)
-    except Exception, e:
-        if check_mode:
-            # If we have a failure in check mode
-            module.exit_json(changed=True,
-                             msg="exception: %s" % e)
-        else:
-            module.fail_json(msg="exception: %s" % e)
-    else:
-        module.exit_json(**d)
-
-
-def dispatch(keystone, user=None, password=None, tenant=None,
-             tenant_description=None, email=None, role=None,
-             state="present", endpoint=None, token=None, login_user=None,
-             login_password=None, check_mode=False):
-    """ Dispatch to the appropriate method.
-
-        Returns a dict that will be passed to exit_json
-
-        tenant  user  role   state
-        ------  ----  ----  --------
-          X                  present     ensure_tenant_exists
-          X                  absent      ensure_tenant_absent
-          X      X           present     ensure_user_exists
-          X      X           absent      ensure_user_absent
-          X      X     X     present     ensure_role_exists
-          X      X     X     absent      ensure_role_absent
-
-
-        """
-    changed = False
-    id = None
-    if tenant and not user and not role and state == "present":
-        changed, id = ensure_tenant_exists(keystone, tenant,
-                                           tenant_description, check_mode)
-    elif tenant and not user and not role and state == "absent":
-        changed = ensure_tenant_absent(keystone, tenant, check_mode)
-    elif tenant and user and not role and state == "present":
-        changed, id = ensure_user_exists_and_is_current(keystone, endpoint, user, password,
-                                         email, tenant, check_mode)
-    elif tenant and user and not role and state == "absent":
-        changed = ensure_user_absent(keystone, user, check_mode)
-    elif tenant and user and role and state == "present":
-        changed, id = ensure_role_exists(keystone, user, tenant, role,
-                                         check_mode)
-    elif tenant and user and role and state == "absent":
-        changed = ensure_role_absent(keystone, user, tenant, role, check_mode)
-    else:
-        # Should never reach here
-        raise ValueError("Code should never reach here")
-
-    return dict(changed=changed, id=id)
-
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.openstack import *
-if __name__ == '__main__':
-    main()
diff --git a/install_opencloud b/install_opencloud
deleted file mode 100755
index ec0e0ab..0000000
--- a/install_opencloud
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/sh
-yum -y install postgresql postgresql-server
-yum -y install python-psycopg2
-yum -y install graphviz
-yum -y install graphviz-devel
-yum -y install graphviz-python
-yum -y install libxslt-devel
-yum -y install python-pip
-yum -y install wget
-yum -y install tar
-yum -y install gcc
-yum -y install libxml2-dev
-yum -y install libxslt1-dev
-yum -y install python-devel
-
-
-pip-python install django==1.5
-pip-python install djangorestframework
-pip-python install markdown  # Markdown support for the browseable API.
-pip-python install pyyaml    # YAML content-type support.
-pip-python install django-filter  # Filtering support
-pip-python install lxml  # XML manipulation library
-pip-python install netaddr # IP Addr library
-pip-python install pytz
-pip-python install django-timezones
-pip-python install requests
-pip-python install django-crispy-forms
-pip-python install django-geoposition
-pip-python install django-extensions
-pip-python install django-suit
-pip-python install django-evolution
-pip-python install docutils
-pip-python install cython
-pip-python install bitfield
-pip-python install pygments
-
-easy_install django_evolution
-
-wget http://phantomjs.googlecode.com/files/phantomjs-1.7.0-linux-x86_64.tar.bz2
-
-mv ./phantomjs-1.7.0-linux-x86_64.tar.bz2 /usr/local/share
-
-cd /usr/local/share
-
-tar xvf phantomjs-1.7.0-linux-x86_64.tar.bz2
-
-ln -s /usr/local/share/phantomjs-1.7.0-linux-x86_64 /usr/local/share/phantomjs
-
-ln -s /usr/local/share/phantomjs/bin/phantomjs /usr/local/bin/phantomjs
-
-phantomjs --version
-
diff --git a/observer-initscript b/observer-initscript
deleted file mode 100755
index d1466b2..0000000
--- a/observer-initscript
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/bash
-#
-# observer       Starts and stops Observer daemon
-#
-
-# Source function library.
-. /etc/init.d/functions
-
-[ -f /etc/sysconfig/xosobserver ] && . /etc/sysconfig/xosobserver
-
-
-xosobserver=${NODEMANAGER-"python /opt/xos/xos-observer.py -d"}
-prog="OpenCloud Observer"
-pidfile=${PIDFILE-/var/run/xosobserver.pid}
-
-RETVAL=0
-
-function start() {
-    action $"Starting $prog: " daemon --pidfile=$pidfile --check=xosobserver $xosobserver "$@"
-}
-
-function stop() {
-    action $"Stopping $prog: " killproc -p $pidfile xosobserver
-}
-
-case "$1" in
-    start)
-	start $options
-	;;
-    stop)
-	stop
-	;;
-    status)
-	status -p $pidfile xosobserver
-	RETVAL=$?
-	;;
-    restart|reload)
-	shift
-	stop
-	start $options "$@"
-	;;
-    condrestart)
-	shift
-	[ -f ${pidfile} ] && { stop; start $options "$@"; }
-	;;
-    restartverbose)
-	shift
-	stop
-	$xosobserver $verboseoptions "$@"
-	;;
-    restartdebug)
-	shift
-	stop
-	echo "Restarting with $debugoptions $@ .."
-	$xosobserver $debugoptions "$@"
-	;;
-    *)
-	echo $"Usage: $0 {start|stop|status|restart|condrestart|restartdebug [-d]}"
-	exit 1
-	;;
-esac
-
-exit $RETVAL
diff --git a/observer.conf b/observer.conf
deleted file mode 100644
index 92545eb..0000000
--- a/observer.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[program:observer]
-command=python /opt/xos/xos-observer.py
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 1162279..0000000
--- a/setup.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import os
-import shutil 
-from distutils.core import setup
-
-def copytree(src, dst, symlinks=False, ignore=None):
-    if not os.path.exists(dst):
-        os.makedirs(dst)
-    for item in os.listdir(src):
-        s = os.path.join(src, item)
-        d = os.path.join(dst, item)
-        if os.path.isdir(s):
-            copytree(s, d, symlinks, ignore)
-        else:
-            if not os.path.exists(d) or os.stat(src).st_mtime - os.stat(dst).st_mtime > 1:
-                shutil.copy2(s, d)
-
-setup(name='planetstack',
-      version='0.1',
-      description='PlanetStack',
-      scripts=['xos/xos-observer.py'],
-      data_files=[
-        ('/lib/systemd/system/', ['xos/redhat/xos-observer.service']),
-        ])
-
-copytree('xos/', '/opt/xos')
diff --git a/synchronizers b/synchronizers
deleted file mode 120000
index d587898..0000000
--- a/synchronizers
+++ /dev/null
@@ -1 +0,0 @@
-observers
\ No newline at end of file
diff --git a/upgrade-container.sh b/upgrade-container.sh
deleted file mode 100755
index 09c639d..0000000
--- a/upgrade-container.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-TMPDIR="/tmp/initdata"
-XOSDIR="/home/ubuntu/xos"
-
-mkdir -p $TMPDIR
-rm -f $TMPDIR/*.json
-
-XOS=$( docker ps|grep "xos:latest"|awk '{print $NF}' )
-docker exec $XOS /opt/xos/scripts/opencloud dumpdata
-docker cp $XOS:/opt/xos_backups/dumpdata-latest.json $TMPDIR
-docker cp $XOS:/opt/xos/xos_config $TMPDIR
-cp $TMPDIR/*.json $XOSDIR/xos/core/fixtures/initial_data.json
-cp $TMPDIR/xos_config $XOSDIR/xos/
-
-git pull
-
-if [[ $? != 0 ]]; then
-    echo "git pull" failed
-    exit
-fi
-
-docker build -t xos .
-
-docker stop $XOS
-docker run -p 8000:8000 xos 
diff --git a/wiki/figures/modeling-services-fig1.png b/wiki/figures/modeling-services-fig1.png
deleted file mode 100644
index 6df4b43..0000000
--- a/wiki/figures/modeling-services-fig1.png
+++ /dev/null
Binary files differ
diff --git a/wiki/figures/modeling-services-fig2.png b/wiki/figures/modeling-services-fig2.png
deleted file mode 100644
index a854b19..0000000
--- a/wiki/figures/modeling-services-fig2.png
+++ /dev/null
Binary files differ
diff --git a/xos-apps/README.md b/xos-apps/README.md
new file mode 100644
index 0000000..66d71eb
--- /dev/null
+++ b/xos-apps/README.md
@@ -0,0 +1,10 @@
+## Applications on XOS
+
+This directory may prove to be unnecessary, but for now we
+are using it for applications that run on top of the XOS API.
+Initially, this includes only an auto-scaling app that uses
+monitoring data to decide when to scale a service up/down.
+
+This is treated as an application rather than yet another
+service because it offers only a GUI front-end; it is not
+modelled as a service that other services can build upon.
diff --git a/xos.deps b/xos.deps
deleted file mode 100644
index 6eae1fc..0000000
--- a/xos.deps
+++ /dev/null
@@ -1,47 +0,0 @@
-{
-    "Node": [
-        "Site", 
-        "Deployment"
-    ], 
-    "Slice": [
-        "Site"
-    ], 
-    "ReservedResource": [
-        "Sliver"
-    ], 
-    "SliceMembership": [
-        "User", 
-        "Slice", 
-        "Role"
-    ], 
-    "NetworkSlice": [
-        "Network", 
-        "Slice"
-    ], 
-    "Tag": [
-        "Project"
-    ], 
-    "User": [
-        "Site"
-    ], 
-    "SliceTag": [
-        "Slice"
-    ], 
-    "Reservation": [
-        "Slice"
-    ], 
-    "NetworkSliver": [
-        "Network", 
-        "Sliver"
-    ], 
-    "SitePrivilege": [
-        "User", 
-        "Site", 
-        "Role"
-    ], 
-    "Sliver": [
-        "Image", 
-        "Slice", 
-        "Node"
-    ]
-}
diff --git a/xos.spec b/xos.spec
deleted file mode 100644
index c585e7e..0000000
--- a/xos.spec
+++ /dev/null
@@ -1,158 +0,0 @@
-Summary: OpenCloud core services
-Name: xos
-Version: 1.2.0
-Release: 5
-License: GPL+
-Group: Development/Tools
-Source0: %{_tmppath}/%{name}-%{version}.tar.gz
-BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}
-requires: postgresql
-requires: postgresql-server
-requires: python-psycopg2
-requires: graphviz
-requires: graphviz-devel
-requires: graphviz-python
-requires: libxslt-devel
-requires: python-pip
-requires: tar
-requires: gcc
-requires: python-httplib2
-requires: GeoIP
-requires: wget
-
-%description
-%{summary}
-
-%prep
-%setup -q
-
-%build
-# Empty section.
-
-%pre
-pip-python install django==1.7
-pip-python install djangorestframework==2.4.4
-pip-python install markdown  # Markdown support for the browseable API.
-pip-python install pyyaml    # YAML content-type support.
-pip-python install django-filter  # Filtering support
-pip-python install lxml  # XML manipulation library
-pip-python install netaddr # IP Addr library
-pip-python install pytz
-pip-python install django-timezones
-pip-python install requests
-pip-python install django-crispy-forms
-pip-python install django-geoposition
-pip-python install django-extensions
-pip-python install django-suit
-pip-python install django-evolution
-pip-python install django-bitfield
-pip-python install django-ipware
-pip-python install django-encrypted-fields
-pip-python install python-keyczar
-pip-python install python-keystoneclient
-pip-python install python-novaclient
-pip-python install python-neutronclient 
-pip-python install python-glanceclient
-pip-python install python-ceilometerclient
-pip-python install django_rest_swagger
-
-
-easy_install django_evolution
-easy_install python_gflags
-easy_install google_api_python_client
-
-if [ ! -f /usr/lib/python2.7/site-packages/suit/static/suit/js/jquery-1.9.1.min.js ]; then
-    wget -P /usr/lib/python2.7/site-packages/suit/static/suit/js http://code.jquery.com/jquery-1.9.1.min.js
-fi
-
-if [ ! -f /usr/share/GeoIP/GeoLiteCity.dat ]; then
-   rm -f /usr/share/GeoIP/GeoLiteCity.*
-   wget -P /usr/share/GeoIP http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
-   gzip -d /usr/share/GeoIP/GeoLiteCity*.gz
-fi
-
-if [ "$1" == 2 ] ; then
-    if [[ -e /opt/xos/scripts/opencloud ]]; then
-        echo "UPGRADE - saving current state"
-        /opt/xos/scripts/opencloud dumpdata
-    fi
-fi
-
-%install
-rm -rf %{buildroot}
-mkdir -p  %{buildroot}
-install -d %{buildroot}/opt/xos
-install -d %{buildroot}/etc/init.d
-
-# in builddir
-
-rm -rf %{buildroot}/opt/xos
-# don't copy symbolic links (they are handled in %post)
-rsync -rptgoD ./xos %{buildroot}/opt/.  
-cp observer-initscript %{buildroot}/etc/init.d/xosobserver
-
-find %{buildroot}/opt/xos -type f -print | sed "s@^$RPM_BUILD_ROOT@@g" > %{_tmppath}/tmp-filelist
-echo /etc/init.d/xosobserver >> %{_tmppath}/tmp-filelist
-
-# remove config files from the file list (see %config below)
-cat > %{_tmppath}/config-files << "EOF"
-/opt/xos/xos_config
-/opt/xos/deployment_auth.py
-EOF
-
-sort %{_tmppath}/tmp-filelist > %{_tmppath}/tmp-filelist.sorted
-sort %{_tmppath}/config-files > %{_tmppath}/config-files.sorted
-comm -13 %{_tmppath}/config-files.sorted %{_tmppath}/tmp-filelist.sorted > %{_tmppath}/tmp-filelist
-
-cp %{_tmppath}/tmp-filelist /tmp/tmp-filelist
-
-
-%clean
-rm -rf %{buildroot}
-
-%files -f %{_tmppath}/tmp-filelist
-%defattr(-,root,root,-)
-%config /opt/xos/xos_config
-%config /opt/xos/deployment_auth.py
-%config /opt/xos/model-deps
-
-%post
-ln -s openstack_observer /opt/xos/observer
-#ln -s config-opencloud.py /opt/xos/syndicate_observer/syndicatelib_config/config.py
-
-if [ ! -e /opt/xos/public_keys ]; then
-    cd /opt/xos
-    scripts/opencloud genkeys
-fi
-
-if [ "$1" == 1 ] ; then
-    echo "NEW INSTALL - initializing database"
-    /opt/xos/scripts/opencloud initdb
-else
-    # scripts/opencloud will choose evolve or migrate depending on django version
-    echo "UPGRADE - doing evolution/migration"
-    /opt/xos/scripts/opencloud evolvedb
-fi
-
-# Clone ansible with latest openstack modules
-git clone --recursive git://github.com/ansible/ansible.git /opt/ansible
-mkdir -p /etc/ansible
-echo > /etc/ansible/hosts << "EOF"
-[localhost]
-127.0.0.1
-EOF
-
-
-# start the server
-/opt/xos/scripts/opencloud runserver
-
-%preun
-if [ "$1" = 0 ] ; then
-    echo "UNINSTALL - destroying xos"
-    rm -rf /opt/xos
-fi
-
-%changelog
-* Sat Feb 22 2014  Siobhan Tully  1.0.0
-- First Build
-
diff --git a/xos/README.md b/xos/README.md
new file mode 100644
index 0000000..d761d59
--- /dev/null
+++ b/xos/README.md
@@ -0,0 +1,21 @@
+## XOS Source Tree
+
+This is the main directory for XOS. Sub-directories include:
+
+* admin_customize, templates -- related to Django GUI
+* configurations -- collection of canned configurations
+* core -- core model definitions
+* generators -- tools to generate auxiliary structures from data model
+* model_policies -- invariants on the data model
+* nginx, uwsgi -- related to web server that runs XOS
+* openstack -- client-side interaction with OpenStack (to be depreciated)
+* services -- model definitions for a set of services
+* synchronizers -- collection of synchronizers
+* test -- system-wide tests to be collected here
+* tosca -- tosca modeling layer on top of RESTful API
+* tools -- assorted tools and scripts
+* xos -- common source code for all Django applications
+* xos_configuration -- top-level XOS configuration parameters
+
+Of these, configuration, services, and synchronizers are most
+relevant to developers.
diff --git a/xos/configurations/cord/README-VTN.md b/xos/configurations/cord/README-VTN.md
index be31fad..d27d1d1 100644
--- a/xos/configurations/cord/README-VTN.md
+++ b/xos/configurations/cord/README-VTN.md
@@ -68,6 +68,8 @@
 also listening on 6640.
 * Adding use_vtn=True to the [networking] section in the XOS config file has two effects: 1) it sets the gateway in sync_controller_networks, and 2) it disables automatic creation of nat-net for new slices. This is because VTN will fail if there is no gateway on a network, and because we don't have nat-net under the VTN configuration.
 * When using of-vfctl to look at flow rules, if you get a protocol error, try "ovs-ofctl show -O OpenFlow13 br-int "
+* Note that the VTN Synchronizer isn't started automatically. It's only use for inter-Service connectivity, so no need to mess with it until intra-Slice connectivity is working first. 
+* Note that the VTN Synchronizer won't connect non-access networks. Any network templates you want VTN to connect must have Access set to "Direct" or "Indirect". 
 
 There is no management network yet, so no way to SSH into the slices. I've been setting up a VNC tunnel, like this:
 
@@ -81,3 +83,9 @@
     ssh -o "GatewayPorts yes"  -L 5901:192.168.0.7:5901 smbaker@cp-1.smbaker-xos3.xos-pg0.clemson.cloudlab.us
 
 Then open a VNC session to the local port on your local machine. You'll have a console on the Instance. The username is "Ubuntu" and the password can be obtained from your cloudlab experiment description
+
+Things that can be tested:
+
+* Create an Instance, it should have a Private network, and there should be a tap attached from the instance to br-int
+* Two Instances in the same Slice can talk to one another. They can be on the same machine or different machines.
+* Two Slices can talk to one another if the slices are associated with Services and those Services have a Tenancy relationship between them. Note that 1) The VTN Synchronizer must be running, 2) There must be a Private network with Access=[Direct|Indirect], and 3) The connectivity is unidirectional, from subscriber service to provider service.
diff --git a/xos/importer/__init__.py b/xos/importer/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/xos/importer/__init__.py
+++ /dev/null
diff --git a/xos/importer/plclassic/__init__.py b/xos/importer/plclassic/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/xos/importer/plclassic/__init__.py
+++ /dev/null
diff --git a/xos/importer/plclassic/importer.py b/xos/importer/plclassic/importer.py
deleted file mode 100644
index de8628b..0000000
--- a/xos/importer/plclassic/importer.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import os
-#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
-import sys
-from optparse import OptionParser
-from getpass import getpass
-import xmlrpclib
-from plclassic.site_importer import SiteImporter
-from plclassic.user_importer import UserImporter
-from plclassic.slice_importer import SliceImporter
-from plclassic.instance_importer import InstanceImporter
-
-
-class Call:
-    def __init__(self, callable, auth):
-        self.callable = callable
-        self.auth = auth
-
-    def __call__(self, *args, **kwds):
-        a = [self.auth] + list(args)
-        return self.callable(*a)
-
-class API():
-    def __init__(self, username, password, url):
-        self.auth = {'AuthMethod': 'password',
-                     'Username': username,
-                     'AuthString': password}
-        self.server = xmlrpclib.ServerProxy(url, allow_none=True)
-
-    def __getattr__(self, name):         
-        return Call(getattr(self.server, name), self.auth) 
-
-class Importer: 
-
-    def __init__(self, username, password, url):
-        api = API(username, password, url)
-        self.sites = SiteImporter(api)
-        self.slices = SliceImporter(api)
-        self.users = UserImporter(api)
-        self.instances = InstanceImporter(api)
-
-    def run(self):
-        self.sites.run()
-        self.users.run()
-        self.slices.run(remote_sites=self.sites.remote_sites, 
-                        local_sites=self.sites.local_sites)
-        self.instances.run()           
-
-
-
-if __name__ == '__main__':
-    parser = OptionParser()
-        
-    parser.add_option("-u", "--username", dest="username",
-                        help="PLC username with which to authenticate")
-    parser.add_option("", "--url", dest="url",
-                        help="PLC url to contact")
-
-    (config, args) = parser.parse_args()
-    if len(sys.argv) == 1:
-        parser.print_help()
-        sys.exit(1)
-
-    password = None
-    try:
-        password = getpass()
-    except (EOFError, KeyboardInterrupt):
-        print
-        sys.exit(0)
-
-    Importer(config.username, password, config.url).run()
diff --git a/xos/importer/plclassic/instance_importer.py b/xos/importer/plclassic/instance_importer.py
deleted file mode 100644
index 0858572..0000000
--- a/xos/importer/plclassic/instance_importer.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from PLC.Nodes import Nodes
-
-class InstanceImporter:
-
-    def __init__(self, api):
-        self.api = api
-
-    def run(self):
-        return
diff --git a/xos/importer/plclassic/role_importer.py b/xos/importer/plclassic/role_importer.py
deleted file mode 100644
index 107587a..0000000
--- a/xos/importer/plclassic/role_importer.py
+++ /dev/null
@@ -1,9 +0,0 @@
-class RoleImporter:
-
-    def __init__(self, api):
-        self.api = api
-
-    def run(self):
-
-         return 
-
diff --git a/xos/importer/plclassic/site_importer.py b/xos/importer/plclassic/site_importer.py
deleted file mode 100644
index 2ee8157..0000000
--- a/xos/importer/plclassic/site_importer.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from core.models import Site
-
-class SiteImporter:
-
-    def __init__(self, api):
-        self.api = api
-        self.remote_sites = {}
-        self.local_sites = {}
-
-    def run(self):
-        db_sites = Site.objects.all()
-        for db_site in db_sites:
-            self.local_sites[db_site.login_base] = db_site
-        print "%s local sites" % len(db_sites)
-
-        sites = self.api.GetSites({'peer_id': None})
-        print "%s remote sites" % len(sites)
-        count = 0
-        for site in sites:
-            self.remote_sites[site['site_id']] = site 
-            if site['login_base'] not in self.local_sites:
-                new_site = Site(name=site['name'],
-                                login_base=site['login_base'],
-                                site_url=site['url'],
-                                enabled=site['enabled'],
-                                longitude=site['longitude'],
-                                latitude=site['latitude'],
-                                is_public=site['is_public'],
-                                abbreviated_name=site['abbreviated_name'])
-                new_site.save()
-                count += 1
-                self.local_sites[new_site.login_base] = new_site
-        print "imported %s sites" % count
diff --git a/xos/importer/plclassic/slice_importer.py b/xos/importer/plclassic/slice_importer.py
deleted file mode 100644
index b2dd84f..0000000
--- a/xos/importer/plclassic/slice_importer.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from core.models import Slice
-
-class SliceImporter:
-
-    def __init__(self, api):
-        self.api = api
-        self.remote_slices = {}
-        self.local_slices = {}
-
-    def run(self, remote_sites={}, local_sites={}):
-        if not remote_sites:
-            sites = self.api.GetSites({'peer_id': None})
-            for site in sites:
-                remote_sites[site['site_id']] = site
-        
-
-        if not local_sites:
-            from core.models import Site
-            sites = Site.objects.all()
-            for site in sites:
-                local_sites[site.login_base] = site            
-
-        db_slices = Slice.objects.all()
-        for db_slice in db_slices:
-            self.local_slices[db_slice.name] = db_slice
-        print "%s local slices" % len(db_slices)
-
-        slices = self.api.GetSlices({'peer_id': None})
-        print "%s remote slices" % len(slices)
-        count = 0 
-        for slice in slices:
-            self.remote_slices[slice['slice_id']] = slice
-            if slice['name'] not in self.local_slices:
-                site = local_sites[remote_sites[slice['site_id']]['login_base']]
-                new_slice = Slice(name=slice['name'],
-                                   omf_friendly = False,
-                                   description = slice['description'],
-                                   slice_url = slice['url'],
-                                   site = site)
-                new_slice.save()
-                count += 1
-                self.local_slices[new_slice.name] = new_slice
-        print "Imported %s slices" % count
-
-          
-
diff --git a/xos/importer/plclassic/user_importer.py b/xos/importer/plclassic/user_importer.py
deleted file mode 100644
index 21d74b6..0000000
--- a/xos/importer/plclassic/user_importer.py
+++ /dev/null
@@ -1,19 +0,0 @@
-
-class UserImporter:
-
-    def __init__(self, api):
-        self.api = api
-        self.users = {}
-
-    def run(self):
-        users = self.api.GetPersons()
-
-    def save_site_privs(self, user):
-        # update site roles
-        pass
-
-    def save_slice_privs(self, user):
-        # update slice roles
-        pass
-          
-
diff --git a/xos/monitor/__init__.py b/xos/monitor/__init__.py
deleted file mode 100644
index 6debf82..0000000
--- a/xos/monitor/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from xos.settings import STATISTICS_DRIVER
-
-if (STATISTICS_DRIVER=="ceilometer"):
-    from observer import ceilometer
-    driver = ceilometer.CeilometerDriver()
-elif (not STATISTICS_DRIVER) or (STATISTICS_DRIVER.lower() == "none"):
-    # disabled
-    driver = None
-else:
-    driver = None
-    print "WARNING: Unknown statistics driver %s" % STATISTICS_DRIVER
diff --git a/xos/monitor/monitordriver.py b/xos/monitor/monitordriver.py
deleted file mode 100644
index 50e208c..0000000
--- a/xos/monitor/monitordriver.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Implement this interface
-# to serve as a driver for analytics
-
-class DashboardStatistics(dict):
-    def __init__(self):
-        self['stat_list'] = []
-        self['average'] = 0
-        self['sum'] = 0
-        self['unit'] = 'units'
-        self['stat_list']=[]
-        # stat_list is a list of dicts
-        # [ {'timestamp': datetime, 'value': value} ]
-
-
-class MonitorDriver:
-    def __init__(self):
-        pass
-
-    def get_meter(self, meter_name, obj, pk, credentials=None):
-        pass
diff --git a/xos/neutron_extension/1:2013.2.2-0ubuntu1~cloud0/nat.py b/xos/neutron_extension/1:2013.2.2-0ubuntu1~cloud0/nat.py
deleted file mode 100644
index 04e39f1..0000000
--- a/xos/neutron_extension/1:2013.2.2-0ubuntu1~cloud0/nat.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from neutron.api.v2 import attributes
-
-FORWARD_PORTS = 'nat:forward_ports'
-
-EXTENDED_ATTRIBUTES_2_0 = {
-    'ports': {
-        FORWARD_PORTS: {'allow_post': True, 'allow_put': True,
-                       'default': attributes.ATTR_NOT_SPECIFIED,
-                       'is_visible': True},
-    }
-}
-
-
-class Nat(object):
-    """Extension class supporting OpenCloud NAT networking
-
-    This class is used by Quantum's extension framework to make
-    metadata about the OpenCloud Port extension available to
-    clients. No new resources are defined by this extension. Instead,
-    the existing Port resource's request and response messages are
-    extended with attributes in the OpenCloud namespace.
-    """
-
-    @classmethod
-    def get_name(cls):
-        return "OpenCloud NAT Networking Extension"
-
-    @classmethod
-    def get_alias(cls):
-        return "nat"
-
-    @classmethod
-    def get_description(cls):
-        return "Add TCP/UDP port forwarding through NAT to Quantum Port objects"
-
-    @classmethod
-    def get_namespace(cls):
-        # return "http://docs.openstack.org/ext/provider/api/v1.0"
-        # Nothing there right now
-        return "http://www.vicci.org/ext/opencloud/nat/api/v0.1"
-
-    @classmethod
-    def get_updated(cls):
-        return "2013-09-12T10:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
diff --git a/xos/neutron_extension/1:2013.2.2-0ubuntu1~cloud0/ovs_db_v2.py b/xos/neutron_extension/1:2013.2.2-0ubuntu1~cloud0/ovs_db_v2.py
deleted file mode 100644
index 39cf315..0000000
--- a/xos/neutron_extension/1:2013.2.2-0ubuntu1~cloud0/ovs_db_v2.py
+++ /dev/null
@@ -1,425 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2011 Nicira Networks, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-# @author: Aaron Rosen, Nicira Networks, Inc.
-# @author: Bob Kukura, Red Hat, Inc.
-
-from sqlalchemy import func
-from sqlalchemy.orm import exc
-
-from neutron.common import exceptions as q_exc
-import neutron.db.api as db
-from neutron.db import models_v2
-from neutron.db import securitygroups_db as sg_db
-from neutron.extensions import securitygroup as ext_sg
-from neutron import manager
-from neutron.openstack.common.db import exception as db_exc
-from neutron.openstack.common import log as logging
-from neutron.plugins.openvswitch.common import constants
-from neutron.plugins.openvswitch import ovs_models_v2
-
-LOG = logging.getLogger(__name__)
-
-
-def initialize():
-    db.configure_db()
-
-
-def get_network_binding(session, network_id):
-    session = session or db.get_session()
-    try:
-        binding = (session.query(ovs_models_v2.NetworkBinding).
-                   filter_by(network_id=network_id).
-                   one())
-        return binding
-    except exc.NoResultFound:
-        return
-
-
-def add_network_binding(session, network_id, network_type,
-                        physical_network, segmentation_id):
-    with session.begin(subtransactions=True):
-        binding = ovs_models_v2.NetworkBinding(network_id, network_type,
-                                               physical_network,
-                                               segmentation_id)
-        session.add(binding)
-
-def get_port_forwarding(session, port_id):
-    session = session or db.get_session()
-    try:
-        forward = (session.query(ovs_models_v2.PortForwarding).
-                   filter_by(port_id=port_id).one())
-        return forward['forward_ports']
-    except exc.NoResultFound:
-        return
-
-def clear_port_forwarding(session, port_id):
-    with session.begin(subtransactions=True):
-        try:
-            # Get rid of old port bindings
-            forward = (session.query(ovs_models_v2.PortForwarding).
-                       filter_by(port_id=port_id).one())
-            if forward:
-                session.delete(forward)
-        except exc.NoResultFound:
-            pass
-
-def add_port_forwarding(session, port_id, forward_ports):
-    with session.begin(subtransactions=True):
-        forward = ovs_models_v2.PortForwarding(port_id, forward_ports)
-        session.add(forward)
-
-def sync_vlan_allocations(network_vlan_ranges):
-    """Synchronize vlan_allocations table with configured VLAN ranges."""
-
-    session = db.get_session()
-    with session.begin():
-        # get existing allocations for all physical networks
-        allocations = dict()
-        allocs = (session.query(ovs_models_v2.VlanAllocation).
-                  all())
-        for alloc in allocs:
-            if alloc.physical_network not in allocations:
-                allocations[alloc.physical_network] = set()
-            allocations[alloc.physical_network].add(alloc)
-
-        # process vlan ranges for each configured physical network
-        for physical_network, vlan_ranges in network_vlan_ranges.iteritems():
-            # determine current configured allocatable vlans for this
-            # physical network
-            vlan_ids = set()
-            for vlan_range in vlan_ranges:
-                vlan_ids |= set(xrange(vlan_range[0], vlan_range[1] + 1))
-
-            # remove from table unallocated vlans not currently allocatable
-            if physical_network in allocations:
-                for alloc in allocations[physical_network]:
-                    try:
-                        # see if vlan is allocatable
-                        vlan_ids.remove(alloc.vlan_id)
-                    except KeyError:
-                        # it's not allocatable, so check if its allocated
-                        if not alloc.allocated:
-                            # it's not, so remove it from table
-                            LOG.debug(_("Removing vlan %(vlan_id)s on "
-                                        "physical network "
-                                        "%(physical_network)s from pool"),
-                                      {'vlan_id': alloc.vlan_id,
-                                       'physical_network': physical_network})
-                            session.delete(alloc)
-                del allocations[physical_network]
-
-            # add missing allocatable vlans to table
-            for vlan_id in sorted(vlan_ids):
-                alloc = ovs_models_v2.VlanAllocation(physical_network, vlan_id)
-                session.add(alloc)
-
-        # remove from table unallocated vlans for any unconfigured physical
-        # networks
-        for allocs in allocations.itervalues():
-            for alloc in allocs:
-                if not alloc.allocated:
-                    LOG.debug(_("Removing vlan %(vlan_id)s on physical "
-                                "network %(physical_network)s from pool"),
-                              {'vlan_id': alloc.vlan_id,
-                               'physical_network': alloc.physical_network})
-                    session.delete(alloc)
-
-
-def get_vlan_allocation(physical_network, vlan_id):
-    session = db.get_session()
-    try:
-        alloc = (session.query(ovs_models_v2.VlanAllocation).
-                 filter_by(physical_network=physical_network,
-                           vlan_id=vlan_id).
-                 one())
-        return alloc
-    except exc.NoResultFound:
-        return
-
-
-def reserve_vlan(session):
-    with session.begin(subtransactions=True):
-        alloc = (session.query(ovs_models_v2.VlanAllocation).
-                 filter_by(allocated=False).
-                 with_lockmode('update').
-                 first())
-        if alloc:
-            LOG.debug(_("Reserving vlan %(vlan_id)s on physical network "
-                        "%(physical_network)s from pool"),
-                      {'vlan_id': alloc.vlan_id,
-                       'physical_network': alloc.physical_network})
-            alloc.allocated = True
-            return (alloc.physical_network, alloc.vlan_id)
-    raise q_exc.NoNetworkAvailable()
-
-
-def reserve_specific_vlan(session, physical_network, vlan_id):
-    with session.begin(subtransactions=True):
-        try:
-            alloc = (session.query(ovs_models_v2.VlanAllocation).
-                     filter_by(physical_network=physical_network,
-                               vlan_id=vlan_id).
-                     with_lockmode('update').
-                     one())
-            if alloc.allocated:
-                if vlan_id == constants.FLAT_VLAN_ID:
-                    raise q_exc.FlatNetworkInUse(
-                        physical_network=physical_network)
-                else:
-                    raise q_exc.VlanIdInUse(vlan_id=vlan_id,
-                                            physical_network=physical_network)
-            LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical "
-                        "network %(physical_network)s from pool"),
-                      {'vlan_id': vlan_id,
-                       'physical_network': physical_network})
-            alloc.allocated = True
-        except exc.NoResultFound:
-            LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical "
-                        "network %(physical_network)s outside pool"),
-                      {'vlan_id': vlan_id,
-                       'physical_network': physical_network})
-            alloc = ovs_models_v2.VlanAllocation(physical_network, vlan_id)
-            alloc.allocated = True
-            session.add(alloc)
-
-
-def release_vlan(session, physical_network, vlan_id, network_vlan_ranges):
-    with session.begin(subtransactions=True):
-        try:
-            alloc = (session.query(ovs_models_v2.VlanAllocation).
-                     filter_by(physical_network=physical_network,
-                               vlan_id=vlan_id).
-                     with_lockmode('update').
-                     one())
-            alloc.allocated = False
-            inside = False
-            for vlan_range in network_vlan_ranges.get(physical_network, []):
-                if vlan_id >= vlan_range[0] and vlan_id <= vlan_range[1]:
-                    inside = True
-                    break
-            if not inside:
-                session.delete(alloc)
-                LOG.debug(_("Releasing vlan %(vlan_id)s on physical network "
-                            "%(physical_network)s outside pool"),
-                          {'vlan_id': vlan_id,
-                           'physical_network': physical_network})
-            else:
-                LOG.debug(_("Releasing vlan %(vlan_id)s on physical network "
-                            "%(physical_network)s to pool"),
-                          {'vlan_id': vlan_id,
-                           'physical_network': physical_network})
-        except exc.NoResultFound:
-            LOG.warning(_("vlan_id %(vlan_id)s on physical network "
-                          "%(physical_network)s not found"),
-                        {'vlan_id': vlan_id,
-                         'physical_network': physical_network})
-
-
-def sync_tunnel_allocations(tunnel_id_ranges):
-    """Synchronize tunnel_allocations table with configured tunnel ranges."""
-
-    # determine current configured allocatable tunnels
-    tunnel_ids = set()
-    for tunnel_id_range in tunnel_id_ranges:
-        tun_min, tun_max = tunnel_id_range
-        if tun_max + 1 - tun_min > 1000000:
-            LOG.error(_("Skipping unreasonable tunnel ID range "
-                        "%(tun_min)s:%(tun_max)s"),
-                      {'tun_min': tun_min, 'tun_max': tun_max})
-        else:
-            tunnel_ids |= set(xrange(tun_min, tun_max + 1))
-
-    session = db.get_session()
-    with session.begin():
-        # remove from table unallocated tunnels not currently allocatable
-        allocs = (session.query(ovs_models_v2.TunnelAllocation).
-                  all())
-        for alloc in allocs:
-            try:
-                # see if tunnel is allocatable
-                tunnel_ids.remove(alloc.tunnel_id)
-            except KeyError:
-                # it's not allocatable, so check if its allocated
-                if not alloc.allocated:
-                    # it's not, so remove it from table
-                    LOG.debug(_("Removing tunnel %s from pool"),
-                              alloc.tunnel_id)
-                    session.delete(alloc)
-
-        # add missing allocatable tunnels to table
-        for tunnel_id in sorted(tunnel_ids):
-            alloc = ovs_models_v2.TunnelAllocation(tunnel_id)
-            session.add(alloc)
-
-
-def get_tunnel_allocation(tunnel_id):
-    session = db.get_session()
-    try:
-        alloc = (session.query(ovs_models_v2.TunnelAllocation).
-                 filter_by(tunnel_id=tunnel_id).
-                 with_lockmode('update').
-                 one())
-        return alloc
-    except exc.NoResultFound:
-        return
-
-
-def reserve_tunnel(session):
-    with session.begin(subtransactions=True):
-        alloc = (session.query(ovs_models_v2.TunnelAllocation).
-                 filter_by(allocated=False).
-                 with_lockmode('update').
-                 first())
-        if alloc:
-            LOG.debug(_("Reserving tunnel %s from pool"), alloc.tunnel_id)
-            alloc.allocated = True
-            return alloc.tunnel_id
-    raise q_exc.NoNetworkAvailable()
-
-
-def reserve_specific_tunnel(session, tunnel_id):
-    with session.begin(subtransactions=True):
-        try:
-            alloc = (session.query(ovs_models_v2.TunnelAllocation).
-                     filter_by(tunnel_id=tunnel_id).
-                     with_lockmode('update').
-                     one())
-            if alloc.allocated:
-                raise q_exc.TunnelIdInUse(tunnel_id=tunnel_id)
-            LOG.debug(_("Reserving specific tunnel %s from pool"), tunnel_id)
-            alloc.allocated = True
-        except exc.NoResultFound:
-            LOG.debug(_("Reserving specific tunnel %s outside pool"),
-                      tunnel_id)
-            alloc = ovs_models_v2.TunnelAllocation(tunnel_id)
-            alloc.allocated = True
-            session.add(alloc)
-
-
-def release_tunnel(session, tunnel_id, tunnel_id_ranges):
-    with session.begin(subtransactions=True):
-        try:
-            alloc = (session.query(ovs_models_v2.TunnelAllocation).
-                     filter_by(tunnel_id=tunnel_id).
-                     with_lockmode('update').
-                     one())
-            alloc.allocated = False
-            inside = False
-            for tunnel_id_range in tunnel_id_ranges:
-                if (tunnel_id >= tunnel_id_range[0]
-                    and tunnel_id <= tunnel_id_range[1]):
-                    inside = True
-                    break
-            if not inside:
-                session.delete(alloc)
-                LOG.debug(_("Releasing tunnel %s outside pool"), tunnel_id)
-            else:
-                LOG.debug(_("Releasing tunnel %s to pool"), tunnel_id)
-        except exc.NoResultFound:
-            LOG.warning(_("tunnel_id %s not found"), tunnel_id)
-
-
-def get_port(port_id):
-    session = db.get_session()
-    try:
-        port = session.query(models_v2.Port).filter_by(id=port_id).one()
-    except exc.NoResultFound:
-        port = None
-    return port
-
-
-def get_port_from_device(port_id):
-    """Get port from database."""
-    LOG.debug(_("get_port_with_securitygroups() called:port_id=%s"), port_id)
-    session = db.get_session()
-    sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
-
-    query = session.query(models_v2.Port,
-                          sg_db.SecurityGroupPortBinding.security_group_id)
-    query = query.outerjoin(sg_db.SecurityGroupPortBinding,
-                            models_v2.Port.id == sg_binding_port)
-    query = query.filter(models_v2.Port.id == port_id)
-    port_and_sgs = query.all()
-    if not port_and_sgs:
-        return None
-    port = port_and_sgs[0][0]
-    plugin = manager.NeutronManager.get_plugin()
-    port_dict = plugin._make_port_dict(port)
-    port_dict[ext_sg.SECURITYGROUPS] = [
-        sg_id for port_, sg_id in port_and_sgs if sg_id]
-    port_dict['security_group_rules'] = []
-    port_dict['security_group_source_groups'] = []
-    port_dict['fixed_ips'] = [ip['ip_address']
-                              for ip in port['fixed_ips']]
-    return port_dict
-
-
-def set_port_status(port_id, status):
-    session = db.get_session()
-    try:
-        port = session.query(models_v2.Port).filter_by(id=port_id).one()
-        port['status'] = status
-        session.merge(port)
-        session.flush()
-    except exc.NoResultFound:
-        raise q_exc.PortNotFound(port_id=port_id)
-
-
-def get_tunnel_endpoints():
-    session = db.get_session()
-
-    tunnels = session.query(ovs_models_v2.TunnelEndpoint)
-    return [{'id': tunnel.id,
-             'ip_address': tunnel.ip_address} for tunnel in tunnels]
-
-
-def _generate_tunnel_id(session):
-    max_tunnel_id = session.query(
-        func.max(ovs_models_v2.TunnelEndpoint.id)).scalar() or 0
-    return max_tunnel_id + 1
-
-
-def add_tunnel_endpoint(ip, max_retries=10):
-    """Return the endpoint of the given IP address or generate a new one."""
-
-    # NOTE(rpodolyaka): generation of a new tunnel endpoint must be put into a
-    #                   repeatedly executed transactional block to ensure it
-    #                   doesn't conflict with any other concurrently executed
-    #                   DB transactions in spite of the specified transactions
-    #                   isolation level value
-    for i in xrange(max_retries):
-        LOG.debug(_('Adding a tunnel endpoint for %s'), ip)
-        try:
-            session = db.get_session()
-            with session.begin(subtransactions=True):
-                tunnel = (session.query(ovs_models_v2.TunnelEndpoint).
-                          filter_by(ip_address=ip).with_lockmode('update').
-                          first())
-
-                if tunnel is None:
-                    tunnel_id = _generate_tunnel_id(session)
-                    tunnel = ovs_models_v2.TunnelEndpoint(ip, tunnel_id)
-                    session.add(tunnel)
-
-                return tunnel
-        except db_exc.DBDuplicateEntry:
-            # a concurrent transaction has been commited, try again
-            LOG.debug(_('Adding a tunnel endpoint failed due to a concurrent'
-                        'transaction had been commited (%s attempts left)'),
-                      max_retries - (i + 1))
-
-    raise q_exc.NeutronException(
-        message=_('Unable to generate a new tunnel id'))
diff --git a/xos/neutron_extension/1:2013.2.2-0ubuntu1~cloud0/ovs_models_v2.py b/xos/neutron_extension/1:2013.2.2-0ubuntu1~cloud0/ovs_models_v2.py
deleted file mode 100644
index 7e022f5..0000000
--- a/xos/neutron_extension/1:2013.2.2-0ubuntu1~cloud0/ovs_models_v2.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2011 Nicira Networks, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-# @author: Aaron Rosen, Nicira Networks, Inc.
-# @author: Bob Kukura, Red Hat, Inc.
-
-
-from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, PickleType
-from sqlalchemy.schema import UniqueConstraint
-
-from neutron.db.models_v2 import model_base
-
-
-class VlanAllocation(model_base.BASEV2):
-    """Represents allocation state of vlan_id on physical network."""
-    __tablename__ = 'ovs_vlan_allocations'
-
-    physical_network = Column(String(64), nullable=False, primary_key=True)
-    vlan_id = Column(Integer, nullable=False, primary_key=True,
-                     autoincrement=False)
-    allocated = Column(Boolean, nullable=False)
-
-    def __init__(self, physical_network, vlan_id):
-        self.physical_network = physical_network
-        self.vlan_id = vlan_id
-        self.allocated = False
-
-    def __repr__(self):
-        return "<VlanAllocation(%s,%d,%s)>" % (self.physical_network,
-                                               self.vlan_id, self.allocated)
-
-
-class TunnelAllocation(model_base.BASEV2):
-    """Represents allocation state of tunnel_id."""
-    __tablename__ = 'ovs_tunnel_allocations'
-
-    tunnel_id = Column(Integer, nullable=False, primary_key=True,
-                       autoincrement=False)
-    allocated = Column(Boolean, nullable=False)
-
-    def __init__(self, tunnel_id):
-        self.tunnel_id = tunnel_id
-        self.allocated = False
-
-    def __repr__(self):
-        return "<TunnelAllocation(%d,%s)>" % (self.tunnel_id, self.allocated)
-
-
-class NetworkBinding(model_base.BASEV2):
-    """Represents binding of virtual network to physical realization."""
-    __tablename__ = 'ovs_network_bindings'
-
-    network_id = Column(String(36),
-                        ForeignKey('networks.id', ondelete="CASCADE"),
-                        primary_key=True)
-    # 'gre', 'vlan', 'flat', 'local'
-    network_type = Column(String(32), nullable=False)
-    physical_network = Column(String(64))
-    segmentation_id = Column(Integer)  # tunnel_id or vlan_id
-
-    def __init__(self, network_id, network_type, physical_network,
-                 segmentation_id):
-        self.network_id = network_id
-        self.network_type = network_type
-        self.physical_network = physical_network
-        self.segmentation_id = segmentation_id
-
-    def __repr__(self):
-        return "<NetworkBinding(%s,%s,%s,%d)>" % (self.network_id,
-                                                  self.network_type,
-                                                  self.physical_network,
-                                                  self.segmentation_id)
-
-class PortForwarding(model_base.BASEV2):
-    """Ports to be forwarded through NAT """
-    __tablename__ = 'ovs_port_forwarding'
-
-    port_id = Column(String(36),
-                     ForeignKey('ports.id', ondelete="CASCADE"),
-                     primary_key=True)
-    forward_ports = Column(PickleType)
-
-    def __init__(self, port_id, forward_ports):
-        self.port_id = port_id
-        self.forward_ports = forward_ports
-
-    def __repr__(self):
-        return "<PortForwarding(%s,%s)>" % (self.port_id, self.forward_ports)
-
-class TunnelEndpoint(model_base.BASEV2):
-    """Represents tunnel endpoint in RPC mode."""
-    __tablename__ = 'ovs_tunnel_endpoints'
-    __table_args__ = (
-        UniqueConstraint('id', name='uniq_ovs_tunnel_endpoints0id'),
-    )
-
-    ip_address = Column(String(64), primary_key=True)
-    id = Column(Integer, nullable=False)
-
-    def __init__(self, ip_address, id):
-        self.ip_address = ip_address
-        self.id = id
-
-    def __repr__(self):
-        return "<TunnelEndpoint(%s,%s)>" % (self.ip_address, self.id)
-
diff --git a/xos/neutron_extension/1:2013.2.2-0ubuntu1~cloud0/ovs_neutron_plugin.py b/xos/neutron_extension/1:2013.2.2-0ubuntu1~cloud0/ovs_neutron_plugin.py
deleted file mode 100644
index abf0f80..0000000
--- a/xos/neutron_extension/1:2013.2.2-0ubuntu1~cloud0/ovs_neutron_plugin.py
+++ /dev/null
@@ -1,708 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2011 Nicira Networks, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-# @author: Somik Behera, Nicira Networks, Inc.
-# @author: Brad Hall, Nicira Networks, Inc.
-# @author: Dan Wendlandt, Nicira Networks, Inc.
-# @author: Dave Lapsley, Nicira Networks, Inc.
-# @author: Aaron Rosen, Nicira Networks, Inc.
-# @author: Bob Kukura, Red Hat, Inc.
-# @author: Seetharama Ayyadevara, Freescale Semiconductor, Inc.
-
-import sys
-
-from oslo.config import cfg
-
-from neutron.agent import securitygroups_rpc as sg_rpc
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
-from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
-from neutron.api.v2 import attributes
-from neutron.common import constants as q_const
-from neutron.common import exceptions as q_exc
-from neutron.common import rpc as q_rpc
-from neutron.common import topics
-from neutron.common import utils
-from neutron.db import agents_db
-from neutron.db import agentschedulers_db
-from neutron.db import allowedaddresspairs_db as addr_pair_db
-from neutron.db import db_base_plugin_v2
-from neutron.db import dhcp_rpc_base
-from neutron.db import external_net_db
-from neutron.db import extradhcpopt_db
-from neutron.db import extraroute_db
-from neutron.db import l3_agentschedulers_db
-from neutron.db import l3_gwmode_db
-from neutron.db import l3_rpc_base
-from neutron.db import portbindings_db
-from neutron.db import quota_db  # noqa
-from neutron.db import securitygroups_rpc_base as sg_db_rpc
-from neutron.extensions import allowedaddresspairs as addr_pair
-from neutron.extensions import extra_dhcp_opt as edo_ext
-from neutron.extensions import portbindings
-from neutron.extensions import providernet as provider
-from neutron.extensions import nat
-from neutron import manager
-from neutron.openstack.common import importutils
-from neutron.openstack.common import log as logging
-from neutron.openstack.common import rpc
-from neutron.openstack.common.rpc import proxy
-from neutron.plugins.common import constants as svc_constants
-from neutron.plugins.common import utils as plugin_utils
-from neutron.plugins.openvswitch.common import config  # noqa
-from neutron.plugins.openvswitch.common import constants
-from neutron.plugins.openvswitch import ovs_db_v2
-
-
-LOG = logging.getLogger(__name__)
-
-
-class OVSRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
-                      l3_rpc_base.L3RpcCallbackMixin,
-                      sg_db_rpc.SecurityGroupServerRpcCallbackMixin):
-
-    # history
-    #   1.0 Initial version
-    #   1.1 Support Security Group RPC
-
-    RPC_API_VERSION = '1.1'
-
-    def __init__(self, notifier, tunnel_type):
-        self.notifier = notifier
-        self.tunnel_type = tunnel_type
-
-    def create_rpc_dispatcher(self):
-        '''Get the rpc dispatcher for this manager.
-
-        If a manager would like to set an rpc API version, or support more than
-        one class as the target of rpc messages, override this method.
-        '''
-        return q_rpc.PluginRpcDispatcher([self,
-                                          agents_db.AgentExtRpcCallback()])
-
-    @classmethod
-    def get_port_from_device(cls, device):
-        port = ovs_db_v2.get_port_from_device(device)
-        if port:
-            port['device'] = device
-        return port
-
-    def get_device_details(self, rpc_context, **kwargs):
-        """Agent requests device details."""
-        agent_id = kwargs.get('agent_id')
-        device = kwargs.get('device')
-        LOG.debug(_("Device %(device)s details requested from %(agent_id)s"),
-                  {'device': device, 'agent_id': agent_id})
-        port = ovs_db_v2.get_port(device)
-        if port:
-            binding = ovs_db_v2.get_network_binding(None, port['network_id'])
-            entry = {'device': device,
-                     'network_id': port['network_id'],
-                     'port_id': port['id'],
-                     'admin_state_up': port['admin_state_up'],
-                     'network_type': binding.network_type,
-                     'segmentation_id': binding.segmentation_id,
-                     'physical_network': binding.physical_network}
-            new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up']
-                          else q_const.PORT_STATUS_DOWN)
-            if port['status'] != new_status:
-                ovs_db_v2.set_port_status(port['id'], new_status)
-        else:
-            entry = {'device': device}
-            LOG.debug(_("%s can not be found in database"), device)
-        return entry
-
-    def update_device_down(self, rpc_context, **kwargs):
-        """Device no longer exists on agent."""
-        agent_id = kwargs.get('agent_id')
-        device = kwargs.get('device')
-        host = kwargs.get('host')
-        port = ovs_db_v2.get_port(device)
-        LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"),
-                  {'device': device, 'agent_id': agent_id})
-        if port:
-            entry = {'device': device,
-                     'exists': True}
-            plugin = manager.NeutronManager.get_plugin()
-            if (host and
-                not plugin.get_port_host(rpc_context, port['id']) == host):
-                LOG.debug(_("Device %(device)s not bound to the"
-                            " agent host %(host)s"),
-                          {'device': device, 'host': host})
-            elif port['status'] != q_const.PORT_STATUS_DOWN:
-                # Set port status to DOWN
-                ovs_db_v2.set_port_status(port['id'],
-                                          q_const.PORT_STATUS_DOWN)
-        else:
-            entry = {'device': device,
-                     'exists': False}
-            LOG.debug(_("%s can not be found in database"), device)
-        return entry
-
-    def update_device_up(self, rpc_context, **kwargs):
-        """Device is up on agent."""
-        agent_id = kwargs.get('agent_id')
-        device = kwargs.get('device')
-        host = kwargs.get('host')
-        port = ovs_db_v2.get_port(device)
-        LOG.debug(_("Device %(device)s up on %(agent_id)s"),
-                  {'device': device, 'agent_id': agent_id})
-        plugin = manager.NeutronManager.get_plugin()
-        if port:
-            if (host and
-                not plugin.get_port_host(rpc_context, port['id']) == host):
-                LOG.debug(_("Device %(device)s not bound to the"
-                            " agent host %(host)s"),
-                          {'device': device, 'host': host})
-                return
-            elif port['status'] != q_const.PORT_STATUS_ACTIVE:
-                ovs_db_v2.set_port_status(port['id'],
-                                          q_const.PORT_STATUS_ACTIVE)
-        else:
-            LOG.debug(_("%s can not be found in database"), device)
-
-    def tunnel_sync(self, rpc_context, **kwargs):
-        """Update new tunnel.
-
-        Updates the datbase with the tunnel IP. All listening agents will also
-        be notified about the new tunnel IP.
-        """
-        tunnel_ip = kwargs.get('tunnel_ip')
-        # Update the database with the IP
-        tunnel = ovs_db_v2.add_tunnel_endpoint(tunnel_ip)
-        tunnels = ovs_db_v2.get_tunnel_endpoints()
-        entry = dict()
-        entry['tunnels'] = tunnels
-        # Notify all other listening agents
-        self.notifier.tunnel_update(rpc_context, tunnel.ip_address,
-                                    tunnel.id, self.tunnel_type)
-        # Return the list of tunnels IP's to the agent
-        return entry
-
-
-class AgentNotifierApi(proxy.RpcProxy,
-                       sg_rpc.SecurityGroupAgentRpcApiMixin):
-    '''Agent side of the openvswitch rpc API.
-
-    API version history:
-        1.0 - Initial version.
-
-    '''
-
-    BASE_RPC_API_VERSION = '1.0'
-
-    def __init__(self, topic):
-        super(AgentNotifierApi, self).__init__(
-            topic=topic, default_version=self.BASE_RPC_API_VERSION)
-        self.topic_network_delete = topics.get_topic_name(topic,
-                                                          topics.NETWORK,
-                                                          topics.DELETE)
-        self.topic_port_update = topics.get_topic_name(topic,
-                                                       topics.PORT,
-                                                       topics.UPDATE)
-        self.topic_tunnel_update = topics.get_topic_name(topic,
-                                                         constants.TUNNEL,
-                                                         topics.UPDATE)
-
-    def network_delete(self, context, network_id):
-        self.fanout_cast(context,
-                         self.make_msg('network_delete',
-                                       network_id=network_id),
-                         topic=self.topic_network_delete)
-
-    def port_update(self, context, port, network_type, segmentation_id,
-                    physical_network):
-        self.fanout_cast(context,
-                         self.make_msg('port_update',
-                                       port=port,
-                                       network_type=network_type,
-                                       segmentation_id=segmentation_id,
-                                       physical_network=physical_network),
-                         topic=self.topic_port_update)
-
-    def tunnel_update(self, context, tunnel_ip, tunnel_id, tunnel_type):
-        self.fanout_cast(context,
-                         self.make_msg('tunnel_update',
-                                       tunnel_ip=tunnel_ip,
-                                       tunnel_id=tunnel_id,
-                                       tunnel_type=tunnel_type),
-                         topic=self.topic_tunnel_update)
-
-
-class OVSNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
-                         external_net_db.External_net_db_mixin,
-                         extraroute_db.ExtraRoute_db_mixin,
-                         l3_gwmode_db.L3_NAT_db_mixin,
-                         sg_db_rpc.SecurityGroupServerRpcMixin,
-                         l3_agentschedulers_db.L3AgentSchedulerDbMixin,
-                         agentschedulers_db.DhcpAgentSchedulerDbMixin,
-                         portbindings_db.PortBindingMixin,
-                         extradhcpopt_db.ExtraDhcpOptMixin,
-                         addr_pair_db.AllowedAddressPairsMixin):
-
-    """Implement the Neutron abstractions using Open vSwitch.
-
-    Depending on whether tunneling is enabled, either a GRE, VXLAN tunnel or
-    a new VLAN is created for each network. An agent is relied upon to
-    perform the actual OVS configuration on each host.
-
-    The provider extension is also supported. As discussed in
-    https://bugs.launchpad.net/neutron/+bug/1023156, this class could
-    be simplified, and filtering on extended attributes could be
-    handled, by adding support for extended attributes to the
-    NeutronDbPluginV2 base class. When that occurs, this class should
-    be updated to take advantage of it.
-
-    The port binding extension enables an external application relay
-    information to and from the plugin.
-    """
-
-    # This attribute specifies whether the plugin supports or not
-    # bulk/pagination/sorting operations. Name mangling is used in
-    # order to ensure it is qualified by class
-    __native_bulk_support = True
-    __native_pagination_support = True
-    __native_sorting_support = True
-
-    _supported_extension_aliases = ["provider", "external-net", "router",
-                                    "ext-gw-mode", "binding", "quotas",
-                                    "security-group", "agent", "extraroute",
-                                    "l3_agent_scheduler",
-                                    "dhcp_agent_scheduler",
-                                    "extra_dhcp_opt",
-                                    "allowed-address-pairs",
-                                    "nat"]
-
-    @property
-    def supported_extension_aliases(self):
-        if not hasattr(self, '_aliases'):
-            aliases = self._supported_extension_aliases[:]
-            sg_rpc.disable_security_group_extension_if_noop_driver(aliases)
-            self._aliases = aliases
-        return self._aliases
-
-    def __init__(self, configfile=None):
-        self.base_binding_dict = {
-            portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
-            portbindings.CAPABILITIES: {
-                portbindings.CAP_PORT_FILTER:
-                'security-group' in self.supported_extension_aliases}}
-        ovs_db_v2.initialize()
-        self._parse_network_vlan_ranges()
-        ovs_db_v2.sync_vlan_allocations(self.network_vlan_ranges)
-        self.tenant_network_type = cfg.CONF.OVS.tenant_network_type
-        if self.tenant_network_type not in [constants.TYPE_LOCAL,
-                                            constants.TYPE_VLAN,
-                                            constants.TYPE_GRE,
-                                            constants.TYPE_VXLAN,
-                                            constants.TYPE_NONE]:
-            LOG.error(_("Invalid tenant_network_type: %s. "
-                      "Server terminated!"),
-                      self.tenant_network_type)
-            sys.exit(1)
-        self.enable_tunneling = cfg.CONF.OVS.enable_tunneling
-        self.tunnel_type = None
-        if self.enable_tunneling:
-            self.tunnel_type = cfg.CONF.OVS.tunnel_type or constants.TYPE_GRE
-        elif cfg.CONF.OVS.tunnel_type:
-            self.tunnel_type = cfg.CONF.OVS.tunnel_type
-            self.enable_tunneling = True
-        self.tunnel_id_ranges = []
-        if self.enable_tunneling:
-            self._parse_tunnel_id_ranges()
-            ovs_db_v2.sync_tunnel_allocations(self.tunnel_id_ranges)
-        elif self.tenant_network_type in constants.TUNNEL_NETWORK_TYPES:
-            LOG.error(_("Tunneling disabled but tenant_network_type is '%s'. "
-                      "Server terminated!"), self.tenant_network_type)
-            sys.exit(1)
-        self.setup_rpc()
-        self.network_scheduler = importutils.import_object(
-            cfg.CONF.network_scheduler_driver
-        )
-        self.router_scheduler = importutils.import_object(
-            cfg.CONF.router_scheduler_driver
-        )
-
-    def setup_rpc(self):
-        # RPC support
-        self.service_topics = {svc_constants.CORE: topics.PLUGIN,
-                               svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
-        self.conn = rpc.create_connection(new=True)
-        self.notifier = AgentNotifierApi(topics.AGENT)
-        self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = (
-            dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
-        )
-        self.agent_notifiers[q_const.AGENT_TYPE_L3] = (
-            l3_rpc_agent_api.L3AgentNotify
-        )
-        self.callbacks = OVSRpcCallbacks(self.notifier, self.tunnel_type)
-        self.dispatcher = self.callbacks.create_rpc_dispatcher()
-        for svc_topic in self.service_topics.values():
-            self.conn.create_consumer(svc_topic, self.dispatcher, fanout=False)
-        # Consume from all consumers in a thread
-        self.conn.consume_in_thread()
-
-    def _parse_network_vlan_ranges(self):
-        try:
-            self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
-                cfg.CONF.OVS.network_vlan_ranges)
-        except Exception as ex:
-            LOG.error(_("%s. Server terminated!"), ex)
-            sys.exit(1)
-        LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges)
-
-    def _parse_tunnel_id_ranges(self):
-        for entry in cfg.CONF.OVS.tunnel_id_ranges:
-            entry = entry.strip()
-            try:
-                tun_min, tun_max = entry.split(':')
-                self.tunnel_id_ranges.append((int(tun_min), int(tun_max)))
-            except ValueError as ex:
-                LOG.error(_("Invalid tunnel ID range: "
-                            "'%(range)s' - %(e)s. Server terminated!"),
-                          {'range': entry, 'e': ex})
-                sys.exit(1)
-        LOG.info(_("Tunnel ID ranges: %s"), self.tunnel_id_ranges)
-
-    def _extend_network_dict_provider(self, context, network):
-        binding = ovs_db_v2.get_network_binding(context.session,
-                                                network['id'])
-        network[provider.NETWORK_TYPE] = binding.network_type
-        if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
-            network[provider.PHYSICAL_NETWORK] = None
-            network[provider.SEGMENTATION_ID] = binding.segmentation_id
-        elif binding.network_type == constants.TYPE_FLAT:
-            network[provider.PHYSICAL_NETWORK] = binding.physical_network
-            network[provider.SEGMENTATION_ID] = None
-        elif binding.network_type == constants.TYPE_VLAN:
-            network[provider.PHYSICAL_NETWORK] = binding.physical_network
-            network[provider.SEGMENTATION_ID] = binding.segmentation_id
-        elif binding.network_type == constants.TYPE_LOCAL:
-            network[provider.PHYSICAL_NETWORK] = None
-            network[provider.SEGMENTATION_ID] = None
-
-    def _process_provider_create(self, context, attrs):
-        network_type = attrs.get(provider.NETWORK_TYPE)
-        physical_network = attrs.get(provider.PHYSICAL_NETWORK)
-        segmentation_id = attrs.get(provider.SEGMENTATION_ID)
-
-        network_type_set = attributes.is_attr_set(network_type)
-        physical_network_set = attributes.is_attr_set(physical_network)
-        segmentation_id_set = attributes.is_attr_set(segmentation_id)
-
-        if not (network_type_set or physical_network_set or
-                segmentation_id_set):
-            return (None, None, None)
-
-        if not network_type_set:
-            msg = _("provider:network_type required")
-            raise q_exc.InvalidInput(error_message=msg)
-        elif network_type == constants.TYPE_FLAT:
-            if segmentation_id_set:
-                msg = _("provider:segmentation_id specified for flat network")
-                raise q_exc.InvalidInput(error_message=msg)
-            else:
-                segmentation_id = constants.FLAT_VLAN_ID
-        elif network_type == constants.TYPE_VLAN:
-            if not segmentation_id_set:
-                msg = _("provider:segmentation_id required")
-                raise q_exc.InvalidInput(error_message=msg)
-            if not utils.is_valid_vlan_tag(segmentation_id):
-                msg = (_("provider:segmentation_id out of range "
-                         "(%(min_id)s through %(max_id)s)") %
-                       {'min_id': q_const.MIN_VLAN_TAG,
-                        'max_id': q_const.MAX_VLAN_TAG})
-                raise q_exc.InvalidInput(error_message=msg)
-        elif network_type in constants.TUNNEL_NETWORK_TYPES:
-            if not self.enable_tunneling:
-                msg = _("%s networks are not enabled") % network_type
-                raise q_exc.InvalidInput(error_message=msg)
-            if physical_network_set:
-                msg = _("provider:physical_network specified for %s "
-                        "network") % network_type
-                raise q_exc.InvalidInput(error_message=msg)
-            else:
-                physical_network = None
-            if not segmentation_id_set:
-                msg = _("provider:segmentation_id required")
-                raise q_exc.InvalidInput(error_message=msg)
-        elif network_type == constants.TYPE_LOCAL:
-            if physical_network_set:
-                msg = _("provider:physical_network specified for local "
-                        "network")
-                raise q_exc.InvalidInput(error_message=msg)
-            else:
-                physical_network = None
-            if segmentation_id_set:
-                msg = _("provider:segmentation_id specified for local "
-                        "network")
-                raise q_exc.InvalidInput(error_message=msg)
-            else:
-                segmentation_id = None
-        else:
-            msg = _("provider:network_type %s not supported") % network_type
-            raise q_exc.InvalidInput(error_message=msg)
-
-        if network_type in [constants.TYPE_VLAN, constants.TYPE_FLAT]:
-            if physical_network_set:
-                if physical_network not in self.network_vlan_ranges:
-                    msg = _("Unknown provider:physical_network "
-                            "%s") % physical_network
-                    raise q_exc.InvalidInput(error_message=msg)
-            elif 'default' in self.network_vlan_ranges:
-                physical_network = 'default'
-            else:
-                msg = _("provider:physical_network required")
-                raise q_exc.InvalidInput(error_message=msg)
-
-        return (network_type, physical_network, segmentation_id)
-
-    def create_network(self, context, network):
-        (network_type, physical_network,
-         segmentation_id) = self._process_provider_create(context,
-                                                          network['network'])
-
-        session = context.session
-        #set up default security groups
-        tenant_id = self._get_tenant_id_for_create(
-            context, network['network'])
-        self._ensure_default_security_group(context, tenant_id)
-
-        with session.begin(subtransactions=True):
-            if not network_type:
-                # tenant network
-                network_type = self.tenant_network_type
-                if network_type == constants.TYPE_NONE:
-                    raise q_exc.TenantNetworksDisabled()
-                elif network_type == constants.TYPE_VLAN:
-                    (physical_network,
-                     segmentation_id) = ovs_db_v2.reserve_vlan(session)
-                elif network_type in constants.TUNNEL_NETWORK_TYPES:
-                    segmentation_id = ovs_db_v2.reserve_tunnel(session)
-                # no reservation needed for TYPE_LOCAL
-            else:
-                # provider network
-                if network_type in [constants.TYPE_VLAN, constants.TYPE_FLAT]:
-                    ovs_db_v2.reserve_specific_vlan(session, physical_network,
-                                                    segmentation_id)
-                elif network_type in constants.TUNNEL_NETWORK_TYPES:
-                    ovs_db_v2.reserve_specific_tunnel(session, segmentation_id)
-                # no reservation needed for TYPE_LOCAL
-            net = super(OVSNeutronPluginV2, self).create_network(context,
-                                                                 network)
-            ovs_db_v2.add_network_binding(session, net['id'], network_type,
-                                          physical_network, segmentation_id)
-
-            self._process_l3_create(context, net, network['network'])
-            self._extend_network_dict_provider(context, net)
-            # note - exception will rollback entire transaction
-        LOG.debug(_("Created network: %s"), net['id'])
-        return net
-
-    def update_network(self, context, id, network):
-        provider._raise_if_updates_provider_attributes(network['network'])
-
-        session = context.session
-        with session.begin(subtransactions=True):
-            net = super(OVSNeutronPluginV2, self).update_network(context, id,
-                                                                 network)
-            self._process_l3_update(context, net, network['network'])
-            self._extend_network_dict_provider(context, net)
-        return net
-
-    def delete_network(self, context, id):
-        session = context.session
-        with session.begin(subtransactions=True):
-            binding = ovs_db_v2.get_network_binding(session, id)
-            super(OVSNeutronPluginV2, self).delete_network(context, id)
-            if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
-                ovs_db_v2.release_tunnel(session, binding.segmentation_id,
-                                         self.tunnel_id_ranges)
-            elif binding.network_type in [constants.TYPE_VLAN,
-                                          constants.TYPE_FLAT]:
-                ovs_db_v2.release_vlan(session, binding.physical_network,
-                                       binding.segmentation_id,
-                                       self.network_vlan_ranges)
-            # the network_binding record is deleted via cascade from
-            # the network record, so explicit removal is not necessary
-        self.notifier.network_delete(context, id)
-
-    def get_network(self, context, id, fields=None):
-        session = context.session
-        with session.begin(subtransactions=True):
-            net = super(OVSNeutronPluginV2, self).get_network(context,
-                                                              id, None)
-            self._extend_network_dict_provider(context, net)
-        return self._fields(net, fields)
-
-    def get_networks(self, context, filters=None, fields=None,
-                     sorts=None,
-                     limit=None, marker=None, page_reverse=False):
-        session = context.session
-        with session.begin(subtransactions=True):
-            nets = super(OVSNeutronPluginV2,
-                         self).get_networks(context, filters, None, sorts,
-                                            limit, marker, page_reverse)
-            for net in nets:
-                self._extend_network_dict_provider(context, net)
-
-        return [self._fields(net, fields) for net in nets]
-
-    def create_port(self, context, port):
-        # Set port status as 'DOWN'. This will be updated by agent
-        port['port']['status'] = q_const.PORT_STATUS_DOWN
-        port_data = port['port']
-        session = context.session
-        with session.begin(subtransactions=True):
-            self._ensure_default_security_group_on_port(context, port)
-            sgids = self._get_security_groups_on_port(context, port)
-            dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
-            port = super(OVSNeutronPluginV2, self).create_port(context, port)
-            self._process_portbindings_create_and_update(context,
-                                                         port_data, port)
-            self._process_port_create_security_group(context, port, sgids)
-            self._process_port_create_extra_dhcp_opts(context, port,
-                                                      dhcp_opts)
-            port[addr_pair.ADDRESS_PAIRS] = (
-                self._process_create_allowed_address_pairs(
-                    context, port,
-                    port_data.get(addr_pair.ADDRESS_PAIRS)))
-        self.notify_security_groups_member_updated(context, port)
-        return port
-
-    def _extend_port_dict_nat(self, context, port):
-        forward = ovs_db_v2.get_port_forwarding(context.session, port['id'])
-        if forward:
-            port[nat.FORWARD_PORTS] = forward
-        else:
-            port[nat.FORWARD_PORTS] = None
-
-    def _process_nat_update(self, context, attrs, id):
-        forward_ports = attrs.get(nat.FORWARD_PORTS)
-        forward_ports_set = attributes.is_attr_set(forward_ports)
-
-        if not forward_ports_set:
-            return None
-
-        # LOG.info("forward ports %s" % forward_ports)
-        valid_protocols = ["tcp", "udp"]
-        for entry in forward_ports:
-            if not isinstance(entry, dict):
-                msg = _("nat:forward_ports: must specify a list of dicts (ex: 'l4_protocol=tcp,l4_port=80')")
-                raise q_exc.InvalidInput(error_message=msg)
-            if not ("l4_protocol" in entry and "l4_port" in entry):
-                msg = _("nat:forward_ports: dict is missing l4_protocol and l4_port (ex: 'l4_protocol=tcp,l4_port=80')")
-                raise q_exc.InvalidInput(error_message=msg)
-            if entry['l4_protocol'] not in valid_protocols:
-                msg = _("nat:forward_ports: invalid protocol (only tcp and udp allowed)")
-                raise q_exc.InvalidInput(error_message=msg)
-
-            l4_port = entry['l4_port']
-            if ":" in l4_port:
-                try:
-                    (first, last) = l4_port.split(":")
-                    first = int(first)
-                    last = int(last)
-                except:
-                    msg = _("nat:forward_ports: l4_port range must be integer:integer")
-                    raise q_exc.InvalidInput(error_message=msg)
-            else:
-                try:
-                    l4_port = int(l4_port)
-                except:
-                    msg = _("nat:forward_ports: l4_port must be an integer")
-                    raise q_exc.InvalidInput(error_message=msg)
-
-        return forward_ports
-
-    def get_port(self, context, id, fields=None):
-        session = context.session
-        with session.begin(subtransactions=True):
-            port = super(OVSNeutronPluginV2, self).get_port(context, id, None)
-            self._extend_port_dict_nat(context, port)
-        return self._fields(port, fields)
-
-    def get_ports(self, context, filters=None, fields=None):
-        session = context.session
-        with session.begin(subtransactions=True):
-            ports = super(OVSNeutronPluginV2, self).get_ports(context, filters,
-                                                          None)
-            for port in ports:
-                self._extend_port_dict_nat(context, port)
-
-        return [self._fields(port, fields) for port in ports]
-
-    def update_port(self, context, id, port):
-        forward_ports = self._process_nat_update(context, port['port'], id)
-
-        session = context.session
-        need_port_update_notify = False
-        changed_fixed_ips = 'fixed_ips' in port['port']
-        with session.begin(subtransactions=True):
-            original_port = super(OVSNeutronPluginV2, self).get_port(
-                context, id)
-            updated_port = super(OVSNeutronPluginV2, self).update_port(
-                context, id, port)
-            if addr_pair.ADDRESS_PAIRS in port['port']:
-                self._delete_allowed_address_pairs(context, id)
-                self._process_create_allowed_address_pairs(
-                    context, updated_port,
-                    port['port'][addr_pair.ADDRESS_PAIRS])
-                need_port_update_notify = True
-            elif changed_fixed_ips:
-                self._check_fixed_ips_and_address_pairs_no_overlap(
-                    context, updated_port)
-
-            if forward_ports:
-                ovs_db_v2.clear_port_forwarding(session, updated_port['id'])
-                ovs_db_v2.add_port_forwarding(session, updated_port['id'], forward_ports)
-            self._extend_port_dict_nat(context, updated_port)
-
-            need_port_update_notify |= self.update_security_group_on_port(
-                context, id, port, original_port, updated_port)
-            self._process_portbindings_create_and_update(context,
-                                                         port['port'],
-                                                         updated_port)
-            need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
-                context, id, port, updated_port)
-
-        need_port_update_notify |= self.is_security_group_member_updated(
-            context, original_port, updated_port)
-        if original_port['admin_state_up'] != updated_port['admin_state_up']:
-            need_port_update_notify = True
-
-        if need_port_update_notify:
-            binding = ovs_db_v2.get_network_binding(None,
-                                                    updated_port['network_id'])
-            self.notifier.port_update(context, updated_port,
-                                      binding.network_type,
-                                      binding.segmentation_id,
-                                      binding.physical_network)
-        return updated_port
-
-    def delete_port(self, context, id, l3_port_check=True):
-
-        # if needed, check to see if this is a port owned by
-        # and l3-router.  If so, we should prevent deletion.
-        if l3_port_check:
-            self.prevent_l3_port_deletion(context, id)
-
-        session = context.session
-        with session.begin(subtransactions=True):
-            self.disassociate_floatingips(context, id)
-            port = self.get_port(context, id)
-            self._delete_port_security_group_bindings(context, id)
-            super(OVSNeutronPluginV2, self).delete_port(context, id)
-
-        self.notify_security_groups_member_updated(context, port)