Migrate code over from xos repo

Change-Id: I764ac3196babdb1ce78dded2b2d8d6ad6e965ca6
diff --git a/xos/synchronizer/__init__.py b/xos/synchronizer/__init__.py
new file mode 100644
index 0000000..e56cd39
--- /dev/null
+++ b/xos/synchronizer/__init__.py
@@ -0,0 +1,36 @@
+from xos.config import Config
+
+try:
+    observer_disabled = Config().observer_disabled
+except:
+    observer_disabled = False
+
+def EnableObserver(x):
+    """ used for manage.py --noobserver """
+    global observer_disabled
+    observer_disabled = not x
+
+print_once = True
+
+def notify_observer(model=None, delete=False, pk=None, model_dict=None):
+    # use None instead of a mutable {} default, which would be shared
+    # across calls
+    model_dict = model_dict or {}
+    if (observer_disabled):
+        global print_once
+        if (print_once):
+            print "The observer is disabled"
+            print_once = False
+        return
+
+    try:
+        from .event_manager import EventSender
+        if (model and delete):
+            if hasattr(model,"__name__"):
+                modelName = model.__name__
+            else:
+                modelName = model.__class__.__name__
+            EventSender().fire(delete_flag = delete, model = modelName, pk = pk, model_dict=model_dict)
+        else:
+            EventSender().fire()
+    except Exception as e:
+        print "Exception in Observer. This should not disrupt the front end. %s"%str(e)
+
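+# A minimal usage sketch (hypothetical caller; the model class and fields
+# below are illustrative only):
+#
+#   from synchronizer import notify_observer
+#   notify_observer()                    # just wake the observer
+#   notify_observer(model=Slice, delete=True, pk=5,
+#                   model_dict={'name': 'mysite_test'})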
+
diff --git a/xos/synchronizer/client.py b/xos/synchronizer/client.py
new file mode 100644
index 0000000..d1383f4
--- /dev/null
+++ b/xos/synchronizer/client.py
@@ -0,0 +1,210 @@
+import urlparse
+try:
+    from keystoneauth1.identity import v2 as keystoneauth_v2
+    from keystoneauth1 import session as keystone_session
+    from keystoneclient.v2_0 import client as keystone_client
+    #from glance import client as glance_client
+    import glanceclient
+    from novaclient.v2 import client as nova_client
+    from neutronclient.v2_0 import client as neutron_client 
+    has_openstack = True
+except:
+    has_openstack = False
+
+from xos.config import Config
+
+def require_enabled(callable):
+    def wrapper(*args, **kwds):
+        if has_openstack:
+            return callable(*args, **kwds)
+        else:
+            return None
+    return wrapper
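+
+# require_enabled turns any decorated method into a no-op returning None
+# whenever the OpenStack client libraries failed to import above, e.g.:
+#
+#   @require_enabled
+#   def connect(self, *args, **kwds):
+#       ...   # body only runs when has_openstack is True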
+
+def parse_novarc(filename):
+    opts = {}
+    f = open(filename, 'r')
+    for line in f:
+        try:
+            line = line.replace('export', '').strip()
+            # split only on the first '=' so values containing '=' survive
+            parts = line.split('=', 1)
+            if len(parts) > 1:
+                value = parts[1].replace("\'", "")
+                value = value.replace('\"', '')
+                opts[parts[0]] = value
+        except:
+            pass
+    f.close()
+    return opts
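+
+# For example, a novarc line of the form
+#   export OS_USERNAME='admin'
+# yields opts['OS_USERNAME'] == 'admin' ('export' removed, quotes stripped).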
+
+class Client:
+    def __init__(self, username=None, password=None, tenant=None, url=None, token=None, endpoint=None, controller=None, cacert=None, admin=True, *args, **kwds):
+
+        self.has_openstack = has_openstack
+        self.url = controller.auth_url
+        if admin:
+            self.username = controller.admin_user
+            self.password = controller.admin_password
+            self.tenant = controller.admin_tenant
+        else:
+            self.username = None
+            self.password = None
+            self.tenant = None
+
+        if username:
+            self.username = username
+        if password:
+            self.password = password
+        if tenant:
+            self.tenant = tenant
+        if url:
+            self.url = url
+        if token:
+            self.token = token    
+        if endpoint:
+            self.endpoint = endpoint
+
+        if cacert:
+            self.cacert = cacert
+        else:
+            # default to None (no explicit CA cert), not the string "None"
+            self.cacert = getattr(Config(), "nova_ca_ssl_cert", None)
+
+        #if '@' in self.username:
+        #    self.username = self.username[:self.username.index('@')]
+
+class KeystoneClient(Client):
+    def __init__(self, *args, **kwds):
+        Client.__init__(self, *args, **kwds)
+        if has_openstack:
+            auth = keystoneauth_v2.Password(username=self.username,
+                                            password=self.password,
+                                            tenant_name=self.tenant,
+                                            auth_url=self.url,
+                                            )
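+            # Session(verify=...) accepts a CA bundle path, a boolean, or
+            # None for the default verification behaviour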
+            sess = keystone_session.Session(auth=auth, verify=self.cacert, )
+            self.client = keystone_client.Client(session=sess)
+
+    @require_enabled
+    def connect(self, *args, **kwds):
+        self.__init__(*args, **kwds)
+
+    @require_enabled
+    def __getattr__(self, name):
+        return getattr(self.client, name)
+
+
+class Glance(Client):
+    def __init__(self, *args, **kwds):
+        Client.__init__(self, *args, **kwds)
+        if has_openstack:
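+            # note: get_client(host=...) is the entry point of the legacy
+            # glance client; the GlanceClient class below uses the modern
+            # glanceclient.Client(version, endpoint=..., token=...) API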
+            self.client = glanceclient.get_client(host='0.0.0.0',
+                                                   username=self.username,
+                                                   password=self.password,
+                                                   tenant=self.tenant,
+                                                   auth_url=self.url,
+                                                   cacert=self.cacert
+                                                   )
+    @require_enabled
+    def __getattr__(self, name):
+        return getattr(self.client, name)
+
+class GlanceClient(Client):
+    def __init__(self, version, endpoint, token, cacert=None, *args, **kwds):
+        Client.__init__(self, *args, **kwds)
+        if has_openstack:
+            self.client = glanceclient.Client(version, 
+                endpoint=endpoint, 
+                token=token,
+                cacert=cacert
+            )
+
+    @require_enabled
+    def __getattr__(self, name):
+        return getattr(self.client, name)        
+
+class NovaClient(Client):
+    def __init__(self, *args, **kwds):
+        Client.__init__(self, *args, **kwds)
+        if has_openstack:
+            self.client = nova_client.client.Client(
+                    "2",
+                    username=self.username,
+                    api_key=self.password,
+                    project_id=self.tenant,
+                    auth_url=self.url,
+                    region_name='',
+                    extensions=[],
+                    service_type='compute',
+                    service_name='',
+                    cacert=self.cacert
+                    )
+
+    @require_enabled
+    def connect(self, *args, **kwds):
+        self.__init__(*args, **kwds)
+
+    @require_enabled
+    def __getattr__(self, name):
+        return getattr(self.client, name)
+
+class NovaDB(Client):
+    def __init__(self, *args, **kwds):
+        Client.__init__(self, *args, **kwds)
+        if has_openstack:
+            # these imports presumably require a local nova installation,
+            # which is why NovaDB is left disabled in OpenStackClient below
+            from nova.context import get_admin_context
+            from nova.db import api as nova_db_api
+            self.ctx = get_admin_context()
+            nova_db_api.FLAGS(default_config_files=['/etc/nova/nova.conf'])
+            self.client = nova_db_api
+
+
+    @require_enabled
+    def connect(self, *args, **kwds):
+        self.__init__(*args, **kwds)
+
+    @require_enabled
+    def __getattr__(self, name):
+        return getattr(self.client, name)
+
+class NeutronClient(Client):
+    def __init__(self, *args, **kwds):
+        Client.__init__(self, *args, **kwds)
+        if has_openstack:
+            self.client = neutron_client.Client(username=self.username,
+                                                password=self.password,
+                                                tenant_name=self.tenant,
+                                                auth_url=self.url,
+                                                ca_cert=self.cacert
+                                                )
+    @require_enabled
+    def connect(self, *args, **kwds):
+        self.__init__(*args, **kwds)
+
+    @require_enabled
+    def __getattr__(self, name):
+        return getattr(self.client, name)
+
+class OpenStackClient:
+    """
+    A simple native shell to the openstack backend services.
+    This class can receive all nova calls to the underlying testbed
+    """
+
+    def __init__ ( self, *args, **kwds) :
+        # instantiate managers
+        self.keystone = KeystoneClient(*args, **kwds)
+        url_parsed = urlparse.urlparse(self.keystone.url)
+        hostname = url_parsed.netloc.split(':')[0]
+        token = self.keystone.client.tokens.authenticate(username=self.keystone.username, password=self.keystone.password, tenant_name=self.keystone.tenant)
+#        glance_endpoint = self.keystone.client.service_catalog.url_for(service_type='image', endpoint_type='publicURL')
+#        self.glanceclient = GlanceClient('1', endpoint=glance_endpoint, token=token.id, **kwds)
+        self.nova = NovaClient(*args, **kwds)
+        # self.nova_db = NovaDB(*args, **kwds)
+        self.neutron = NeutronClient(*args, **kwds)
+    
+
+    @require_enabled
+    def connect(self, *args, **kwds):
+        self.__init__(*args, **kwds)
+
+    @require_enabled
+    def authenticate(self):
+        return self.keystone.authenticate()
diff --git a/xos/synchronizer/driver.py b/xos/synchronizer/driver.py
new file mode 100644
index 0000000..3064eb2
--- /dev/null
+++ b/xos/synchronizer/driver.py
@@ -0,0 +1,488 @@
+import commands
+import hashlib
+from xos.config import Config
+from core.models import Controller
+
+try:
+    from openstack_xos.client import OpenStackClient
+    has_openstack = True
+except:
+    has_openstack = False
+
+manager_enabled = Config().api_nova_enabled
+
+class OpenStackDriver:
+
+    def __init__(self, config = None, client=None):
+        if config:
+            self.config = Config(config)
+        else:
+            self.config = Config()
+
+        if client:
+            self.shell = client
+
+        self.enabled = manager_enabled
+        self.has_openstack = has_openstack
+        self.controller = None
+        self.admin_user = None
+
+    def client_driver(self, caller=None, tenant=None, controller=None):
+        if caller:
+            auth = {'username': caller.email,
+                    'password': hashlib.md5(caller.password).hexdigest()[:6],
+                    'tenant': tenant}
+            client = OpenStackClient(controller=controller, cacert=self.config.nova_ca_ssl_cert, **auth)
+        else:
+            admin_driver = self.admin_driver(tenant=tenant, controller=controller)
+            client = OpenStackClient(tenant=tenant, controller=admin_driver.controller)
+
+        driver = OpenStackDriver(client=client)
+        #driver.admin_user = admin_driver.admin_user
+        #driver.controller = admin_driver.controller
+        return driver
+
+    def admin_driver(self, tenant=None, controller=None):
+        if isinstance(controller, int):
+            # an int is the controller's primary key, not a model instance
+            controller = Controller.objects.get(id=controller)
+        if not tenant:
+            tenant = controller.admin_tenant
+        client = OpenStackClient(tenant=tenant, controller=controller, cacert=self.config.nova_ca_ssl_cert)
+        driver = OpenStackDriver(client=client)
+        driver.admin_user = client.keystone.users.find(name=controller.admin_user)
+        driver.controller = controller
+        return driver
+
+    def create_role(self, name):
+        roles = self.shell.keystone.roles.findall(name=name)
+        roles_title = self.shell.keystone.roles.findall(name=name.title())
+        roles_found = roles + roles_title
+        if not roles_found:
+            role = self.shell.keystone.roles.create(name)
+        else:
+            role = roles_found[0]
+        return role
+
+    def delete_role(self, filter):
+        roles = self.shell.keystone.roles.findall(**filter)
+        for role in roles:
+            self.shell.keystone.roles.delete(role)
+        return 1
+
+    def create_tenant(self, tenant_name, enabled, description):
+        """Create keystone tenant. Suggested fields: name, description, enabled"""
+        tenants = self.shell.keystone.tenants.findall(name=tenant_name)
+        if not tenants:
+            fields = {'tenant_name': tenant_name, 'enabled': enabled,
+                      'description': description}
+            tenant = self.shell.keystone.tenants.create(**fields)
+        else:
+            tenant = tenants[0]
+
+        # always give the admin user the admin role to any tenant created
+        # by the driver.
+        self.add_user_role(self.admin_user.id, tenant.id, 'admin')
+        return tenant
+
+    def update_tenant(self, id, **kwds):
+        return self.shell.keystone.tenants.update(id, **kwds)
+
+    def delete_tenant(self, id):
+        # FIXME: nova_db is commented out in client.py, throws errors.
+        # Commenting this out for the time being until actually fixed
+
+        #ctx = self.shell.nova_db.ctx
+        tenants = self.shell.keystone.tenants.findall(id=id)
+        for tenant in tenants:
+            # nova does not automatically delete the tenant's instances
+            # so we manually delete instances before deleting the tenant
+            #instances = self.shell.nova_db.instance_get_all_by_filters(ctx,
+            #          {'project_id': tenant.id}, 'id', 'asc')
+            #client = OpenStackClient(tenant=tenant.name)
+            #driver = OpenStackDriver(client=client)
+            #for instance in instances:
+            #    driver.destroy_instance(instance.id)
+            self.shell.keystone.tenants.delete(tenant)
+        return 1
+
+    def create_user(self, name, email, password, enabled):
+        users = self.shell.keystone.users.findall(email=email)
+        if not users:
+            fields = {'name': name, 'email': email, 'password': password,
+                      'enabled': enabled}
+            user = self.shell.keystone.users.create(**fields)
+        else:
+            user = users[0]
+        return user
+
+    def delete_user(self, id):
+        users = self.shell.keystone.users.findall(id=id)
+        for user in users:
+            # delete the user's keys
+            keys = self.shell.nova.keypairs.findall()
+            for key in keys:
+                self.shell.nova.keypairs.delete(key)
+            self.shell.keystone.users.delete(user)
+        return 1
+
+    def get_admin_role(self):
+        role = None
+        for admin_role_name in ['admin', 'Admin']:
+            roles = self.shell.keystone.roles.findall(name=admin_role_name)
+            if roles:
+                role = roles[0]
+                break
+        return role
+
+    def add_user_role(self, kuser_id, tenant_id, role_name):
+        user = self.shell.keystone.users.find(id=kuser_id)
+        tenant = self.shell.keystone.tenants.find(id=tenant_id)
+        # admin role can be lowercase or title. Look for both
+        role = None
+        if role_name.lower() == 'admin':
+            role = self.get_admin_role()
+        else:
+            # look up the non-admin role, raising an exception if it isn't found
+            role = self.shell.keystone.roles.find(name=role_name)
+
+        role_found = False
+        user_roles = user.list_roles(tenant.id)
+        for user_role in user_roles:
+            if user_role.name == role.name:
+                role_found = True
+        if not role_found:
+            tenant.add_user(user, role)
+
+        return 1
+
+    def delete_user_role(self, kuser_id, tenant_id, role_name):
+        user = self.shell.keystone.users.find(id=kuser_id)
+        tenant = self.shell.keystone.tenants.find(id=tenant_id)
+        # admin role can be lowercase or title. Look for both
+        role = None
+        if role_name.lower() == 'admin':
+            role = self.get_admin_role()
+        else:
+            # look up the non-admin role, raising an exception if it isn't found
+            role = self.shell.keystone.roles.find(name=role_name)
+
+        role_found = False
+        user_roles = user.list_roles(tenant.id)
+        for user_role in user_roles:
+            if user_role.name == role.name:
+                role_found = True
+        if role_found:
+            tenant.remove_user(user, role)
+
+        return 1
+
+    def update_user(self, id, fields):
+        if 'password' in fields:
+            self.shell.keystone.users.update_password(id, fields['password'])
+        if 'enabled' in fields:
+            self.shell.keystone.users.update_enabled(id, fields['enabled'])
+        return 1
+
+    def create_router(self, name, set_gateway=True):
+        routers = self.shell.neutron.list_routers(name=name)['routers']
+        if routers:
+            router = routers[0]
+        else:
+            router = self.shell.neutron.create_router({'router': {'name': name}})['router']
+        # add router to external network
+        if set_gateway:
+            nets = self.shell.neutron.list_networks()['networks']
+            for net in nets:
+                if net['router:external'] == True:
+                    self.shell.neutron.add_gateway_router(router['id'],
+                                                          {'network_id': net['id']})
+
+        return router
+
+    def delete_router(self, id):
+        routers = self.shell.neutron.list_routers(id=id)['routers']
+        for router in routers:
+            self.shell.neutron.delete_router(router['id'])
+            # remove router from external network
+            #nets = self.shell.neutron.list_networks()['networks']
+            #for net in nets:
+            #    if net['router:external'] == True:
+            #        self.shell.neutron.remove_gateway_router(router['id'])
+
+    def add_router_interface(self, router_id, subnet_id):
+        router = self.shell.neutron.show_router(router_id)['router']
+        subnet = self.shell.neutron.show_subnet(subnet_id)['subnet']
+        if router and subnet:
+            self.shell.neutron.add_interface_router(router_id, {'subnet_id': subnet_id})
+
+    def delete_router_interface(self, router_id, subnet_id):
+        router = self.shell.neutron.show_router(router_id)
+        subnet = self.shell.neutron.show_subnet(subnet_id)
+        if router and subnet:
+            self.shell.neutron.remove_interface_router(router_id, {'subnet_id': subnet_id})
+
+    def create_network(self, name, shared=False):
+        nets = self.shell.neutron.list_networks(name=name)['networks']
+        if nets:
+            net = nets[0]
+        else:
+            net = self.shell.neutron.create_network({'network': {'name': name, 'shared': shared}})['network']
+        return net
+
+    def delete_network(self, id):
+        nets = self.shell.neutron.list_networks()['networks']
+        for net in nets:
+            if net['id'] == id:
+                # delete all ports
+                self.delete_network_ports(net['id'])
+                # delete all subnets:
+                for subnet_id in net['subnets']:
+                    self.delete_subnet(subnet_id)
+                self.shell.neutron.delete_network(net['id'])
+        return 1
+
+    def delete_network_ports(self, network_id):
+        ports = self.shell.neutron.list_ports()['ports']
+        for port in ports:
+            if port['network_id'] == network_id:
+                self.shell.neutron.delete_port(port['id'])
+        return 1
+
+    def delete_subnet_ports(self, subnet_id):
+        ports = self.shell.neutron.list_ports()['ports']
+        for port in ports:
+            delete = False
+            for fixed_ip in port['fixed_ips']:
+                if fixed_ip['subnet_id'] == subnet_id:
+                    delete=True
+                    break
+            if delete:
+                self.shell.neutron.delete_port(port['id'])
+        return 1
+
+    def create_subnet(self, name, network_id, cidr_ip, ip_version, start, end):
+        #nets = self.shell.neutron.list_networks(name=network_name)['networks']
+        #if not nets:
+        #    raise Exception, "No such network: %s" % network_name
+        #net = nets[0]
+
+        subnet = None
+        subnets = self.shell.neutron.list_subnets()['subnets']
+        for snet in subnets:
+            if snet['cidr'] == cidr_ip and snet['network_id'] == network_id:
+                subnet = snet
+
+        if not subnet:
+            # HACK: Add metadata route -- Neutron does not reliably supply this
+            metadata_ip = cidr_ip.replace("0/24", "3")
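+            # e.g. cidr_ip "10.0.5.0/24" yields metadata_ip "10.0.5.3"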
+
+            allocation_pools = [{'start': start, 'end': end}]
+            subnet = {'subnet': {'name': name,
+                                 'network_id': network_id,
+                                 'ip_version': ip_version,
+                                 'cidr': cidr_ip,
+                                 #'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
+                                 'host_routes': [{'destination':'169.254.169.254/32','nexthop':metadata_ip}],
+                                 'gateway_ip': None,
+                                 'allocation_pools': allocation_pools}}
+            subnet = self.shell.neutron.create_subnet(subnet)['subnet']
+            # self.add_external_route(subnet)
+
+        return subnet
+
+    def update_subnet(self, id, fields):
+        return self.shell.neutron.update_subnet(id, fields)
+
+    def delete_subnet(self, id):
+        #return self.shell.neutron.delete_subnet(id=id)
+        # inefficient but fault tolerant
+        subnets = self.shell.neutron.list_subnets()['subnets']
+        for subnet in subnets:
+            if subnet['id'] == id:
+                self.delete_subnet_ports(subnet['id'])
+                self.shell.neutron.delete_subnet(id)
+                self.delete_external_route(subnet)
+        return 1
+
+    def get_external_routes(self):
+        status, output = commands.getstatusoutput('route')
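+        # 'route' prefixes its table with header lines; the [3:] slice
+        # below assumes three lines of preamble before the first route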
+        routes = output.split('\n')[3:]
+        return routes
+
+    def add_external_route(self, subnet, routes=None):
+        if not routes:
+            routes = self.get_external_routes()
+
+        ports = self.shell.neutron.list_ports()['ports']
+
+        gw_ip = subnet['gateway_ip']
+        subnet_id = subnet['id']
+
+        # 1. Find the port associated with the subnet's gateway
+        # 2. Find the router associated with that port
+        # 3. Find the port associated with this router and on the external net
+        # 4. Set up route to the subnet through the port from step 3
+        ip_address = None
+        for port in ports:
+            for fixed_ip in port['fixed_ips']:
+                if fixed_ip['subnet_id'] == subnet_id and fixed_ip['ip_address'] == gw_ip:
+                    gw_port = port
+                    router_id = gw_port['device_id']
+                    router = self.shell.neutron.show_router(router_id)['router']
+                    if router and router.get('external_gateway_info'):
+                        ext_net = router['external_gateway_info']['network_id']
+                        for port in ports:
+                            if port['device_id'] == router_id and port['network_id'] == ext_net:
+                                ip_address = port['fixed_ips'][0]['ip_address']
+
+        if ip_address:
+            # check if external route already exists
+            route_exists = False
+            if routes:
+                for route in routes:
+                    if subnet['cidr'] in route and ip_address in route:
+                        route_exists = True
+            if not route_exists:
+                cmd = "route add -net %s dev br-ex gw %s" % (subnet['cidr'], ip_address)
+                s, o = commands.getstatusoutput(cmd)
+                #print cmd, "\n", s, o
+
+        return 1
+
+    def delete_external_route(self, subnet):
+        ports = self.shell.neutron.list_ports()['ports']
+
+        gw_ip = subnet['gateway_ip']
+        subnet_id = subnet['id']
+
+        # 1. Find the port associated with the subnet's gateway
+        # 2. Find the router associated with that port
+        # 3. Find the port associated with this router and on the external net
+        # 4. Set up route to the subnet through the port from step 3
+        ip_address = None
+        for port in ports:
+            for fixed_ip in port['fixed_ips']:
+                if fixed_ip['subnet_id'] == subnet_id and fixed_ip['ip_address'] == gw_ip:
+                    gw_port = port
+                    router_id = gw_port['device_id']
+                    router = self.shell.neutron.show_router(router_id)['router']
+                    ext_net = router['external_gateway_info']['network_id']
+                    for port in ports:
+                        if port['device_id'] == router_id and port['network_id'] == ext_net:
+                            ip_address = port['fixed_ips'][0]['ip_address']
+
+        if ip_address:
+            cmd = "route delete -net %s" % (subnet['cidr'])
+            commands.getstatusoutput(cmd)
+
+        return 1
+
+    def create_keypair(self, name, public_key):
+        keys = self.shell.nova.keypairs.findall(name=name)
+        if keys:
+            key = keys[0]
+            # update key
+            if key.public_key != public_key:
+                self.delete_keypair(key.id)
+                key = self.shell.nova.keypairs.create(name=name, public_key=public_key)
+        else:
+            key = self.shell.nova.keypairs.create(name=name, public_key=public_key)
+        return key
+
+    def delete_keypair(self, id):
+        keys = self.shell.nova.keypairs.findall(id=id)
+        for key in keys:
+            self.shell.nova.keypairs.delete(key)
+        return 1
+
+    def get_private_networks(self, tenant=None):
+        if not tenant:
+            tenant = self.shell.nova.tenant
+        tenant = self.shell.keystone.tenants.find(name=tenant)
+        search_opts = {"tenant_id": tenant.id, "shared": False}
+        private_networks = self.shell.neutron.list_networks(**search_opts)
+        return private_networks
+
+    def get_shared_networks(self):
+        search_opts = {"shared": True}
+        shared_networks = self.shell.neutron.list_networks(**search_opts)
+        return shared_networks
+
+    def get_network_subnet(self, network_id):
+        subnet_id = None
+        subnet = None
+        if network_id:
+            os_networks = self.shell.neutron.list_networks(id=network_id)["networks"]
+            if os_networks:
+                os_network = os_networks[0]
+                if os_network['subnets']:
+                    subnet_id = os_network['subnets'][0]
+                    os_subnets = self.shell.neutron.list_subnets(id=subnet_id)['subnets']
+                    if os_subnets:
+                        subnet = os_subnets[0]['cidr']
+
+        return (subnet_id, subnet)
+
+    def spawn_instance(self, name, key_name=None, availability_zone=None, hostname=None, image_id=None, security_group=None, pubkeys=None, nics=None, metadata=None, userdata=None, flavor_name=None):
+        if not flavor_name:
+            flavor_name = self.config.nova_default_flavor
+
+        flavor = self.shell.nova.flavors.find(name=flavor_name)
+
+        if not security_group:
+            security_group = self.config.nova_default_security_group
+
+        files = {}
+        #if pubkeys:
+        #    files["/root/.ssh/authorized_keys"] = "\n".join(pubkeys).encode('base64')
+        hints = {}
+
+        # determine availability zone and compute host
+        if not availability_zone:
+            availability_zone_filter = 'nova'
+        else:
+            availability_zone_filter = availability_zone
+        if hostname:
+            availability_zone_filter += ':%s' % hostname
+
+        server = self.shell.nova.servers.create(
+                                            name=name,
+                                            key_name = key_name,
+                                            flavor=flavor.id,
+                                            image=image_id,
+                                            # novaclient expects a list under
+                                            # 'security_groups'; a bare
+                                            # 'security_group' kwarg is
+                                            # silently ignored
+                                            security_groups = [security_group],
+                                            #files = files,
+                                            scheduler_hints=hints,
+                                            availability_zone=availability_zone_filter,
+                                            nics=nics,
+                                            meta=metadata,
+                                            userdata=userdata)
+        return server
+
+    def destroy_instance(self, id):
+        if (self.shell.nova.tenant=="admin"):
+            # findall() is implemented as a list() followed by a python search of the
+            # list. Since findall() doesn't accept "all_tenants", we do this using
+            # list() ourselves. This allows us to delete an instance as admin.
+            servers = self.shell.nova.servers.list(search_opts={"all_tenants": True})
+        else:
+            servers = self.shell.nova.servers.list()
+        for server in servers:
+            if server.id == id:
+                self.shell.nova.servers.delete(server)
+
+    def update_instance_metadata(self, id, metadata):
+        servers = self.shell.nova.servers.findall(id=id)
+        for server in servers:
+            self.shell.nova.servers.set_meta(server, metadata)
+            # note: set_meta() returns a broken Server() object. Don't try to
+            # print it in the shell or it will fail in __repr__.
+
+    def delete_instance_metadata(self, id, metadata):
+        # note: metadata is a dict. Only the keys matter, not the values.
+        servers = self.shell.nova.servers.findall(id=id)
+        for server in servers:
+            self.shell.nova.servers.delete_meta(server, metadata)
+
diff --git a/xos/synchronizer/error_mapper.py b/xos/synchronizer/error_mapper.py
new file mode 100644
index 0000000..9eb878d
--- /dev/null
+++ b/xos/synchronizer/error_mapper.py
@@ -0,0 +1,25 @@
+from xos.config import Config
+from xos.logger import Logger, logging, logger
+
+class ErrorMapper:
+    def __init__(self, error_map_file):
+        self.error_map = {}
+        try:
+            error_map_lines = open(error_map_file).read().splitlines()
+            for l in error_map_lines:
+                if (not l.startswith('#')):
+                    splits = l.split('->')
+                    k, v = map(lambda i: i.rstrip(), splits)
+                    self.error_map[k] = v
+        except:
+            logging.info('Could not read error map')
+
+    def map(self, error):
+        return self.error_map[error]
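+
+    # The map file is expected to contain lines of the form
+    #   <observed error text> -> <replacement text>
+    # with '#'-prefixed lines treated as comments.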
diff --git a/xos/synchronizer/manager.py b/xos/synchronizer/manager.py
new file mode 100644
index 0000000..193ec75
--- /dev/null
+++ b/xos/synchronizer/manager.py
@@ -0,0 +1,569 @@
+import os
+#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
+import string
+import random
+import hashlib
+from datetime import datetime
+from django.utils import timezone
+
+from netaddr import IPAddress, IPNetwork
+from xos import settings
+from django.core import management
+from core.models import *
+from xos.config import Config
+try:
+    from openstack_xos.client import OpenStackClient
+    from openstack_xos.driver import OpenStackDriver
+    has_openstack = True
+except:
+    has_openstack = False
+
+manager_enabled = Config().api_nova_enabled
+
+
+def random_string(size=6):
+    return ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(size))
+
+def require_enabled(callable):
+    def wrapper(*args, **kwds):
+        if manager_enabled and has_openstack:
+            return callable(*args, **kwds)
+        else:
+            return None
+    return wrapper
+
+
+class OpenStackManager:
+
+    def __init__(self, auth={}, caller=None):
+        self.client = None
+        self.driver = None
+        self.caller = None
+        self.has_openstack = has_openstack       
+        self.enabled = manager_enabled
+
+        if has_openstack and manager_enabled:
+            if auth:
+                try:
+                    self.init_user(auth, caller)
+                except:
+                    # if this fails then it means the caller doesn't have a
+                    # role at the slice's tenant. if the caller is an admin
+                    # just use the admin client/manager.
+                    if caller and caller.is_admin: 
+                        self.init_admin()
+                    else: raise
+            else:
+                self.init_admin()
+
+    @require_enabled 
+    def init_caller(self, caller, tenant):
+        auth = {'username': caller.email,
+                'password': hashlib.md5(caller.password).hexdigest()[:6],
+                'tenant': tenant}
+        self.client = OpenStackClient(**auth)
+        self.driver = OpenStackDriver(client=self.client)
+        self.caller = caller                 
+    
+    @require_enabled
+    def init_admin(self, tenant=None):
+        # use the admin credentials 
+        self.client = OpenStackClient(tenant=tenant)
+        self.driver = OpenStackDriver(client=self.client)
+        self.caller = self.driver.admin_user
+        self.caller.kuser_id = self.caller.id 
+
+    @require_enabled
+    def save_role(self, role):
+        if not role.role:
+            keystone_role = self.driver.create_role(role.role_type)
+            role.role = keystone_role.id
+
+    @require_enabled
+    def delete_role(self, role):
+        if role.role:
+            self.driver.delete_role({'id': role.role})
+
+    @require_enabled
+    def save_key(self, key, name):
+        key_fields = {'name': name,
+                      'public_key': key}
+        nova_key = self.driver.create_keypair(**key_fields)
+
+    @require_enabled
+    def delete_key(self, key):
+        if key.nkey_id:
+            self.driver.delete_keypair(key.nkey_id)
+
+    @require_enabled
+    def save_user(self, user):
+        name = user.email[:user.email.find('@')]
+        user_fields = {'name': name,
+                       'email': user.email,
+                       'password': hashlib.md5(user.password).hexdigest()[:6],
+                       'enabled': True}
+        if not user.kuser_id:
+            keystone_user = self.driver.create_user(**user_fields)
+            user.kuser_id = keystone_user.id
+        else:
+            self.driver.update_user(user.kuser_id, user_fields)     
+
+        if user.site:
+            self.driver.add_user_role(user.kuser_id, user.site.tenant_id, 'user')
+            if user.is_admin:
+                self.driver.add_user_role(user.kuser_id, user.site.tenant_id, 'admin')
+            else:
+                # may have admin role so attempt to remove it
+                self.driver.delete_user_role(user.kuser_id, user.site.tenant_id, 'admin')
+
+        if user.public_key:
+            self.init_caller(user, user.site.login_base)
+            self.save_key(user.public_key, user.keyname)
+            self.init_admin()
+
+        user.save()
+        user.enacted = timezone.now()
+        user.save(update_fields=['enacted'])
+  
+    @require_enabled
+    def delete_user(self, user):
+        if user.kuser_id:
+            self.driver.delete_user(user.kuser_id)        
+    
+    @require_enabled
+    def save_site(self, site, add_role=True):
+        if not site.tenant_id:
+            tenant = self.driver.create_tenant(tenant_name=site.login_base,
+                                               description=site.name,
+                                               enabled=site.enabled)
+            site.tenant_id = tenant.id
+            # give caller an admin role at the tenant they've created
+            self.driver.add_user_role(self.caller.kuser_id, tenant.id, 'admin')
+
+        # update the record
+        if site.id and site.tenant_id:
+            self.driver.update_tenant(site.tenant_id,
+                                      description=site.name,
+                                      enabled=site.enabled)
+
+        # commit the updated record
+        site.save()
+        site.enacted = timezone.now()
+        site.save(update_fields=['enacted']) # ensure enacted > updated
+        
+
+    @require_enabled
+    def delete_site(self, site):
+        if site.tenant_id:
+            self.driver.delete_tenant(site.tenant_id)
+               
+    @require_enabled
+    def save_site_privilege(self, site_priv):
+        if site_priv.user.kuser_id and site_priv.site.tenant_id:
+            self.driver.add_user_role(site_priv.user.kuser_id,
+                                      site_priv.site.tenant_id,
+                                      site_priv.role.role_type)
+        site_priv.enacted = timezone.now()
+        site_priv.save(update_fields=['enacted'])
+
+    
+    @require_enabled
+    def delete_site_privilege(self, site_priv):
+        self.driver.delete_user_role(site_priv.user.kuser_id, 
+                                     site_priv.site.tenant_id, 
+                                     site_priv.role.role_type)
+
+    @require_enabled
+    def save_slice(self, slice):
+        if not slice.tenant_id:
+            nova_fields = {'tenant_name': slice.name,
+                   'description': slice.description,
+                   'enabled': slice.enabled}
+            tenant = self.driver.create_tenant(**nova_fields)
+            slice.tenant_id = tenant.id
+
+            # give caller an admin role at the tenant they've created
+            self.driver.add_user_role(self.caller.kuser_id, tenant.id, 'admin')
+
+            # refresh credentials using this tenant
+            self.driver.shell.connect(username=self.driver.shell.keystone.username,
+                                      password=self.driver.shell.keystone.password,
+                                      tenant=tenant.name)
+
+            # create network
+            network = self.driver.create_network(slice.name)
+            slice.network_id = network['id']
+
+            # create router
+            router = self.driver.create_router(slice.name)
+            slice.router_id = router['id']
+
+            # create subnet
+            next_subnet = self.get_next_subnet()
+            cidr = str(next_subnet.cidr)
+            ip_version = next_subnet.version
+            start = str(next_subnet[2])
+            end = str(next_subnet[-2]) 
+            subnet = self.driver.create_subnet(name=slice.name,
+                                               network_id = network['id'],
+                                               cidr_ip = cidr,
+                                               ip_version = ip_version,
+                                               start = start,
+                                               end = end)
+            slice.subnet_id = subnet['id']
+            # add subnet as interface to slice's router
+            self.driver.add_router_interface(router['id'], subnet['id'])
+            # add external route
+            self.driver.add_external_route(subnet)
+
+
+        if slice.id and slice.tenant_id:
+            self.driver.update_tenant(slice.tenant_id,
+                                      description=slice.description,
+                                      enabled=slice.enabled)   
+
+        slice.save()
+        slice.enacted = timezone.now()
+        slice.save(update_fields=['enacted']) 
+
+    @require_enabled
+    def delete_slice(self, slice):
+        if slice.tenant_id:
+            self._delete_slice(slice.tenant_id, slice.network_id, 
+                               slice.router_id, slice.subnet_id)
+
+    @require_enabled
+    def _delete_slice(self, tenant_id, network_id, router_id, subnet_id):
+        # use the passed-in ids; there is no 'slice' in scope here
+        self.driver.delete_router_interface(router_id, subnet_id)
+        self.driver.delete_subnet(subnet_id)
+        self.driver.delete_router(router_id)
+        self.driver.delete_network(network_id)
+        self.driver.delete_tenant(tenant_id)
+        # delete external route
+        subnet = None
+        subnets = self.driver.shell.neutron.list_subnets()['subnets']
+        for snet in subnets:
+            if snet['id'] == subnet_id:
+                subnet = snet
+        if subnet:
+            self.driver.delete_external_route(subnet)
+
+    
+    @require_enabled
+    def save_slice_membership(self, slice_memb):
+        if slice_memb.user.kuser_id and slice_memb.slice.tenant_id:
+            self.driver.add_user_role(slice_memb.user.kuser_id,
+                                      slice_memb.slice.tenant_id,
+                                      slice_memb.role.role_type)
+        slice_memb.enacted = timezone.now()
+        slice_memb.save(update_fields=['enacted'])
+
+
+    @require_enabled
+    def delete_slice_membership(self, slice_memb):
+        self.driver.delete_user_role(slice_memb.user.kuser_id,
+                                     slice_memb.slice.tenant_id,
+                                     slice_memb.role.role_type)
+
+
+    @require_enabled
+    def get_next_subnet(self):
+        # limit ourselves to 10.0.x.x for now
+        valid_subnet = lambda net: net.startswith('10.0')  
+        subnets = self.driver.shell.neutron.list_subnets()['subnets']
+        ints = [int(IPNetwork(subnet['cidr']).ip) for subnet in subnets \
+                if valid_subnet(subnet['cidr'])] 
+        ints.sort()
+        last_ip = IPAddress(ints[-1])
+        last_network = IPNetwork(str(last_ip) + "/24")
+        # step past the last allocated /24, e.g. 10.0.5.0/24 -> 10.0.6.0/24;
+        # use .first so netaddr's IPAddress() gets an int, not an IPNetwork
+        next_network = IPNetwork(str(IPAddress(last_network.first + last_network.size)) + "/24")
+        return next_network
+
+    @require_enabled
+    def save_subnet(self, subnet):    
+        if not subnet.subnet_id:
+            neutron_subnet = self.driver.create_subnet(name= subnet.slice.name,
+                                          network_id=subnet.slice.network_id,
+                                          cidr_ip = subnet.cidr,
+                                          ip_version=subnet.ip_version,
+                                          start = subnet.start,
+                                          end = subnet.end)
+            subnet.subnet_id = neutron_subnet['id']
+            # add subnet as interface to slice's router
+            self.driver.add_router_interface(subnet.slice.router_id, subnet.subnet_id)
+            #add_route = 'route add -net %s dev br-ex gw 10.100.0.5' % self.cidr
+            #commands.getstatusoutput(add_route)
+
+    
+    @require_enabled
+    def delete_subnet(self, subnet):
+        if subnet.subnet_id:
+            self.driver.delete_router_interface(subnet.slice.router_id, subnet.subnet_id)
+            self.driver.delete_subnet(subnet.subnet_id)
+            #del_route = 'route del -net %s' % self.cidr
+            #commands.getstatusoutput(del_route)
+
+    def get_requested_networks(self, slice):
+        network_ids = [x.network_id for x in slice.networks.all()]
+
+        if slice.network_id is not None:
+            network_ids.append(slice.network_id)
+
+        networks = []
+        for network_id in network_ids:
+            networks.append({"net-id": network_id})
+
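+        # e.g. a slice attached to two networks yields
+        # [{"net-id": "<uuid1>"}, {"net-id": "<uuid2>"}] -- the nics format
+        # that nova's servers.create() expects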
+        return networks
+
+    @require_enabled
+    def save_instance(self, instance):
+        metadata_update = {}
+        if ("numberCores" in instance.changed_fields):
+            metadata_update["cpu_cores"] = str(instance.numberCores)
+
+        for tag in instance.slice.tags.all():
+            if tag.name.startswith("sysctl-"):
+                metadata_update[tag.name] = tag.value
+
+        if not instance.instance_id:
+            nics = self.get_requested_networks(instance.slice)
+            for nic in nics:
+                # If a network hasn't been instantiated yet, then we'll fail
+                # during slice creation. Defer saving the instance for now.
+                if not nic.get("net-id", None):
+                    instance.save()   # in case it hasn't been saved yet
+                    return
+            slice_memberships = SliceMembership.objects.filter(slice=instance.slice)
+            pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
+            pubkeys.append(instance.creator.public_key)
+            # bind the nova server to a new name; rebinding 'instance' would
+            # clobber the model object we still need to update below
+            server = self.driver.spawn_instance(name=instance.name,
+                                   key_name = instance.creator.keyname,
+                                   image_id = instance.image.image_id,
+                                   hostname = instance.node.name,
+                                   pubkeys = pubkeys,
+                                   nics = nics,
+                                   metadata = metadata_update )
+            instance.instance_id = server.id
+            instance.instance_name = getattr(server, 'OS-EXT-SRV-ATTR:instance_name')
+        else:
+            if metadata_update:
+                self.driver.update_instance_metadata(instance.instance_id, metadata_update)
+
+        instance.save()
+        instance.enacted = timezone.now()
+        instance.save(update_fields=['enacted'])
+
+    @require_enabled
+    def delete_instance(self, instance):
+        if instance.instance_id:
+            self.driver.destroy_instance(instance.instance_id) 
+    
+
+    def refresh_nodes(self):
+        # collect local nodes
+        nodes = Node.objects.all()
+        nodes_dict = {}
+        for node in nodes:
+            if 'viccidev10' not in node.name:
+                nodes_dict[node.name] = node 
+        
+        deployment = Deployment.objects.filter(name='VICCI')[0]
+        login_bases = ['princeton', 'stanford', 'gt', 'uw', 'mpisws']
+        sites = Site.objects.filter(login_base__in=login_bases)
+        # collect nova nodes:
+        compute_nodes = self.client.nova.hypervisors.list()
+
+        compute_nodes_dict = {}
+        for compute_node in compute_nodes:
+            compute_nodes_dict[compute_node.hypervisor_hostname] = compute_node
+
+        # add new nodes:
+        new_node_names = set(compute_nodes_dict.keys()).difference(nodes_dict.keys())
+        i = 0
+        num_sites = len(sites)
+        for name in new_node_names:
+            if i == num_sites:
+                i = 0
+            site = sites[i]
+            node = Node(name=compute_nodes_dict[name].hypervisor_hostname,
+                        site=site,
+                        deployment=deployment)
+            node.save()
+            i+=1
+
+        # remove old nodes
+        old_node_names = set(nodes_dict.keys()).difference(compute_nodes_dict.keys())
+        Node.objects.filter(name__in=old_node_names).delete()
+
+    def refresh_images(self):
+        from core.models.image import Image
+        # collect local images
+        images = Image.objects.all()
+        images_dict = {}
+        for image in images:
+            images_dict[image.name] = image
+
+        # collect glance images
+        glance_images = self.client.glance.get_images()
+        glance_images_dict = {}
+        for glance_image in glance_images:
+            glance_images_dict[glance_image['name']] = glance_image
+
+        # add new images
+        new_image_names = set(glance_images_dict.keys()).difference(images_dict.keys())
+        for name in new_image_names:
+            image = Image(image_id=glance_images_dict[name]['id'],
+                          name=glance_images_dict[name]['name'],
+                          disk_format=glance_images_dict[name]['disk_format'],
+                          container_format=glance_images_dict[name]['container_format'])
+            image.save()
+
+        # remove old images
+        old_image_names = set(images_dict.keys()).difference(glance_images_dict.keys())
+        Image.objects.filter(name__in=old_image_names).delete()
+
+    @require_enabled
+    def save_network(self, network):
+        if not network.network_id:
+            if network.template.shared_network_name:
+                network.network_id = network.template.shared_network_id
+                (network.subnet_id, network.subnet) = self.driver.get_network_subnet(network.network_id)
+            else:
+                network_name = network.name
+
+                # create network
+                os_network = self.driver.create_network(network_name, shared=True)
+                network.network_id = os_network['id']
+
+                # create router
+                router = self.driver.create_router(network_name)
+                network.router_id = router['id']
+
+                # create subnet
+                next_subnet = self.get_next_subnet()
+                cidr = str(next_subnet.cidr)
+                ip_version = next_subnet.version
+                start = str(next_subnet[2])
+                end = str(next_subnet[-2])
+                subnet = self.driver.create_subnet(name=network_name,
+                                                   network_id = network.network_id,
+                                                   cidr_ip = cidr,
+                                                   ip_version = ip_version,
+                                                   start = start,
+                                                   end = end)
+                network.subnet = cidr
+                network.subnet_id = subnet['id']
+                # add subnet as interface to slice's router
+                self.driver.add_router_interface(router['id'], subnet['id'])
+                # add external route
+                self.driver.add_external_route(subnet)
+
+        network.save()
+        network.enacted = timezone.now()
+        network.save(update_fields=['enacted'])
+
+    def delete_network(self, network):
+        if (network.router_id) and (network.subnet_id):
+            self.driver.delete_router_interface(network.router_id, network.subnet_id)
+        if network.subnet_id:
+            self.driver.delete_subnet(network.subnet_id)
+        if network.router_id:
+            self.driver.delete_router(network.router_id)
+        if network.network_id:
+            self.driver.delete_network(network.network_id)
+
+    def save_network_template(self, template):
+        if (template.shared_network_name) and (not template.shared_network_id):
+            os_networks = self.driver.shell.neutron.list_networks(name=template.shared_network_name)['networks']
+            if os_networks:
+                template.shared_network_id = os_networks[0]["id"]
+
+        template.save()
+        template.enacted = timezone.now()
+        template.save(update_fields=['enacted'])
+
+    def find_or_make_template_for_network(self, name):
+        """ Given a network name, try to guess the right template for it """
+
+        # templates for networks we may encounter
+        if name=='nat-net':
+            template_dict = None # {"name": "private-nat", "visibility": "private", "translation": "nat"}
+        elif name=='sharednet1':
+            template_dict = {"name": "dedicated-public", "visibility": "public", "translation": "none"}
+        else:
+            template_dict = {"name": "private", "visibility": "private", "translation": "none"}
+
+        # check for None first: nat-net maps to no template at all
+        if template_dict is None:
+            return None
+
+        # if we have an existing template return it
+        templates = NetworkTemplate.objects.filter(name=template_dict["name"])
+        if templates:
+            return templates[0]
+
+        template = NetworkTemplate(**template_dict)
+        template.save()
+        return template
+
+    def refresh_network_templates(self):
+        for template in NetworkTemplate.objects.all():
+            if (template.shared_network_name) and (not template.shared_network_id):
+                # this will cause us to try to fill in the shared_network_id
+                self.save_network_template(template)
+
+    def refresh_networks(self):
+        # get a list of all networks in the model
+
+        networks = Network.objects.all()
+        networks_by_name = {}
+        networks_by_id = {}
+        for network in networks:
+            networks_by_name[network.name] = network
+            networks_by_id[network.network_id] = network
+
+        # Get a list of all shared networks in OS
+
+        os_networks = self.driver.shell.neutron.list_networks()['networks']
+        os_networks_by_name = {}
+        os_networks_by_id = {}
+        for os_network in os_networks:
+            os_networks_by_name[os_network['name']] = os_network
+            os_networks_by_id[os_network['id']] = os_network
+
+        for (uuid, os_network) in os_networks_by_id.items():
+            #print "checking OS network", os_network['name']
+            if (os_network['shared']) and (uuid not in networks_by_id):
+                # Only automatically create shared networks. This is for Andy's
+                # nat-net and sharednet1.
+
+                owner_slice = Slice.objects.get(tenant_id = os_network['tenant_id'])
+                template = self.find_or_make_template_for_network(os_network['name'])
+
+                if (template is None):
+                    # This is our way of saying we don't want to auto-instantiate
+                    # this network type.
+                    continue
+
+                (subnet_id, subnet) = self.driver.get_network_subnet(os_network['id'])
+
+                if owner_slice:
+                    #print "creating model object for OS network", os_network['name']
+                    new_network = Network(name = os_network['name'],
+                                          template = template,
+                                          owner = owner_slice,
+                                          network_id = uuid,
+                                          subnet_id = subnet_id)
+                    new_network.save()
+
+        for (network_id, network) in networks_by_id.items():
+            # If the network disappeared from OS, then reset its network_id to None
+            if (network.network_id is not None) and (network.network_id not in os_networks_by_id):
+                network.network_id = None
+
+            # If no OS object exists, then saving the network will create one
+            if (network.network_id is None):
+                #print "creating OS network for", network.name
+                self.save_network(network)
+            else:
+                pass #print "network", network.name, "has its OS object"
+
+
diff --git a/xos/synchronizer/model_policies/__init__.py b/xos/synchronizer/model_policies/__init__.py
new file mode 100644
index 0000000..36c6e25
--- /dev/null
+++ b/xos/synchronizer/model_policies/__init__.py
@@ -0,0 +1,12 @@
+from .model_policy_Slice import *
+from .model_policy_Instance import *
+from .model_policy_User import *
+from .model_policy_Network import *
+from .model_policy_Site import *
+from .model_policy_SitePrivilege import *
+from .model_policy_SlicePrivilege import *
+from .model_policy_ControllerSlice import *
+from .model_policy_ControllerSite import *
+from .model_policy_ControllerUser import *
+from .model_policy_Controller import *
+from .model_policy_Image import *
diff --git a/xos/synchronizer/model_policies/model_policy_Controller.py b/xos/synchronizer/model_policies/model_policy_Controller.py
new file mode 100644
index 0000000..c62b612
--- /dev/null
+++ b/xos/synchronizer/model_policies/model_policy_Controller.py
@@ -0,0 +1,62 @@
+
+def handle(controller):
+    from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network
+    from collections import defaultdict
+
+    # relations for all sites
+    ctrls_by_site = defaultdict(list)
+    ctrl_sites = ControllerSite.objects.all()
+    for ctrl_site in ctrl_sites:
+        ctrls_by_site[ctrl_site.site].append(ctrl_site.controller)
+    sites = Site.objects.all()
+    for site in sites:
+        if site not in ctrls_by_site or \
+            controller not in ctrls_by_site[site]:
+            controller_site = ControllerSite(controller=controller, site=site)
+            controller_site.save()
+    # relations for all slices
+    ctrls_by_slice = defaultdict(list)
+    ctrl_slices = ControllerSlice.objects.all()
+    for ctrl_slice in ctrl_slices:
+        ctrls_by_slice[ctrl_slice.slice].append(ctrl_slice.controller)
+    slices = Slice.objects.all()
+    for slice in slices:
+        if slice not in ctrls_by_slice or \
+            controller not in ctrls_by_slice[slice]:
+            controller_slice = ControllerSlice(controller=controller, slice=slice)
+            controller_slice.save()
+    # relations for all users
+    ctrls_by_user = defaultdict(list)
+    ctrl_users = ControllerUser.objects.all()
+    for ctrl_user in ctrl_users:
+        ctrls_by_user[ctrl_user.user].append(ctrl_user.controller)
+    users = User.objects.all()
+    for user in users:
+        if user not in ctrls_by_user or \
+            controller not in ctrls_by_user[user]:
+            controller_user = ControllerUser(controller=controller, user=user)
+            controller_user.save()
+    # relations for all networks
+    ctrls_by_network = defaultdict(list)
+    ctrl_networks = ControllerNetwork.objects.all()
+    for ctrl_network in ctrl_networks:
+        ctrls_by_network[ctrl_network.network].append(ctrl_network.controller)
+    networks = Network.objects.all()
+    for network in networks:
+        if network not in ctrls_by_network or \
+            controller not in ctrls_by_network[network]:
+            controller_network = ControllerNetwork(controller=controller, network=network)
+            if network.subnet and network.subnet.strip():
+                controller_network.subnet = network.subnet.strip()
+            controller_network.save()
+    # relations for all images
+    ctrls_by_image = defaultdict(list)
+    ctrl_images = ControllerImages.objects.all()
+    for ctrl_image in ctrl_images:
+        ctrls_by_image[ctrl_image.image].append(ctrl_image.controller)
+    images = Image.objects.all()
+    for image in images:
+        if image not in ctrls_by_image or \
+            controller not in ctrls_by_image[image]:
+            controller_image = ControllerImages(controller=controller, image=image)
+            controller_image.save()
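Note: handle() above repeats one reconciliation step five times (sites, slices, users, networks, images). A hedged sketch of the shared shape, with hypothetical accessor names, purely to make the pattern explicit:

    from collections import defaultdict

    # Hypothetical generic form of the five blocks above: make sure a
    # through-record links this controller to every object of a given kind.
    def ensure_relations(controller, objects, through_records, get_obj, make_through):
        ctrls_by_obj = defaultdict(list)
        for rec in through_records:
            ctrls_by_obj[get_obj(rec)].append(rec.controller)
        for obj in objects:
            if controller not in ctrls_by_obj[obj]:
                make_through(controller, obj).save()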
diff --git a/xos/synchronizer/model_policies/model_policy_ControllerSite.py b/xos/synchronizer/model_policies/model_policy_ControllerSite.py
new file mode 100644
index 0000000..4b76080
--- /dev/null
+++ b/xos/synchronizer/model_policies/model_policy_ControllerSite.py
@@ -0,0 +1,16 @@
+def handle(controller_site):
+    from core.models import ControllerSite, Site
+   
+    try:
+        my_status_code = int(controller_site.backend_status[0])
+        try:
+            his_status_code = int(controller_site.site.backend_status[0])
+        except:
+            his_status_code = 0
+ 
+        if (my_status_code not in [0,his_status_code]):
+            controller_site.site.backend_status = controller_site.backend_status
+            controller_site.site.save(update_fields = ['backend_status'])
+    except Exception,e:
+        print str(e)
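Note: this policy (and the ControllerSlice/ControllerUser variants below) reads the leading character of backend_status as a numeric code; elsewhere in this change statuses are written as strings like '1 - OK'. A tiny self-contained illustration of the guard, assuming that convention:

    # Leading character of backend_status encodes a code, e.g. '1 - OK' -> 1.
    # Propagate only codes that are nonzero and differ from the target's.
    def should_propagate(my_status, his_status):
        my_code = int(my_status[0])
        try:
            his_code = int(his_status[0])
        except (TypeError, ValueError, IndexError):
            his_code = 0
        return my_code not in [0, his_code]

    print should_propagate('2 - Exception', '1 - OK')  # True
    print should_propagate('1 - OK', '1 - OK')         # False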
diff --git a/xos/synchronizer/model_policies/model_policy_ControllerSlice.py b/xos/synchronizer/model_policies/model_policy_ControllerSlice.py
new file mode 100644
index 0000000..bfe7995
--- /dev/null
+++ b/xos/synchronizer/model_policies/model_policy_ControllerSlice.py
@@ -0,0 +1,25 @@
+def handle(controller_slice):
+    from core.models import ControllerSlice, Slice
+   
+    try:
+        my_status_code = int(controller_slice.backend_status[0])
+        try:
+            his_status_code = int(controller_slice.slice.backend_status[0])
+        except:
+            his_status_code = 0
+ 
+        fields = []
+        if (my_status_code not in [0,his_status_code]):
+            controller_slice.slice.backend_status = controller_slice.backend_status
+            fields+=['backend_status']
+
+        if (controller_slice.backend_register != controller_slice.slice.backend_register):
+            controller_slice.slice.backend_register = controller_slice.backend_register
+            fields+=['backend_register']
+
+        controller_slice.slice.save(update_fields = fields)
+
+    except Exception,e:
+        print str(e)
diff --git a/xos/synchronizer/model_policies/model_policy_ControllerUser.py b/xos/synchronizer/model_policies/model_policy_ControllerUser.py
new file mode 100644
index 0000000..b69c9b8
--- /dev/null
+++ b/xos/synchronizer/model_policies/model_policy_ControllerUser.py
@@ -0,0 +1,16 @@
+def handle(controller_user):
+    from core.models import ControllerUser, User
+   
+    try:
+        my_status_code = int(controller_user.backend_status[0])
+        try:
+            his_status_code = int(controller_user.user.backend_status[0])
+        except:
+            his_status_code = 0
+ 
+        if (my_status_code not in [0,his_status_code]):
+            controller_user.user.backend_status = controller_user.backend_status
+            controller_user.user.save(update_fields = ['backend_status'])
+    except Exception,e:
+        print str(e)
diff --git a/xos/synchronizer/model_policies/model_policy_Image.py b/xos/synchronizer/model_policies/model_policy_Image.py
new file mode 100644
index 0000000..c77d5bb
--- /dev/null
+++ b/xos/synchronizer/model_policies/model_policy_Image.py
@@ -0,0 +1,17 @@
+def handle(image):
+    from core.models import Controller, ControllerImages, Image
+    from collections import defaultdict
+
+    if (image.kind == "container"):
+        # container images do not get instantiated
+        return
+
+    controller_images = ControllerImages.objects.filter(image=image)
+    existing_controllers = [cs.controller for cs in controller_images] 
+    
+    all_controllers = Controller.objects.all() 
+    for controller in all_controllers:
+        if controller not in existing_controllers:
+            sd = ControllerImages(image=image, controller=controller)
+            sd.save()
+
diff --git a/xos/synchronizer/model_policies/model_policy_Instance.py b/xos/synchronizer/model_policies/model_policy_Instance.py
new file mode 100644
index 0000000..dd1a8d5
--- /dev/null
+++ b/xos/synchronizer/model_policies/model_policy_Instance.py
@@ -0,0 +1,58 @@
+def handle_container_on_metal(instance):
+        from core.models import Instance, Flavor, Port, Image
+        from xos.exceptions import XOSConfigurationError
+
+        print "MODEL POLICY: instance", instance, "handle container_on_metal"
+
+        if instance.deleted:
+            return
+
+        if (instance.isolation in ["container"]) and (instance.slice.network not in ["host", "bridged"]):
+            # Our current docker-on-metal network strategy requires that there be some
+            # VM on the server that connects to the networks, so that
+            # the containers can piggyback off of that configuration.
+            if not Instance.objects.filter(slice=instance.slice, node=instance.node, isolation="vm").exists():
+                flavors = Flavor.objects.filter(name="m1.small")
+                if not flavors:
+                    raise XOSConfigurationError("No m1.small flavor")
+
+                images = Image.objects.filter(kind="vm")
+
+                companion_instance = Instance(slice = instance.slice,
+                                node = instance.node,
+                                image = images[0],
+                                creator = instance.creator,
+                                deployment = instance.node.site_deployment.deployment,
+                                flavor = flavors[0])
+                companion_instance.save()
+
+                print "MODEL POLICY: instance", instance, "created companion", companion_instance
+
+        # Add the ports for the container
+        for network in instance.slice.networks.all():
+            # hmmm... The NAT ports never become ready, because sync_ports never
+            # instantiates them. Need to think about this.
+            print "MODEL POLICY: instance", instance, "handling network", network
+            if (network.name.endswith("-nat")):
+                continue
+
+            if not Port.objects.filter(network=network, instance=instance).exists():
+                port = Port(network = network, instance=instance)
+                port.save()
+                print "MODEL POLICY: instance", instance, "created port", port
+
+def handle(instance):
+    from core.models import Controller, ControllerSlice, ControllerNetwork, NetworkSlice
+
+    networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice)]
+    controller_networks = ControllerNetwork.objects.filter(network__in=networks,
+                                                                controller=instance.node.site_deployment.controller)
+
+    for cn in controller_networks:
+        if (cn.lazy_blocked):
+            print "MODEL POLICY: instance", instance, "unblocking network", cn.network
+            cn.lazy_blocked = False
+            cn.backend_register = '{}'
+            cn.save()
+
+    if (instance.isolation in ["container", "container_vm"]):
+        handle_container_on_metal(instance)
diff --git a/xos/synchronizer/model_policies/model_policy_Network.py b/xos/synchronizer/model_policies/model_policy_Network.py
new file mode 100644
index 0000000..06347c5
--- /dev/null
+++ b/xos/synchronizer/model_policies/model_policy_Network.py
@@ -0,0 +1,41 @@
+from core.models import *
+
+def handle(network):
+    from core.models import ControllerSlice, ControllerNetwork, Network
+    from collections import defaultdict
+
+    print "MODEL POLICY: network", network
+
+    # network = Network.get(network_id)
+    # network controllers are not visible to users. We must ensure
+    # networks are deployed at all deployments available to their slices.
+    slice_controllers = ControllerSlice.objects.all()
+    slice_deploy_lookup = defaultdict(list)
+    for slice_controller in slice_controllers:
+        slice_deploy_lookup[slice_controller.slice].append(slice_controller.controller)
+
+    network_controllers = ControllerNetwork.objects.all()
+    network_deploy_lookup = defaultdict(list)
+    for network_controller in network_controllers:
+        network_deploy_lookup[network_controller.network].append(network_controller.controller)
+
+    expected_controllers = slice_deploy_lookup[network.owner]
+    for expected_controller in expected_controllers:
+        if network not in network_deploy_lookup or \
+           expected_controller not in network_deploy_lookup[network]:
+            lazy_blocked = True
+
+            # check and see if some instance already exists
+            for networkslice in network.networkslices.all():
+                if networkslice.slice.instances.filter(node__site_deployment__controller=expected_controller).exists():
+                    print "MODEL_POLICY: network, setting lazy_blocked to false because instance on controller already exists"
+                    lazy_blocked = False
+
+            nd = ControllerNetwork(network=network, controller=expected_controller, lazy_blocked=lazy_blocked)
+            print "MODEL POLICY: network", network, "create ControllerNetwork", nd, "lazy_blocked", lazy_blocked
+            if network.subnet:
+                # XXX: Possibly unpredictable behavior if there is
+                # more than one ControllerNetwork and the subnet
+                # is specified.
+                nd.subnet = network.subnet
+            nd.save()
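Note: lazy_blocked ties this policy to model_policy_Instance above: a ControllerNetwork is created blocked unless the slice already has an instance on that controller, and the Instance policy later clears the flag and resets backend_register so the sync step retries. A toy sketch of that lifecycle, with a plain object standing in for the model:

    # Plain-object sketch of the lazy_blocked lifecycle (no ORM).
    class FakeControllerNetwork(object):
        def __init__(self):
            self.lazy_blocked = True          # created blocked by the Network policy
            self.backend_register = '{"failures": 3}'

    cn = FakeControllerNetwork()
    # ...an instance is later created on the same controller...
    if cn.lazy_blocked:
        cn.lazy_blocked = False
        cn.backend_register = '{}'            # clear backoff so the sync step runs again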
diff --git a/xos/synchronizer/model_policies/model_policy_Site.py b/xos/synchronizer/model_policies/model_policy_Site.py
new file mode 100644
index 0000000..23010a2
--- /dev/null
+++ b/xos/synchronizer/model_policies/model_policy_Site.py
@@ -0,0 +1,14 @@
+
+def handle(site):
+    from core.models import Controller, ControllerSite, Site 
+
+    # site = Site.get(site_id)
+    # make sure site has a ControllerSite record for each controller
+    ctrl_sites = ControllerSite.objects.filter(site=site)
+    existing_controllers = [cs.controller for cs in ctrl_sites]
+
+    all_controllers = Controller.objects.all()
+    for ctrl in all_controllers:
+        if ctrl not in existing_controllers:
+            ctrl_site = ControllerSite(controller=ctrl, site=site)
+            ctrl_site.save() 
diff --git a/xos/synchronizer/model_policies/model_policy_SitePrivilege.py b/xos/synchronizer/model_policies/model_policy_SitePrivilege.py
new file mode 100644
index 0000000..d9c6a1e
--- /dev/null
+++ b/xos/synchronizer/model_policies/model_policy_SitePrivilege.py
@@ -0,0 +1,15 @@
+def handle(site_privilege):
+    from core.models import Controller, SitePrivilege, ControllerSitePrivilege
+    
+    # site_privilege = SitePrivilege.get(site_privilege_id)
+    # apply site privilege at all controllers
+    controller_site_privileges = ControllerSitePrivilege.objects.filter(
+        site_privilege = site_privilege,
+        )
+    existing_controllers = [sp.controller for sp in controller_site_privileges]
+    all_controllers = Controller.objects.all()
+    for controller in all_controllers:
+        if controller not in existing_controllers:
+            ctrl_site_priv = ControllerSitePrivilege(controller=controller, site_privilege=site_privilege)
+            ctrl_site_priv.save()  
+
diff --git a/xos/synchronizer/model_policies/model_policy_Slice.py b/xos/synchronizer/model_policies/model_policy_Slice.py
new file mode 100644
index 0000000..088d583
--- /dev/null
+++ b/xos/synchronizer/model_policies/model_policy_Slice.py
@@ -0,0 +1,102 @@
+from xos.config import Config
+
+def handle_delete(slice):
+    from core.models import Controller, ControllerSlice, SiteDeployment, Network, NetworkSlice,NetworkTemplate, Slice
+    from collections import defaultdict
+
+    networks = Network.objects.filter(owner=slice)
+
+    for n in networks:
+        n.delete()
+    
+    # Note that sliceprivileges and slicecontrollers are autodeleted, through the dependency graph
+
+def handle(slice):
+    from core.models import Controller, ControllerSlice, SiteDeployment, Network, NetworkSlice,NetworkTemplate, Slice
+    from collections import defaultdict
+
+    # only create nat_net if not using VTN
+    support_nat_net = not getattr(Config(), "networking_use_vtn", False)
+
+    print "MODEL POLICY: slice", slice
+
+    # slice = Slice.get(slice_id)
+
+    controller_slices = ControllerSlice.objects.filter(slice=slice)
+    existing_controllers = [cs.controller for cs in controller_slices] 
+        
+    print "MODEL POLICY: slice existing_controllers=", existing_controllers
+
+    all_controllers = Controller.objects.all()
+    for controller in all_controllers:
+        if controller not in existing_controllers:
+            print "MODEL POLICY: slice adding controller", controller
+            sd = ControllerSlice(slice=slice, controller=controller)
+            sd.save()
+
+    if slice.network in ["host", "bridged"]:
+        # Host and Bridged docker containers need no networks and they will
+        # only get in the way.
+        print "MODEL POLICY: Skipping network creation"
+    elif slice.network in ["noauto"]:
+        # do nothing
+        pass
+    else:
+        # make sure slice has at least 1 public and 1 private network
+        public_nets = []
+        private_nets = []
+        networks = Network.objects.filter(owner=slice)
+        for network in networks:
+            if not network.autoconnect:
+                continue
+            if network.template.name == 'Public dedicated IPv4':
+                public_nets.append(network)
+            elif network.template.name == 'Public shared IPv4':
+                public_nets.append(network)
+            elif network.template.name == 'Private':
+                private_nets.append(network)
+        if support_nat_net and (not public_nets):
+            # ensure there is at least one public network, defaulting to shared
+            nat_net = Network(
+                name = slice.name+'-nat',
+                template = NetworkTemplate.objects.get(name='Public shared IPv4'),
+                owner = slice
+            )
+            if slice.exposed_ports:
+                nat_net.ports = slice.exposed_ports
+            nat_net.save()
+            public_nets.append(nat_net)
+            print "MODEL POLICY: slice", slice, "made nat-net"
+
+        if not private_nets:
+            private_net = Network(
+                name = slice.name+'-private',
+                template = NetworkTemplate.objects.get(name='Private'),
+                owner = slice
+            )
+            private_net.save()
+            print "MODEL POLICY: slice", slice, "made private net"
+            private_nets = [private_net]
+        # create slice networks
+        public_net_slice = None
+        private_net_slice = None
+        net_slices = NetworkSlice.objects.filter(slice=slice, network__in=private_nets+public_nets)
+        for net_slice in net_slices:
+            if net_slice.network in public_nets:
+                public_net_slice = net_slice
+            elif net_slice.network in private_nets:
+                private_net_slice = net_slice
+        if support_nat_net and (not public_net_slice):
+            public_net_slice = NetworkSlice(slice=slice, network=public_nets[0])
+            public_net_slice.save()
+            print "MODEL POLICY: slice", slice, "made public_net_slice"
+        if not private_net_slice:
+            private_net_slice = NetworkSlice(slice=slice, network=private_nets[0])
+            private_net_slice.save()
+            print "MODEL POLICY: slice", slice, "made private_net_slice"
+
+    print "MODEL POLICY: slice", slice, "DONE"
+
+
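Note: distilling the auto-connect logic above: unless the slice opts out via slice.network, it ends up with one public shared ("-nat") network when VTN is off and one private network, plus the corresponding NetworkSlice links. A pure-Python sketch of the decision, with hypothetical inputs:

    # Hypothetical distillation of which networks handle() auto-creates.
    def networks_to_create(slice_network, has_public, has_private, use_vtn):
        if slice_network in ["host", "bridged", "noauto"]:
            return []
        wanted = []
        if (not use_vtn) and (not has_public):
            wanted.append("Public shared IPv4")   # the '<slice>-nat' network
        if not has_private:
            wanted.append("Private")              # the '<slice>-private' network
        return wanted

    print networks_to_create(None, False, False, use_vtn=False)
    # ['Public shared IPv4', 'Private']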
diff --git a/xos/synchronizer/model_policies/model_policy_SlicePrivilege.py b/xos/synchronizer/model_policies/model_policy_SlicePrivilege.py
new file mode 100644
index 0000000..bca7f22
--- /dev/null
+++ b/xos/synchronizer/model_policies/model_policy_SlicePrivilege.py
@@ -0,0 +1,15 @@
+def handle(slice_privilege):
+    from core.models import Controller, SlicePrivilege, ControllerSlicePrivilege
+   
+    # slice_privilege = SlicePrivilege.get(slice_privilege_id) 
+    # apply slice privilege at all controllers
+    controller_slice_privileges = ControllerSlicePrivilege.objects.filter(
+        slice_privilege = slice_privilege,
+        )
+    existing_controllers = [sp.controller for sp in controller_slice_privileges]
+    all_controllers = Controller.objects.all()
+    for controller in all_controllers:
+        if controller not in existing_controllers:
+            ctrl_slice_priv = ControllerSlicePrivilege(controller=controller, slice_privilege=slice_privilege)
+            ctrl_slice_priv.save()  
+
diff --git a/xos/synchronizer/model_policies/model_policy_Sliver.py b/xos/synchronizer/model_policies/model_policy_Sliver.py
new file mode 100644
index 0000000..a13428d
--- /dev/null
+++ b/xos/synchronizer/model_policies/model_policy_Sliver.py
@@ -0,0 +1,13 @@
+
+def handle(instance):
+    from core.models import Controller, ControllerSlice, ControllerNetwork, NetworkSlice
+
+    networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice)]
+    controller_networks = ControllerNetwork.objects.filter(network__in=networks,
+                                                                controller=instance.node.site_deployment.controller)
+
+    for cn in controller_networks:
+        if (cn.lazy_blocked):
+            cn.lazy_blocked = False
+            cn.backend_register = '{}'
+            cn.save()
diff --git a/xos/synchronizer/model_policies/model_policy_User.py b/xos/synchronizer/model_policies/model_policy_User.py
new file mode 100644
index 0000000..8d14244
--- /dev/null
+++ b/xos/synchronizer/model_policies/model_policy_User.py
@@ -0,0 +1,14 @@
+def handle(user):
+    from core.models import Controller, ControllerSite, ControllerUser, User
+    from collections import defaultdict
+
+    # user = User.get(user_id)
+    
+    controller_users = ControllerUser.objects.filter(user=user)
+    existing_controllers = [cu.controller for cu in controller_users]
+    all_controllers = Controller.objects.all()
+    for controller in all_controllers:
+        if controller not in existing_controllers:
+            ctrl_user = ControllerUser(controller=controller, user=user)
+            ctrl_user.save()  
+
diff --git a/xos/synchronizer/openstacksyncstep.py b/xos/synchronizer/openstacksyncstep.py
new file mode 100644
index 0000000..46056cf
--- /dev/null
+++ b/xos/synchronizer/openstacksyncstep.py
@@ -0,0 +1,14 @@
+import os
+import base64
+from synchronizers.base.syncstep import SyncStep
+
+class OpenStackSyncStep(SyncStep):
+    """ XOS Sync step for copying data to OpenStack 
+    """ 
+    
+    def __init__(self, **args):
+        SyncStep.__init__(self, **args)
+        return
+
+    def __call__(self, **args):
+        return self.call(**args)
diff --git a/xos/synchronizer/steps/__init__.py b/xos/synchronizer/steps/__init__.py
new file mode 100644
index 0000000..c70b0c0
--- /dev/null
+++ b/xos/synchronizer/steps/__init__.py
@@ -0,0 +1,6 @@
+#from .sync_controller_sites import SyncControllerSites
+#from .sync_controller_slices import SyncControllerSlices
+#from .sync_controller_users import SyncControllerUsers
+#from .sync_controller_site_privileges import SyncControllerSitePrivileges
+#from .sync_controller_slice_privileges import SyncControllerSlicePrivileges
+#from .sync_controller_networks import SyncControllerNetworks
diff --git a/xos/synchronizer/steps/delete_slivers.yaml b/xos/synchronizer/steps/delete_slivers.yaml
new file mode 100644
index 0000000..fa6b879
--- /dev/null
+++ b/xos/synchronizer/steps/delete_slivers.yaml
@@ -0,0 +1,8 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+
+  - nova_compute:
+      state: absent
+      name: {{ name }}
diff --git a/xos/synchronizer/steps/purge_disabled_users.py b/xos/synchronizer/steps/purge_disabled_users.py
new file mode 100644
index 0000000..6b1dac3
--- /dev/null
+++ b/xos/synchronizer/steps/purge_disabled_users.py
@@ -0,0 +1,25 @@
+import os
+import base64
+import datetime
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models.user import User
+from xos.logger import observer_logger as logger
+
+#class SyncRoles(OpenStackSyncStep):
+#    provides=[User]
+#    requested_interval=0
+#    observes=User
+#
+#    def fetch_pending(self, deleted):
+#        if (deleted):
+#            # users marked as deleted
+#            return User.deleted_objects.all()
+#        else:
+#            # disabled users that haven't been updated in over a week
+#            one_week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
+#            return User.objects.filter(is_active=False, updated__lt=one_week_ago)
+#
+#    def sync_record(self, user):
+#        user.delete()
diff --git a/xos/synchronizer/steps/sliver.yaml b/xos/synchronizer/steps/sliver.yaml
new file mode 100644
index 0000000..e630415
--- /dev/null
+++ b/xos/synchronizer/steps/sliver.yaml
@@ -0,0 +1,17 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - nova_compute:
+       state: present
+       auth_url: http://172.31.38.128:5000/v2.0/
+       login_username: admin
+       login_password: 6a789bf69dd647e2
+       login_tenant_name: admin
+       name: gloopy
+       image_id: 3ee851df-b35a-41c5-8551-f681e7209095
+       key_name: boo
+       wait_for: 200
+       flavor_id: 3
+       nics:
+         - net-id: d1de537b-80dc-4c1b-aa5f-4a197b33b5f6
diff --git a/xos/synchronizer/steps/sync_container.py b/xos/synchronizer/steps/sync_container.py
new file mode 100644
index 0000000..41e1305
--- /dev/null
+++ b/xos/synchronizer/steps/sync_container.py
@@ -0,0 +1,162 @@
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from synchronizers.base.syncstep import SyncStep, DeferredException
+from synchronizers.base.ansible import run_template_ssh
+from core.models import Service, Slice, Instance
+from xos.logger import Logger, logging
+
+# hpclibrary will be in steps/..
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncContainer(SyncInstanceUsingAnsible):
+    provides=[Instance]
+    observes=Instance
+    requested_interval=0
+    template_name = "sync_container.yaml"
+
+    def __init__(self, *args, **kwargs):
+        super(SyncContainer, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deletion=False):
+        objs = super(SyncContainer, self).fetch_pending(deletion)
+        objs = [x for x in objs if x.isolation in ["container", "container_vm"]]
+        return objs
+
+    def get_instance_port(self, container_port):
+        for p in container_port.network.links.all():
+            if (p.instance) and (p.instance.isolation=="vm") and (p.instance.node == container_port.instance.node) and (p.mac):
+                return p
+        return None
+
+    def get_parent_port_mac(self, instance, port):
+        if not instance.parent:
+            raise Exception("instance has no parent")
+        for parent_port in instance.parent.ports.all():
+            if parent_port.network == port.network:
+                if not parent_port.mac:
+                     raise DeferredException("parent port on network %s does not have mac yet" % parent_port.network.name)
+                return parent_port.mac
+        raise Exception("failed to find corresponding parent port for network %s" % port.network.name)
+
+    def get_ports(self, o):
+        ports = []
+        if (o.slice.network in ["host", "bridged"]):
+            pass # no ports in host or bridged mode
+        else:
+            for port in o.ports.all():
+                if (not port.ip):
+                    # 'unmanaged' ports may have an ip, but no mac
+                    # XXX: are there any ports that have a mac but no ip?
+                    raise DeferredException("Port on network %s is not yet ready" % port.network.name)
+
+                pd={}
+                pd["mac"] = port.mac or ""
+                pd["ip"] = port.ip or ""
+                pd["xos_network_id"] = port.network.id
+
+                if port.network.name == "wan_network":
+                    if port.ip:
+                        (a, b, c, d) = port.ip.split('.')
+                        pd["mac"] = "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))
+
+
+                if o.isolation == "container":
+                    # container on bare metal
+                    instance_port = self.get_instance_port(port)
+                    if not instance_port:
+                        raise DeferredException("No instance on slice for port on network %s" % port.network.name)
+
+                    pd["snoop_instance_mac"] = instance_port.mac
+                    pd["snoop_instance_id"] = instance_port.instance.instance_id
+                    pd["src_device"] = ""
+                    pd["bridge"] = "br-int"
+                else:
+                    # container in VM
+                    pd["snoop_instance_mac"] = ""
+                    pd["snoop_instance_id"] = ""
+                    pd["parent_mac"] = self.get_parent_port_mac(o, port)
+                    pd["bridge"] = ""
+
+                for (k,v) in port.get_parameters().items():
+                    pd[k] = v
+
+                ports.append(pd)
+
+            # for any ports that don't have a device, assign one
+            used_ports = [x["device"] for x in ports if ("device" in x)]
+            avail_ports = ["eth%d"%i for i in range(0,64) if ("eth%d"%i not in used_ports)]
+            for port in ports:
+                if not port.get("device",None):
+                    port["device"] = avail_ports.pop(0)
+
+        return ports
+
+    def get_extra_attributes(self, o):
+        fields={}
+        fields["ansible_tag"] = "container-%s" % str(o.id)
+        if o.image.tag:
+            fields["docker_image"] = o.image.path + ":" + o.image.tag
+        else:
+            fields["docker_image"] = o.image.path
+        fields["ports"] = self.get_ports(o)
+        if o.volumes:
+            fields["volumes"] = [x.strip() for x in o.volumes.split(",")]
+        else:
+            fields["volumes"] = ""
+        fields["network_method"] = o.slice.network or "default"
+        return fields
+
+    def sync_record(self, o):
+        logger.info("sync'ing object %s" % str(o),extra=o.tologdict())
+
+        fields = self.get_ansible_fields(o)
+
+        # If 'o' defines a 'sync_attributes' list, then we'll copy those
+        # attributes into the Ansible recipe's field list automatically.
+        if hasattr(o, "sync_attributes"):
+            for attribute_name in o.sync_attributes:
+                fields[attribute_name] = getattr(o, attribute_name)
+
+        fields.update(self.get_extra_attributes(o))
+
+        self.run_playbook(o, fields)
+
+        o.instance_id = fields["container_name"]
+        o.instance_name = fields["container_name"]
+
+        o.save()
+
+    def delete_record(self, o):
+        logger.info("delete'ing object %s" % str(o),extra=o.tologdict())
+
+        fields = self.get_ansible_fields(o)
+
+        # If 'o' defines a 'sync_attributes' list, then we'll copy those
+        # attributes into the Ansible recipe's field list automatically.
+        if hasattr(o, "sync_attributes"):
+            for attribute_name in o.sync_attributes:
+                fields[attribute_name] = getattr(o, attribute_name)
+
+        fields.update(self.get_extra_attributes(o))
+
+        self.run_playbook(o, fields, "teardown_container.yaml")
+
+    def run_playbook(self, o, fields, template_name=None):
+        if not template_name:
+            template_name = self.template_name
+        tStart = time.time()
+        run_template_ssh(template_name, fields, path="container")
+        logger.info("playbook execution time %d" % int(time.time()-tStart),extra=o.tologdict())
+
+
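Note: the tail of get_ports() assigns an "ethN" device name to every port that did not carry one, skipping names already in use. The same logic as a standalone, runnable function:

    # Standalone version of the device-assignment loop in get_ports().
    def assign_devices(ports):
        used = [p["device"] for p in ports if "device" in p]
        avail = ["eth%d" % i for i in range(0, 64) if "eth%d" % i not in used]
        for p in ports:
            if not p.get("device", None):
                p["device"] = avail.pop(0)

    ports = [{"device": "eth0"}, {"ip": "10.0.0.5"}]
    assign_devices(ports)
    print ports[1]["device"]   # eth1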
diff --git a/xos/synchronizer/steps/sync_container.yaml b/xos/synchronizer/steps/sync_container.yaml
new file mode 100644
index 0000000..4ae4eb2
--- /dev/null
+++ b/xos/synchronizer/steps/sync_container.yaml
@@ -0,0 +1,124 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: {{ username }}
+  sudo: yes
+
+  vars:
+    container_name: {{ container_name }}
+    docker_image: {{ docker_image }}
+    network_method: {{ network_method }}
+    ports:
+    {% for port in ports %}
+       - device: {{ port.device }}
+         xos_network_id: {{ port.xos_network_id }}
+         mac: {{ port.mac|default("") }}
+         ip: {{ port.ip }}
+         snoop_instance_mac: {{ port.snoop_instance_mac }}
+         snoop_instance_id: {{ port.snoop_instance_id }}
+         parent_mac: {{ port.parent_mac|default("") }}
+         s_tag: {{ port.s_tag|default("")  }}
+         c_tag: {{ port.c_tag|default("") }}
+         next_hop: {{ port.next_hop|default("") }}
+         bridge: {{ port.bridge }}
+    {% endfor %}
+    volumes:
+    {% for volume in volumes %}
+       - {{ volume }}
+    {% endfor %}
+
+  tasks:
+
+#  - name: Fix /etc/hosts
+#    lineinfile:
+#      dest=/etc/hosts
+#      regexp="127.0.0.1 localhost"
+#      line="127.0.0.1 localhost {{ instance_hostname }}"
+
+  - name: Add repo key
+    apt_key:
+      keyserver=hkp://pgp.mit.edu:80
+      id=58118E89F3A912897C070ADBF76221572C52609D
+
+  - name: Install Docker repo
+    apt_repository:
+      repo="deb https://apt.dockerproject.org/repo ubuntu-trusty main"
+      state=present
+
+  - name: Install Docker
+    apt:
+      name={{ '{{' }} item {{ '}}' }}
+      state=latest
+      update_cache=yes
+    with_items:
+# XXX docker 1.10 is not working on cloudlab
+#    - docker-engine
+    - python-pip
+    - python-httplib2
+
+  - name: Install Docker 1.9.1
+    apt:
+      name={{ '{{' }} item {{ '}}' }}
+      update_cache=yes
+    with_items:
+    - docker-engine=1.9.1-0~trusty
+
+  # Something is installing a requests library that is incompatible with pip,
+  # and will cause this recipe to fail the next time it tries to run pip. Only
+  # the one in /usr/local/lib is bad. There's still a good one in /usr/lib.
+  - name: check if bad requests library installed
+    stat: path=/usr/local/lib/python2.7/dist-packages/requests
+    register: bad_requests
+
+  - name: remove bad request library
+    shell: mv /usr/local/lib/python2.7/dist-packages/requests /usr/local/lib/python2.7/dist-packages/requests-bad
+    when: bad_requests.stat.exists == True
+
+  - name: Install docker-py
+    pip:
+      name=docker-py
+      state=latest
+
+  - name: install Pipework
+    get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
+       dest=/usr/local/bin/pipework
+       mode=0755
+
+#  - name: Start Container
+#    docker:
+#      docker_api_version: "1.18"
+#      name: {{ container_name }}
+#      # was: reloaded
+#      state: running
+#      image: {{ docker_image }}
+
+  - name: check if systemd is installed
+    stat: path=/usr/bin/systemctl
+    register: systemctl
+
+  - name: container upstart
+    template: src=/opt/xos/synchronizers/openstack/templates/container.conf.j2 dest=/etc/init/container-{{ container_name }}.conf
+
+  - name: container systemd
+    template: src=/opt/xos/synchronizers/openstack/templates/container.service.j2 dest=/lib/systemd/system/container-{{ container_name }}.service
+
+  - name: container startup script
+    template: src=/opt/xos/synchronizers/openstack/templates/start-container.sh.j2 dest=/usr/local/sbin/start-container-{{ container_name }}.sh mode=0755
+
+  - name: container teardown script
+    template: src=/opt/xos/synchronizers/openstack/templates/stop-container.sh.j2 dest=/usr/local/sbin/stop-container-{{ container_name }}.sh mode=0755
+
+  - name: restart systemd
+    shell: systemctl daemon-reload
+    when: systemctl.stat.exists == True
+
+{% if ports %}
+  - name: make sure bridges are setup
+    shell: ifconfig {{ '{{' }} item.bridge {{ '}}' }}
+    with_items: "ports"
+{% endif %}
+
+  - name: Make sure container is running
+    service: name=container-{{ container_name }} state=started
+
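Note: this playbook is rendered twice: first by the synchronizer's own Jinja pass, then by Ansible. That is why Ansible-time expressions are written as {{ '{{' }} item {{ '}}' }}; the first pass collapses them back to {{ item }}. A quick way to confirm, assuming the jinja2 package is installed:

    # First-pass rendering turns the escaped form into a literal
    # Ansible-time expression.
    from jinja2 import Template
    print Template("name={{ '{{' }} item {{ '}}' }}").render()
    # prints: name={{ item }}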
diff --git a/xos/synchronizer/steps/sync_controller_images.py b/xos/synchronizer/steps/sync_controller_images.py
new file mode 100644
index 0000000..c1e5136
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_images.py
@@ -0,0 +1,44 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from synchronizers.base.syncstep import *
+from core.models import Controller
+from core.models import Image, ControllerImages
+from xos.logger import observer_logger as logger 
+from synchronizers.base.ansible import *
+import json
+
+class SyncControllerImages(OpenStackSyncStep):
+    provides=[ControllerImages]
+    observes = ControllerImages
+    requested_interval=0
+    playbook='sync_controller_images.yaml'
+
+    def fetch_pending(self, deleted):
+        if (deleted):
+            return []
+
+        # now we return all images that need to be enacted
+        return ControllerImages.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def map_sync_inputs(self, controller_image):
+        image_fields = {'endpoint':controller_image.controller.auth_url,
+                        'endpoint_v3': controller_image.controller.auth_url_v3,
+                        'admin_user':controller_image.controller.admin_user,
+                        'admin_password':controller_image.controller.admin_password,
+                        'domain': controller_image.controller.domain,
+                        'name':controller_image.image.name,
+                        'filepath':controller_image.image.path,
+                        'ansible_tag': '%s@%s'%(controller_image.image.name,controller_image.controller.name), # name of ansible playbook
+                        }
+
+        return image_fields
+
+    def map_sync_outputs(self, controller_image, res):
+        image_id = res[0]['id']
+        controller_image.glance_image_id = image_id
+        controller_image.backend_status = '1 - OK'
+        controller_image.save()
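Note: this step shows the sync-step contract at its simplest: fetch_pending() selects records whose enacted timestamp lags updated, map_sync_inputs() flattens a record into playbook variables, and map_sync_outputs() writes ids from the Ansible results back onto the record. An illustrative outline of the driver loop (the real one lives in synchronizers.base; names here are only indicative):

    # Indicative outline of how the base machinery consumes the hooks above.
    def run_step(step, run_playbook):
        for record in step.fetch_pending(False):
            fields = step.map_sync_inputs(record)
            if fields is None:
                continue                            # step chose to skip this record
            res = run_playbook(step.playbook, fields)
            step.map_sync_outputs(record, res)      # persist ids, mark '1 - OK'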
diff --git a/xos/synchronizer/steps/sync_controller_images.yaml b/xos/synchronizer/steps/sync_controller_images.yaml
new file mode 100644
index 0000000..6247a30
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_images.yaml
@@ -0,0 +1,13 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - glance_image:
+        auth_url={{ endpoint }}
+        login_username="{{ admin_user }}"
+        login_tenant_name="admin"
+        login_password="{{ admin_password }}"
+        name="{{ name }}"
+        file="{{ filepath }}"
+        disk_format='raw'
+        is_public=true
diff --git a/xos/synchronizer/steps/sync_controller_networks.py b/xos/synchronizer/steps/sync_controller_networks.py
new file mode 100644
index 0000000..b61ef7b
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_networks.py
@@ -0,0 +1,163 @@
+import os
+import base64
+import struct
+import socket
+from collections import defaultdict
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from synchronizers.base.syncstep import *
+from core.models.network import *
+from core.models.slice import *
+from core.models.instance import Instance
+from xos.logger import observer_logger as logger
+from synchronizers.base.ansible import *
+from openstack_xos.driver import OpenStackDriver
+import json
+
+class SyncControllerNetworks(OpenStackSyncStep):
+    requested_interval = 0
+    provides=[Network]
+    observes=ControllerNetwork
+    playbook='sync_controller_networks.yaml'
+
+    def alloc_subnet(self, uuid):
+        # 16 bits only
+        uuid_masked = uuid & 0xffff
+        a = 10
+        b = uuid_masked >> 8
+        c = uuid_masked & 0xff
+        d = 0
+
+        cidr = '%d.%d.%d.%d/24'%(a,b,c,d)
+        return cidr
+
+    def alloc_gateway(self, subnet):
+        # given a CIDR, allocate a default gateway using the .1 address within
+        # the subnet.
+        #    10.123.0.0/24 --> 10.123.0.1
+        #    207.141.192.128/28 --> 207.141.192.129
+        (network, bits) = subnet.split("/")
+        network=network.strip()
+        bits=int(bits.strip())
+        netmask = (~(pow(2,32-bits)-1) & 0xFFFFFFFF)
+        ip = struct.unpack("!L", socket.inet_aton(network))[0]
+        ip = ip & netmask | 1
+        return socket.inet_ntoa(struct.pack("!L", ip))
+
+    def save_controller_network(self, controller_network):
+        network_name = controller_network.network.name
+        subnet_name = '%s-%d'%(network_name,controller_network.pk)
+        if controller_network.subnet and controller_network.subnet.strip():
+            # If a subnet is already specified (pass in by the creator), then
+            # use that rather than auto-generating one.
+            cidr = controller_network.subnet.strip()
+            print "CIDR_MS", cidr
+        else:
+            cidr = self.alloc_subnet(controller_network.pk)
+            print "CIDR_AMS", cidr
+
+        if controller_network.network.start_ip and controller_network.network.start_ip.strip():
+            start_ip = controller_network.network.start_ip.strip()
+        else:
+            start_ip = None
+
+        if controller_network.network.end_ip and controller_network.network.end_ip.strip():
+            end_ip = controller_network.network.end_ip.strip()
+        else:
+            end_ip = None
+
+        self.cidr=cidr
+        slice = controller_network.network.owner
+
+        network_fields = {'endpoint':controller_network.controller.auth_url,
+                    'endpoint_v3': controller_network.controller.auth_url_v3,
+                    'admin_user':slice.creator.email,
+                    'admin_password':slice.creator.remote_password,
+                    'admin_project':slice.name,
+                    'domain': controller_network.controller.domain,
+                    'name':network_name,
+                    'subnet_name':subnet_name,
+                    'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
+                    'cidr':cidr,
+                    'gateway':self.alloc_gateway(cidr),
+                    'start_ip':start_ip,
+                    'end_ip':end_ip,
+                    'use_vtn':getattr(Config(), "networking_use_vtn", False),
+                    'delete':False
+                    }
+        return network_fields
+
+    def map_sync_outputs(self, controller_network,res):
+        network_id = res[0]['network']['id']
+        subnet_id = res[1]['subnet']['id']
+        controller_network.net_id = network_id
+        controller_network.subnet = self.cidr
+        controller_network.subnet_id = subnet_id
+        controller_network.backend_status = '1 - OK'
+        controller_network.save()
+
+
+    def map_sync_inputs(self, controller_network):
+        # XXX This check should really be made from booleans, rather than using hardcoded network names
+        #if (controller_network.network.template.name not in ['Private', 'Private-Indirect', 'Private-Direct', 'management_template'):
+        #    logger.info("skipping network controller %s because it is not private" % controller_network)
+        #    # We only sync private networks
+        #    return SyncStep.SYNC_WITHOUT_RUNNING
+
+        # hopefully a better approach than above
+        if (controller_network.network.template.shared_network_name or controller_network.network.template.shared_network_id):
+            return SyncStep.SYNC_WITHOUT_RUNNING
+        
+        if not controller_network.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_network.controller)
+            return
+
+        if controller_network.network.owner and controller_network.network.owner.creator:
+            return self.save_controller_network(controller_network)
+        else:
+            raise Exception('Could not save network controller %s'%controller_network)
+
+    def map_delete_inputs(self, controller_network):
+        # XXX This check should really be made from booleans, rather than using hardcoded network names
+        if (controller_network.network.template.name not in ['Private', 'Private-Indirect', 'Private-Direct']):
+            # We only sync private networks
+            return
+        try:
+            slice = controller_network.network.owner # XXX: FIXME!!
+        except:
+            raise Exception('Could not get slice for Network %s'%controller_network.network.name)
+
+        network_name = controller_network.network.name
+        subnet_name = '%s-%d'%(network_name,controller_network.pk)
+        cidr = controller_network.subnet
+        network_fields = {'endpoint':controller_network.controller.auth_url,
+                    'admin_user':slice.creator.email, # XXX: FIXME
+                    'tenant_name':slice.name, # XXX: FIXME
+                    'admin_password':slice.creator.remote_password,
+                    'name':network_name,
+                    'subnet_name':subnet_name,
+                    'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
+                    'cidr':cidr,
+                    'delete':True
+                    }
+
+        return network_fields
+
+        """
+        driver = OpenStackDriver().client_driver(caller=controller_network.network.owner.creator,
+                                                 tenant=controller_network.network.owner.name,
+                                                 controller=controller_network.controller.name)
+        if (controller_network.router_id) and (controller_network.subnet_id):
+            driver.delete_router_interface(controller_network.router_id, controller_network.subnet_id)
+        if controller_network.subnet_id:
+            driver.delete_subnet(controller_network.subnet_id)
+        if controller_network.router_id:
+            driver.delete_router(controller_network.router_id)
+        if controller_network.net_id:
+            driver.delete_network(controller_network.net_id)
+        """
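Note: alloc_subnet() folds the ControllerNetwork primary key into the middle octets of a 10.x.y.0/24, and alloc_gateway() derives the .1 address with netmask arithmetic. Worked examples reusing the same arithmetic:

    import socket
    import struct

    def alloc_subnet(pk):
        masked = pk & 0xffff                      # same math as the sync step
        return '10.%d.%d.0/24' % (masked >> 8, masked & 0xff)

    def alloc_gateway(subnet):
        network, bits = subnet.split('/')
        netmask = (~(pow(2, 32 - int(bits)) - 1)) & 0xFFFFFFFF
        ip = struct.unpack("!L", socket.inet_aton(network.strip()))[0]
        return socket.inet_ntoa(struct.pack("!L", (ip & netmask) | 1))

    print alloc_subnet(777)                     # 777 = 0x0309 -> 10.3.9.0/24
    print alloc_gateway('10.3.9.0/24')          # 10.3.9.1
    print alloc_gateway('207.141.192.128/28')   # 207.141.192.129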
diff --git a/xos/synchronizer/steps/sync_controller_networks.yaml b/xos/synchronizer/steps/sync_controller_networks.yaml
new file mode 100644
index 0000000..7b6075c
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_networks.yaml
@@ -0,0 +1,58 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - os_network:
+      name: {{ name }}
+      shared: true
+      {% if not delete -%}
+      state: present
+      {% else -%}
+      state: absent
+      {% endif -%}
+      auth:
+        auth_url: {{ endpoint }}
+        username: {{ admin_user }}
+        password: {{ admin_password }}
+        project_name: {{ admin_project }}
+
+{% if not delete %}
+  - os_subnet:
+      name: {{ subnet_name }}
+      network_name: {{ name }}
+      {% if not delete -%}
+      state: present
+      cidr: {{ cidr }}
+      dns_nameservers: 8.8.8.8
+      {% if use_vtn -%}
+      gateway_ip: {{ gateway }}
+      {% endif -%}
+      {% if start_ip -%}
+      allocation_pool_start: {{ start_ip }}
+      {% endif -%}
+      {% if end_ip -%}
+      allocation_pool_end: {{ end_ip }}
+      {% endif -%}
+      {% else -%}
+      state: absent
+      {% endif -%}
+      auth:
+        auth_url: {{ endpoint }}
+        username: {{ admin_user }}
+        password: {{ admin_password }}
+        project_name: {{ admin_project }}
+
+{% if not use_vtn -%}
+# until we get 'no-gateway-ip' arg to os_subnet, in Ansible 2.2
+# https://github.com/ansible/ansible-modules-core/pull/3736
+  - command:
+      neutron \
+      --os-auth-url {{ endpoint }} \
+      --os-username {{ admin_user }} \
+      --os-password {{ admin_password }} \
+      --os-tenant-name {{ admin_project }} \
+      subnet-update --no-gateway {{ subnet_name }}
+{% endif -%}
+
+{% endif %}
+
diff --git a/xos/synchronizer/steps/sync_controller_site_privileges.py b/xos/synchronizer/steps/sync_controller_site_privileges.py
new file mode 100644
index 0000000..59919fe
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_site_privileges.py
@@ -0,0 +1,84 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from synchronizers.base.syncstep import *
+from core.models.site import Controller, ControllerSite, SitePrivilege
+from core.models.user import User
+from core.models.controlleruser import ControllerUser, ControllerSitePrivilege
+from xos.logger import observer_logger as logger
+from synchronizers.base.ansible import *
+import json
+
+class SyncControllerSitePrivileges(OpenStackSyncStep):
+    provides=[SitePrivilege]
+    requested_interval=0
+    observes=ControllerSitePrivilege
+    playbook='sync_controller_users.yaml'
+
+    def map_sync_inputs(self, controller_site_privilege):
+        controller_register = json.loads(controller_site_privilege.controller.backend_register)
+        if not controller_site_privilege.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_site_privilege.controller)
+            return
+
+        roles = [controller_site_privilege.site_privilege.role.role]
+        # setup user home site roles at controller
+        if not controller_site_privilege.site_privilege.user.site:
+            raise Exception('Siteless user %s'%controller_site_privilege.site_privilege.user.email)
+        else:
+            # look up tenant id for the user's site at the controller
+            #ctrl_site_deployments = SiteDeployment.objects.filter(
+            #  site_deployment__site=controller_site_privilege.user.site,
+            #  controller=controller_site_privilege.controller)
+
+            #if ctrl_site_deployments:
+            #    # need the correct tenant id for site at the controller
+            #    tenant_id = ctrl_site_deployments[0].tenant_id
+            #    tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
+            user_fields = {
+                'endpoint': controller_site_privilege.controller.auth_url,
+                'endpoint_v3': controller_site_privilege.controller.auth_url_v3,
+                'domain': controller_site_privilege.controller.domain,
+                'name': controller_site_privilege.site_privilege.user.email,
+                'email': controller_site_privilege.site_privilege.user.email,
+                'password': controller_site_privilege.site_privilege.user.remote_password,
+                'admin_user': controller_site_privilege.controller.admin_user,
+                'admin_password': controller_site_privilege.controller.admin_password,
+                'ansible_tag': '%s@%s'%(controller_site_privilege.site_privilege.user.email.replace('@','-at-'),controller_site_privilege.controller.name),
+                'admin_tenant': controller_site_privilege.controller.admin_tenant,
+                'roles': roles,
+                'tenant': controller_site_privilege.site_privilege.site.login_base}
+
+            return user_fields
+
+    def map_sync_outputs(self, controller_site_privilege, res):
+        # results is an array in which each element corresponds to an
+        # "ok" string received per operation. If we get as many oks as
+        # the number of operations we issued, that means a grand success.
+        # Otherwise, the number of oks tell us which operation failed.
+        controller_site_privilege.role_id = res[0]['id']
+        controller_site_privilege.save()
+
+    def delete_record(self, controller_site_privilege):
+        controller_register = json.loads(controller_site_privilege.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+            raise InnocuousException('Controller %s is disabled'%controller_site_privilege.controller.name)
+
+        if controller_site_privilege.role_id:
+            driver = self.driver.admin_driver(controller=controller_site_privilege.controller)
+            user = ControllerUser.objects.get(
+                controller=controller_site_privilege.controller,
+                user=controller_site_privilege.site_privilege.user
+            )
+            site = ControllerSite.objects.get(
+                controller=controller_site_privilege.controller,
+                site=controller_site_privilege.site_privilege.site
+            )
+            driver.delete_user_role(
+                user.kuser_id,
+                site.tenant_id,
+                controller_site_privilege.site_privilege.role.role
+            )
diff --git a/xos/synchronizer/steps/sync_controller_sites.py b/xos/synchronizer/steps/sync_controller_sites.py
new file mode 100644
index 0000000..1b3c2ba
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_sites.py
@@ -0,0 +1,67 @@
+import os
+import base64
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.openstack.openstacksyncstep import OpenStackSyncStep
+from core.models.site import *
+from synchronizers.base.syncstep import *
+from synchronizers.base.ansible import *
+from xos.logger import observer_logger as logger
+import json
+
+class SyncControllerSites(OpenStackSyncStep):
+    requested_interval=0
+    provides=[Site]
+    observes=ControllerSite
+    playbook = 'sync_controller_sites.yaml'
+
+    def fetch_pending(self, deleted=False):
+        lobjs = ControllerSite.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False),Q(controller__isnull=False))
+        return lobjs
+
+    def map_sync_inputs(self, controller_site):
+        tenant_fields = {'endpoint': controller_site.controller.auth_url,
+                         'endpoint_v3': controller_site.controller.auth_url_v3,
+                         'domain': controller_site.controller.domain,
+                         'admin_user': controller_site.controller.admin_user,
+                         'admin_password': controller_site.controller.admin_password,
+                         'admin_tenant': controller_site.controller.admin_tenant,
+                         'ansible_tag': '%s@%s'%(controller_site.site.login_base,controller_site.controller.name), # name of ansible playbook
+                         'tenant': controller_site.site.login_base,
+                         'tenant_description': controller_site.site.name}
+        return tenant_fields
+
+    def map_sync_outputs(self, controller_site, res):
+        controller_site.tenant_id = res[0]['id']
+        controller_site.backend_status = '1 - OK'
+        controller_site.save()
+
+    def delete_record(self, controller_site):
+        controller_register = json.loads(controller_site.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+            raise InnocuousException('Controller %s is disabled'%controller_site.controller.name)
+
+        if controller_site.tenant_id:
+            driver = self.driver.admin_driver(controller=controller_site.controller)
+            driver.delete_tenant(controller_site.tenant_id)
+
+        """
+        Ansible does not support tenant deletion yet
+
+	import pdb
+	pdb.set_trace()
+        template = os_template_env.get_template('delete_controller_sites.yaml')
+	tenant_fields = {'endpoint':controller_site.controller.auth_url,
+		         'admin_user': controller_site.controller.admin_user,
+		         'admin_password': controller_site.controller.admin_password,
+		         'admin_tenant': 'admin',
+	                 'ansible_tag': 'controller_sites/%s@%s'%(controller_site.controller_site.site.login_base,controller_site.controller_site.deployment.name), # name of ansible playbook
+		         'tenant': controller_site.controller_site.site.login_base,
+		         'delete': True}
+
+	rendered = template.render(tenant_fields)
+	res = run_template('sync_controller_sites.yaml', tenant_fields)
+
+	if (len(res)!=1):
+		raise Exception('Could not assign roles for user %s'%tenant_fields['tenant'])
+        """
diff --git a/xos/synchronizer/steps/sync_controller_sites.yaml b/xos/synchronizer/steps/sync_controller_sites.yaml
new file mode 100644
index 0000000..4129802
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_sites.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}"
diff --git a/xos/synchronizer/steps/sync_controller_slice_privileges.py b/xos/synchronizer/steps/sync_controller_slice_privileges.py
new file mode 100644
index 0000000..e5513b0
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_slice_privileges.py
@@ -0,0 +1,79 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from synchronizers.base.syncstep import *
+from core.models.slice import Controller, ControllerSlice, SlicePrivilege
+from core.models.user import User
+from core.models.controlleruser import ControllerUser, ControllerSlicePrivilege
+from synchronizers.base.ansible import *
+from xos.logger import observer_logger as logger
+import json
+
+class SyncControllerSlicePrivileges(OpenStackSyncStep):
+    provides=[SlicePrivilege]
+    requested_interval=0
+    observes=ControllerSlicePrivilege
+    playbook = 'sync_controller_users.yaml'
+
+    def map_sync_inputs(self, controller_slice_privilege):
+        if not controller_slice_privilege.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_slice_privilege.controller)
+            return
+
+	template = os_template_env.get_template('sync_controller_users.yaml')
+        roles = [controller_slice_privilege.slice_privilege.role.role]
+	# setup user home slice roles at controller 
+        if not controller_slice_privilege.slice_privilege.user.site:
+            raise Exception('Sliceless user %s'%controller_slice_privilege.slice_privilege.user.email)
+        else:
+            # look up tenant id for the user's slice at the controller
+            #ctrl_slice_deployments = SliceDeployment.objects.filter(
+            #  slice_deployment__slice=controller_slice_privilege.user.slice,
+            #  controller=controller_slice_privilege.controller)
+
+            #if ctrl_slice_deployments:
+            #    # need the correct tenant id for slice at the controller
+            #    tenant_id = ctrl_slice_deployments[0].tenant_id  
+            #    tenant_name = ctrl_slice_deployments[0].slice_deployment.slice.login_base
+            user_fields = {
+               'endpoint': controller_slice_privilege.controller.auth_url,
+               'endpoint_v3': controller_slice_privilege.controller.auth_url_v3,
+               'domain': controller_slice_privilege.controller.domain,
+               'name': controller_slice_privilege.slice_privilege.user.email,
+               'email': controller_slice_privilege.slice_privilege.user.email,
+               'password': controller_slice_privilege.slice_privilege.user.remote_password,
+               'admin_user': controller_slice_privilege.controller.admin_user,
+               'admin_password': controller_slice_privilege.controller.admin_password,
+               'ansible_tag': '%s@%s@%s'%(controller_slice_privilege.slice_privilege.user.email.replace('@','-at-'),controller_slice_privilege.slice_privilege.slice.name,controller_slice_privilege.controller.name),
+               'admin_tenant': controller_slice_privilege.controller.admin_tenant,
+               'roles': roles,
+               'tenant': controller_slice_privilege.slice_privilege.slice.name}
+            return user_fields
+
+    def map_sync_outputs(self, controller_slice_privilege, res):
+        controller_slice_privilege.role_id = res[0]['id']
+        controller_slice_privilege.save()
+
+    def delete_record(self, controller_slice_privilege):
+        controller_register = json.loads(controller_slice_privilege.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+            raise InnocuousException('Controller %s is disabled'%controller_slice_privilege.controller.name)
+
+        if controller_slice_privilege.role_id:
+            driver = self.driver.admin_driver(controller=controller_slice_privilege.controller)
+            user = ControllerUser.objects.get(
+                controller=controller_slice_privilege.controller,
+                user=controller_slice_privilege.slice_privilege.user
+            )
+            slice = ControllerSlice.objects.get(
+                controller=controller_slice_privilege.controller,
+                slice=controller_slice_privilege.slice_privilege.slice
+            )
+            driver.delete_user_role(
+                user.kuser_id,
+                slice.tenant_id,
+                controller_slice_privilege.slice_privilege.role.role
+            )
diff --git a/xos/synchronizer/steps/sync_controller_slices.py b/xos/synchronizer/steps/sync_controller_slices.py
new file mode 100644
index 0000000..0666230
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_slices.py
@@ -0,0 +1,85 @@
+import os
+import base64
+from collections import defaultdict
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from synchronizers.base.syncstep import *
+from core.models import *
+from synchronizers.base.ansible import *
+from openstack_xos.driver import OpenStackDriver
+from xos.logger import observer_logger as logger
+import json
+
+class SyncControllerSlices(OpenStackSyncStep):
+    provides=[Slice]
+    requested_interval=0
+    observes=ControllerSlice
+    playbook='sync_controller_slices.yaml'
+
+    def map_sync_inputs(self, controller_slice):
+        logger.info("sync'ing slice controller %s" % controller_slice)
+
+        if not controller_slice.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_slice.controller)
+            return
+
+        controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
+                                                             controller=controller_slice.controller)
+        if not controller_users:
+            raise Exception("slice createor %s has not accout at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
+        else:
+            controller_user = controller_users[0]
+            driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
+            roles = [driver.get_admin_role().name]
+
+        max_instances=int(controller_slice.slice.max_instances)
+        tenant_fields = {'endpoint':controller_slice.controller.auth_url,
+                         'endpoint_v3': controller_slice.controller.auth_url_v3,
+                         'domain': controller_slice.controller.domain,
+                         'admin_user': controller_slice.controller.admin_user,
+                         'admin_password': controller_slice.controller.admin_password,
+                         'admin_tenant': 'admin',
+                         'tenant': controller_slice.slice.name,
+                         'tenant_description': controller_slice.slice.description,
+                         'roles':roles,
+                         'name':controller_user.user.email,
+                         'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
+                         'max_instances':max_instances}
+
+        return tenant_fields
+
+    def map_sync_outputs(self, controller_slice, res):
+        tenant_id = res[0]['id']
+        if (not controller_slice.tenant_id):
+            try:
+                driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
+                driver.shell.nova.quotas.update(tenant_id=tenant_id, instances=int(controller_slice.slice.max_instances))
+            except:
+                logger.log_exc('Could not update quota for %s'%controller_slice.slice.name)
+                raise Exception('Could not update quota for %s'%controller_slice.slice.name)
+
+            controller_slice.tenant_id = tenant_id
+            controller_slice.backend_status = '1 - OK'
+            controller_slice.save()
+
+
+    def map_delete_inputs(self, controller_slice):
+        controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
+                                                              controller=controller_slice.controller)
+        if not controller_users:
+            raise Exception("slice createor %s has not accout at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
+        else:
+            controller_user = controller_users[0]
+
+        tenant_fields = {'endpoint':controller_slice.controller.auth_url,
+                          'admin_user': controller_slice.controller.admin_user,
+                          'admin_password': controller_slice.controller.admin_password,
+                          'admin_tenant': 'admin',
+                          'tenant': controller_slice.slice.name,
+                          'tenant_description': controller_slice.slice.description,
+                          'name':controller_user.user.email,
+                          'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
+                          'delete': True}
+        return tenant_fields
diff --git a/xos/synchronizer/steps/sync_controller_slices.yaml b/xos/synchronizer/steps/sync_controller_slices.yaml
new file mode 100644
index 0000000..61470ce
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_slices.yaml
@@ -0,0 +1,12 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  {% if delete -%}
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}" state=absent
+  {% else -%}
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}"
+  {% for role in roles %}
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} user="{{ name }}" role={{ role }} tenant={{ tenant }}
+  {% endfor %}
+  {% endif %}
diff --git a/xos/synchronizer/steps/sync_controller_users.py b/xos/synchronizer/steps/sync_controller_users.py
new file mode 100644
index 0000000..c9de142
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_users.py
@@ -0,0 +1,72 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from synchronizers.base.syncstep import *
+from core.models.site import Controller, SiteDeployment
+from core.models.user import User
+from core.models.controlleruser import ControllerUser
+from synchronizers.base.ansible import *
+from openstack_xos.driver import OpenStackDriver
+from xos.logger import observer_logger as logger
+import json
+
+class SyncControllerUsers(OpenStackSyncStep):
+    provides=[User]
+    requested_interval=0
+    observes=ControllerUser
+    playbook='sync_controller_users.yaml'
+
+    def map_sync_inputs(self, controller_user):
+        if not controller_user.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_user.controller)
+            return
+
+        # All users will have at least the 'user' role at their home site/tenant.
+        # We must also check if the user should have the admin role
+
+        roles = ['user']
+        if controller_user.user.is_admin:
+            driver = OpenStackDriver().admin_driver(controller=controller_user.controller)
+            roles.append(driver.get_admin_role().name)
+
+        # setup user home site roles at controller
+        if not controller_user.user.site:
+            raise Exception('Siteless user %s'%controller_user.user.email)
+        else:
+            # look up tenant id for the user's site at the controller
+            #ctrl_site_deployments = SiteDeployment.objects.filter(
+            #  site_deployment__site=controller_user.user.site,
+            #  controller=controller_user.controller)
+
+            #if ctrl_site_deployments:
+            #    # need the correct tenant id for site at the controller
+            #    tenant_id = ctrl_site_deployments[0].tenant_id
+            #    tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
+            user_fields = {
+                'endpoint':controller_user.controller.auth_url,
+                'endpoint_v3': controller_user.controller.auth_url_v3,
+                'domain': controller_user.controller.domain,
+                'name': controller_user.user.email,
+                'email': controller_user.user.email,
+                'password': controller_user.user.remote_password,
+                'admin_user': controller_user.controller.admin_user,
+                'admin_password': controller_user.controller.admin_password,
+                'ansible_tag':'%s@%s'%(controller_user.user.email.replace('@','-at-'),controller_user.controller.name),
+                'admin_project': controller_user.controller.admin_tenant,
+                'roles':roles,
+                'project':controller_user.user.site.login_base
+                }
+            return user_fields
+
+    def map_sync_outputs(self, controller_user, res):
+        controller_user.kuser_id = res[0]['user']['id']
+        controller_user.backend_status = '1 - OK'
+        controller_user.save()
+
+    def delete_record(self, controller_user):
+        if controller_user.kuser_id:
+            driver = self.driver.admin_driver(controller=controller_user.controller)
+            driver.delete_user(controller_user.kuser_id)
diff --git a/xos/synchronizer/steps/sync_controller_users.yaml b/xos/synchronizer/steps/sync_controller_users.yaml
new file mode 100644
index 0000000..5cb3cc9
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_users.yaml
@@ -0,0 +1,50 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+
+  - name: Create user account for "{{ name }}"
+    os_user:
+      name: "{{ name }}"
+      email: "{{ email }}"
+      password: "{{ password }}"
+      auth:
+        auth_url: {{ endpoint }}
+        username: {{ admin_user }}
+        password: {{ admin_password }}
+        project_name: {{ admin_project }}
+
+  - name: Create project for "{{ project }}"
+    os_project:
+      name: "{{ project }}"
+      auth:
+        auth_url: {{ endpoint }}
+        username: {{ admin_user }}
+        password: {{ admin_password }}
+        project_name: {{ admin_project }}
+
+{% for role in roles %}
+  - name: Creating role "{{ role }}" for "{{ name }}" on "{{ project }}"
+    keystone_user:
+      user: "{{ name }}"
+      role: "{{ role }}"
+      tenant: "{{ project }}"
+      endpoint: {{ endpoint }}
+      login_user: {{ admin_user }}
+      login_password: {{ admin_password }}
+      login_tenant_name: {{ admin_project }}
+{% endfor %}
+
+# FIXME: the below should work in Ansible 2.1, once we get the Admin/admin and
+# Member/user role name issues straightened out.
+#
+#  - name: Creating role "{{ role }}" for "{{ name }}" on "{{ project }}"
+#    os_user_role:
+#      user: "{{ name }}"
+#      role: "{{ role }}"
+#      project: "{{ project }}"
+#      auth:
+#        auth_url: {{ endpoint }}
+#        username: {{ admin_user }}
+#        password: {{ admin_password }}
+#        project_name: {{ admin_project }}
diff --git a/xos/synchronizer/steps/sync_images.py b/xos/synchronizer/steps/sync_images.py
new file mode 100644
index 0000000..1638fd0
--- /dev/null
+++ b/xos/synchronizer/steps/sync_images.py
@@ -0,0 +1,52 @@
+import os
+import base64
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models.image import Image
+from xos.logger import observer_logger as logger
+
+class SyncImages(OpenStackSyncStep):
+    provides=[Image]
+    requested_interval=0
+    observes=Image
+
+    def fetch_pending(self, deleted):
+        # Images come from the back end
+        # You can't delete them
+        if (deleted):
+            logger.info("SyncImages: returning because deleted=True")
+            return []
+
+        # get list of images on disk
+        images_path = Config().observer_images_directory
+
+        logger.info("SyncImages: deleted=False, images_path=%s" % images_path)
+
+        available_images = {}
+        if os.path.exists(images_path):
+            for f in os.listdir(images_path):
+                filename = os.path.join(images_path, f)
+                if os.path.isfile(filename) and filename.endswith(".img"):
+                    available_images[f] = filename
+
+        logger.info("SyncImages: available_images = %s" % str(available_images))
+
+        images = Image.objects.all()
+        image_names = [image.name for image in images]
+
+        for image_name in available_images:
+            # remove the file extension
+            clean_name = ".".join(image_name.split('.')[:-1])
+            if clean_name not in image_names:
+                logger.info("SyncImages: adding %s" % clean_name)
+                image = Image(name=clean_name,
+                              disk_format='raw',
+                              container_format='bare', 
+                              path = available_images[image_name])
+                image.save()
+
+        return Image.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None)) 
+
+    def sync_record(self, image):
+        image.save()
diff --git a/xos/synchronizer/steps/sync_instances.py b/xos/synchronizer/steps/sync_instances.py
new file mode 100644
index 0000000..5cec50d
--- /dev/null
+++ b/xos/synchronizer/steps/sync_instances.py
@@ -0,0 +1,220 @@
+import os
+import base64
+import socket
+from django.db.models import F, Q
+from xos.config import Config
+from xos.settings import RESTAPI_HOSTNAME, RESTAPI_PORT
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models.instance import Instance
+from core.models.slice import Slice, SlicePrivilege, ControllerSlice
+from core.models.network import Network, NetworkSlice, ControllerNetwork, Port
+from synchronizers.base.ansible import *
+from synchronizers.base.syncstep import *
+from xos.logger import observer_logger as logger
+import json
+
+def escape(s):
+    s = s.replace('\n',r'\n').replace('"',r'\"')
+    return s
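+
+# e.g. escape() turns a real newline into the two characters '\n' and a double
+# quote into '\"', keeping the userdata safe to embed as a quoted string in
+# the playbook (illustrative usage).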
+
+class SyncInstances(OpenStackSyncStep):
+    provides=[Instance]
+    requested_interval=0
+    observes=Instance
+    playbook='sync_instances.yaml'
+
+    def fetch_pending(self, deletion=False):
+        objs = super(SyncInstances, self).fetch_pending(deletion)
+        objs = [x for x in objs if x.isolation=="vm"]
+        return objs
+
+    def get_userdata(self, instance, pubkeys):
+        userdata = '#cloud-config\n\nopencloud:\n   slicename: "%s"\n   hostname: "%s"\n   restapi_hostname: "%s"\n   restapi_port: "%s"\n' % (instance.slice.name, instance.node.name, RESTAPI_HOSTNAME, str(RESTAPI_PORT))
+        userdata += 'ssh_authorized_keys:\n'
+        for key in pubkeys:
+            userdata += '  - %s\n' % key
+        return userdata
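+
+    # For illustration (slice/node names assumed, not from the data model),
+    # a slice "mysite_test" on node "node1" with one public key yields:
+    #
+    #   #cloud-config
+    #
+    #   opencloud:
+    #      slicename: "mysite_test"
+    #      hostname: "node1"
+    #      restapi_hostname: "xos"
+    #      restapi_port: "8000"
+    #   ssh_authorized_keys:
+    #     - ssh-rsa AAAA... user@example.org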
+
+    def sort_nics(self, nics):
+        result = []
+
+        # Enforce VTN's network order requirement. The access network must be
+        # inserted into the first slot. The management network must be inserted
+        # into the second slot.
+
+        # move the private and/or access network to the first spot
+        for nic in nics[:]:
+            network=nic.get("network", None)
+            if network:
+                tem = network.template
+                if (tem.visibility == "private") and (tem.translation=="none") and ("management" not in tem.name):
+                    result.append(nic)
+                    nics.remove(nic)
+
+        # move the management network to the second spot
+        for nic in nics[:]:
+            network=nic.get("network", None)
+            if network:
+                tem = network.template
+                if (tem.visibility == "private") and (tem.translation=="none") and ("management" in tem.name):
+#MCORD
+#                    if len(result)!=1:
+#                        raise Exception("Management network needs to be inserted in slot 1, but there are %d private nics" % len(result))
+                    result.append(nic)
+                    nics.remove(nic)
+
+        # add everything else. For VTN there probably shouldn't be any more.
+        result.extend(nics)
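+
+        # Net effect (illustrative): nics arriving as [management, access, other]
+        # leave as [access, management, other], matching VTN's slot ordering.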
+
+        return result
+
+    def map_sync_inputs(self, instance):
+        inputs = {}
+        metadata_update = {}
+        if (instance.numberCores):
+            metadata_update["cpu_cores"] = str(instance.numberCores)
+
+        for tag in instance.slice.tags.all():
+            if tag.name.startswith("sysctl-"):
+                metadata_update[tag.name] = tag.value
+
+        slice_memberships = SlicePrivilege.objects.filter(slice=instance.slice)
+        pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
+        if instance.creator.public_key:
+            pubkeys.add(instance.creator.public_key)
+
+        if instance.slice.creator.public_key:
+            pubkeys.add(instance.slice.creator.public_key)
+
+        if instance.slice.service and instance.slice.service.public_key:
+            pubkeys.add(instance.slice.service.public_key)
+
+        nics=[]
+
+        # handle ports that were created by the user
+        port_ids=[]
+        for port in Port.objects.filter(instance=instance):
+            if not port.port_id:
+                raise DeferredException("Instance %s waiting on port %s" % (instance, port))
+            nics.append({"kind": "port", "value": port.port_id, "network": port.network})
+
+        # we want to exclude from 'nics' any network that already has a Port
+        existing_port_networks = [port.network for port in Port.objects.filter(instance=instance)]
+
+        networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice) if ns.network not in existing_port_networks]
+        controller_networks = ControllerNetwork.objects.filter(network__in=networks,
+                                                                controller=instance.node.site_deployment.controller)
+
+        #controller_networks = self.sort_controller_networks(controller_networks)
+        for controller_network in controller_networks:
+            # Lenient exception - causes slow backoff
+            if controller_network.network.template.visibility == 'private' and \
+               controller_network.network.template.translation == 'none':
+                if not controller_network.net_id:
+                    raise DeferredException("Instance %s Private Network %s has no id; Try again later" % (instance, controller_network.network.name))
+                nics.append({"kind": "net", "value": controller_network.net_id, "network": controller_network.network})
+
+        # now include network template
+        network_templates = [network.template.shared_network_name for network in networks \
+                             if network.template.shared_network_name]
+
+        #driver = self.driver.client_driver(caller=instance.creator, tenant=instance.slice.name, controller=instance.controllerNetwork)
+        driver = self.driver.admin_driver(tenant='admin', controller=instance.node.site_deployment.controller)
+        nets = driver.shell.neutron.list_networks()['networks']
+        for net in nets:
+            if net['name'] in network_templates:
+                nics.append({"kind": "net", "value": net['id'], "network": None})
+
+        if (not nics):
+            for net in nets:
+                if net['name']=='public':
+                    nics.append({"kind": "net", "value": net['id'], "network": None})
+
+        nics = self.sort_nics(nics)
+
+        image_name = None
+        controller_images = instance.image.controllerimages.filter(controller=instance.node.site_deployment.controller)
+        if controller_images:
+            image_name = controller_images[0].image.name
+            logger.info("using image from ControllerImage object: " + str(image_name))
+
+        if image_name is None:
+            controller_driver = self.driver.admin_driver(controller=instance.node.site_deployment.controller)
+            images = controller_driver.shell.glanceclient.images.list()
+            for image in images:
+                if image.name == instance.image.name or not image_name:
+                    image_name = image.name
+                    logger.info("using image from glance: " + str(image_name))
+
+        try:
+            legacy = Config().observer_legacy
+        except:
+            legacy = False
+
+        if (legacy):
+            host_filter = instance.node.name.split('.',1)[0]
+        else:
+            host_filter = instance.node.name.strip()
+
+        availability_zone_filter = 'nova:%s'%host_filter
+        instance_name = '%s-%d'%(instance.slice.name,instance.id)
+        self.instance_name = instance_name
+
+        userData = self.get_userdata(instance, pubkeys)
+        if instance.userData:
+            userData += instance.userData
+
+        controller = instance.node.site_deployment.controller
+        fields = {'endpoint':controller.auth_url,
+                     'endpoint_v3': controller.auth_url_v3,
+                     'domain': controller.domain,
+                     'admin_user': instance.creator.email,
+                     'admin_password': instance.creator.remote_password,
+                     'project_name': instance.slice.name,
+                     'tenant': instance.slice.name,
+                     'tenant_description': instance.slice.description,
+                     'name':instance_name,
+                     'ansible_tag':instance_name,
+                     'availability_zone': availability_zone_filter,
+                     'image_name':image_name,
+                     'flavor_name':instance.flavor.name,
+                     'nics':nics,
+                     'meta':metadata_update,
+                     'user_data':r'%s'%escape(userData)}
+        return fields
+
+
+    def map_sync_outputs(self, instance, res):
+        instance_id = res[0]['openstack']['OS-EXT-SRV-ATTR:instance_name']
+        instance_uuid = res[0]['id']
+
+        try:
+            hostname = res[0]['openstack']['OS-EXT-SRV-ATTR:hypervisor_hostname']
+            ip = socket.gethostbyname(hostname)
+            instance.ip = ip
+        except:
+            pass
+
+        instance.instance_id = instance_id
+        instance.instance_uuid = instance_uuid
+        instance.instance_name = self.instance_name
+        instance.save()
+
+    def map_delete_inputs(self, instance):
+        controller_register = json.loads(instance.node.site_deployment.controller.backend_register)
+
+        if (controller_register.get('disabled',False)):
+            raise InnocuousException('Controller %s is disabled'%instance.node.site_deployment.controller.name)
+
+        instance_name = '%s-%d'%(instance.slice.name,instance.id)
+        controller = instance.node.site_deployment.controller
+        input = {'endpoint':controller.auth_url,
+                     'admin_user': instance.creator.email,
+                     'admin_password': instance.creator.remote_password,
+                     'admin_tenant': instance.slice.name,
+                     'tenant': instance.slice.name,
+                     'tenant_description': instance.slice.description,
+                     'name':instance_name,
+                     'ansible_tag':instance_name,
+                     'delete': True}
+        return input
diff --git a/xos/synchronizer/steps/sync_instances.yaml b/xos/synchronizer/steps/sync_instances.yaml
new file mode 100644
index 0000000..476890f
--- /dev/null
+++ b/xos/synchronizer/steps/sync_instances.yaml
@@ -0,0 +1,35 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - os_server:
+      name: {{ name }}
+      auth:
+        auth_url: {{ endpoint }}
+        username: {{ admin_user }}
+        password: {{ admin_password }}
+        project_name: {{ project_name }}
+      {% if delete -%}
+      state: absent
+      {% else -%}
+      state: present
+      availability_zone: "{{ availability_zone }}"
+      image: {{ image_name }}
+      flavor: {{ flavor_name }}
+      timeout: 200
+      userdata: "{{ user_data }}"
+      config_drive: yes
+      auto_ip: no
+      nics:
+      {% for nic in nics %}
+          - {{ nic.kind }}-id: {{ nic.value }}
+      {% endfor %}
+
+      {% if meta %}
+      meta:
+      {% for k,v in meta.items() %}
+          {{ k }} : "{{ v }}"
+      {% endfor %}
+      {% endif %}
+      {% endif %}
+
diff --git a/xos/synchronizer/steps/sync_object.py b/xos/synchronizer/steps/sync_object.py
new file mode 100644
index 0000000..aaf2f25
--- /dev/null
+++ b/xos/synchronizer/steps/sync_object.py
@@ -0,0 +1,20 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from synchronizers.base.syncstep import *
+from core.models import *
+from synchronizers.base.ansible import *
+from openstack_xos.driver import OpenStackDriver
+from xos.logger import observer_logger as logger
+import json
+
+class SyncObject(OpenStackSyncStep):
+    provides=[] # Caller fills this in
+    requested_interval=0
+    observes=[] # Caller fills this in
+
+    def sync_record(self, r):
+        raise DeferredException('Waiting for Service dependency: %r'%r)
diff --git a/xos/synchronizer/steps/sync_ports.py b/xos/synchronizer/steps/sync_ports.py
new file mode 100644
index 0000000..5e0ff04
--- /dev/null
+++ b/xos/synchronizer/steps/sync_ports.py
@@ -0,0 +1,230 @@
+import os
+import base64
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models import Controller
+from core.models.network import *
+from xos.logger import observer_logger as logger
+
+class SyncPorts(OpenStackSyncStep):
+    requested_interval = 0 # 3600
+    provides=[Port]
+    observes=Port
+
+    #     The way it works is to enumerate the all of the ports that neutron
+    #     has, and then work backward from each port's network-id to determine
+    #     which Network is associated from the port.
+
+    def call(self, failed=[], deletion=False):
+        if deletion:
+            self.delete_ports()
+        else:
+            self.sync_ports()
+
+    def get_driver(self, port):
+        # We need to use a client driver that specifies the tenant
+        # of the destination instance. Nova-compute will not connect
+        # ports to instances if the port's tenant does not match
+        # the instance's tenant.
+
+        # A bunch of stuff to compensate for OpenStackDriver.client_driver()
+        # not being in working condition.
+        from openstack_xos.client import OpenStackClient
+        from openstack_xos.driver import OpenStackDriver
+        controller = port.instance.node.site_deployment.controller
+        slice = port.instance.slice
+        caller = port.network.owner.creator
+        auth = {'username': caller.email,
+                'password': caller.remote_password,
+                'tenant': slice.name}
+        client = OpenStackClient(controller=controller, **auth)
+        driver = OpenStackDriver(client=client)
+
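+        # The returned driver is scoped to the instance's tenant; callers use
+        # it as, e.g., driver.shell.neutron.create_port({"port": args}) below.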
+        return driver
+
+    def sync_ports(self):
+        logger.info("sync'ing Ports [delete=False]")
+
+        ports = Port.objects.all()
+        ports_by_id = {}
+        ports_by_neutron_port = {}
+        for port in ports:
+            ports_by_id[port.id] = port
+            ports_by_neutron_port[port.port_id] = port
+
+        networks = Network.objects.all()
+        networks_by_id = {}
+        for network in networks:
+            for nd in network.controllernetworks.all():
+                networks_by_id[nd.net_id] = network
+
+        #logger.info("networks_by_id = ")
+        #for (network_id, network) in networks_by_id.items():
+        #    logger.info("   %s: %s" % (network_id, network.name))
+
+        instances = Instance.objects.all()
+        instances_by_instance_uuid = {}
+        for instance in instances:
+            instances_by_instance_uuid[instance.instance_uuid] = instance
+
+        # Get all ports in all controllers
+
+        ports_by_id = {}
+        templates_by_id = {}
+        for controller in Controller.objects.all():
+            if not controller.admin_tenant:
+                logger.info("controller %s has no admin_tenant" % controller)
+                continue
+            try:
+                driver = self.driver.admin_driver(controller = controller)
+                ports = driver.shell.neutron.list_ports()["ports"]
+            except:
+                logger.log_exc("failed to get ports from controller %s" % controller)
+                continue
+
+            for port in ports:
+                ports_by_id[port["id"]] = port
+
+            # public-nat and public-dedicated networks don't have a net-id anywhere
+            # in the data model, so build up a list of which ids map to which network
+            # templates.
+            try:
+                neutron_networks = driver.shell.neutron.list_networks()["networks"]
+            except:
+                print "failed to get networks from controller %s" % controller
+                continue
+            for network in neutron_networks:
+                for template in NetworkTemplate.objects.all():
+                    if template.shared_network_name == network["name"]:
+                        templates_by_id[network["id"]] = template
+
+        for port in ports_by_id.values():
+            #logger.info("port %s" % str(port))
+            if port["id"] in ports_by_neutron_port:
+                # we already have it
+                #logger.info("already accounted for port %s" % port["id"])
+                continue
+
+            if port["device_owner"] != "compute:nova":
+                # we only want the ports that connect to instances
+                #logger.info("port %s is not a compute port, it is a %s" % (port["id"], port["device_owner"]))
+                continue
+
+            instance = instances_by_instance_uuid.get(port['device_id'], None)
+            if not instance:
+                logger.info("no instance for port %s device_id %s" % (port["id"], port['device_id']))
+                continue
+
+            network = networks_by_id.get(port['network_id'], None)
+            if not network:
+                # maybe it's public-nat or public-dedicated. Search the templates for
+                # the id, then see if the instance's slice has some network that uses
+                # that template
+                template = templates_by_id.get(port['network_id'], None)
+                if template and instance.slice:
+                    for candidate_network in instance.slice.networks.all():
+                         if candidate_network.template == template:
+                             network=candidate_network
+            if not network:
+                logger.info("no network for port %s network %s" % (port["id"], port["network_id"]))
+
+                # we know it's associated with a instance, but we don't know
+                # which network it is part of.
+
+                continue
+
+            if network.template.shared_network_name:
+                # If it's a shared network template, then more than one network
+                # object maps to the neutron network. We have to do a whole bunch
+                # of extra work to find the right one.
+                networks = network.template.network_set.all()
+                network = None
+                for candidate_network in networks:
+                    if (candidate_network.owner == instance.slice):
+                        logger.info("found network %s" % candidate_network)
+                        network = candidate_network
+
+                if not network:
+                    logger.info("failed to find the correct network for a shared template for port %s network %s" % (port["id"], port["network_id"]))
+                    continue
+
+            if not port["fixed_ips"]:
+                logger.info("port %s has no fixed_ips" % port["id"])
+                continue
+
+            ip=port["fixed_ips"][0]["ip_address"]
+            mac=port["mac_address"]
+            logger.info("creating Port (%s, %s, %s, %s)" % (str(network), str(instance), ip, str(port["id"])))
+
+            ns = Port(network=network,
+                               instance=instance,
+                               ip=ip,
+                               mac=mac,
+                               port_id=port["id"])
+
+            try:
+                ns.save()
+            except:
+                logger.log_exc("failed to save port %s" % str(ns))
+                continue
+
+        # For ports that were created by the user, find the ones
+        # that don't have neutron ports, and create them.
+        for port in Port.objects.filter(Q(port_id__isnull=True), Q(instance__isnull=False) ):
+            logger.info("XXX working on port %s" % port)
+            controller = port.instance.node.site_deployment.controller
+            slice = port.instance.slice
+
+            if controller:
+                cn=port.network.controllernetworks.filter(controller=controller)
+                if not cn:
+                    logger.log_exc("no controllernetwork for %s" % port)
+                    continue
+                cn=cn[0]
+                if cn.lazy_blocked:
+                    cn.lazy_blocked=False
+                    cn.save()
+                    logger.info("deferring port %s because controllerNetwork was lazy-blocked" % port)
+                    continue
+                if not cn.net_id:
+                    logger.info("deferring port %s because controllerNetwork does not have a port-id yet" % port)
+                    continue
+                try:
+                    driver = self.get_driver(port)
+
+                    args = {"network_id": cn.net_id}
+                    neutron_port_name = port.get_parameters().get("neutron_port_name", None)
+                    if neutron_port_name:
+                        args["name"] = neutron_port_name
+
+                    neutron_port = driver.shell.neutron.create_port({"port": args})["port"]
+                    port.port_id = neutron_port["id"]
+                    if neutron_port["fixed_ips"]:
+                        port.ip = neutron_port["fixed_ips"][0]["ip_address"]
+                    port.mac = neutron_port["mac_address"]
+                    port.xos_created = True
+                    logger.info("created neutron port %s for %s" % (port.port_id, port))
+                except:
+                    logger.log_exc("failed to create neutron port for %s" % port)
+                    continue
+                port.save()
+
+    def delete_ports(self):
+        logger.info("sync'ing Ports [delete=True]")
+        for port in Port.deleted_objects.all():
+            self.delete_record(port)
+
+    def delete_record(self, port):
+        if port.xos_created and port.port_id:
+            logger.info("calling openstack to destroy port %s" % port.port_id)
+            try:
+                driver = self.get_driver(port)
+                driver.shell.neutron.delete_port(port.port_id)
+            except:
+                logger.log_exc("failed to delete port %s from neutron" % port.port_id)
+                return
+
+        logger.info("Purging port %s" % port)
+        port.delete(purge=True)
+
diff --git a/xos/synchronizer/steps/sync_roles.py b/xos/synchronizer/steps/sync_roles.py
new file mode 100644
index 0000000..e859316
--- /dev/null
+++ b/xos/synchronizer/steps/sync_roles.py
@@ -0,0 +1,23 @@
+import os
+import base64
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models.role import Role
+from core.models.site import SiteRole, Controller, ControllerRole
+from core.models.slice import SliceRole
+from xos.logger import observer_logger as logger
+
+class SyncRoles(OpenStackSyncStep):
+    provides=[Role]
+    requested_interval=0
+    observes=[SiteRole,SliceRole,ControllerRole]
+
+    def sync_record(self, role):
+        if not role.enacted:
+            controllers = Controller.objects.all()
+            for controller in controllers:
+                driver = self.driver.admin_driver(controller=controller)
+                driver.create_role(role.role)
+            role.save()
+    
diff --git a/xos/synchronizer/steps/teardown_container.yaml b/xos/synchronizer/steps/teardown_container.yaml
new file mode 100644
index 0000000..5cabc78
--- /dev/null
+++ b/xos/synchronizer/steps/teardown_container.yaml
@@ -0,0 +1,33 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: {{ username }}
+  sudo: yes
+
+  vars:
+    container_name: {{ container_name }}
+    docker_image: {{ docker_image }}
+    ports:
+    {% for port in ports %}
+       - device: {{ port.device }}
+         xos_network_id: {{ port.xos_network_id }}
+         mac: {{ port.mac|default("") }}
+         ip: {{ port.ip }}
+         snoop_instance_mac: {{ port.snoop_instance_mac }}
+         snoop_instance_id: {{ port.snoop_instance_id }}
+         parent_mac: {{ port.parent_mac|default("") }}
+         s_tag: {{ port.s_tag|default("")  }}
+         c_tag: {{ port.c_tag|default("") }}
+         next_hop: {{ port.next_hop|default("") }}
+         bridge: {{ port.bridge }}
+    {% endfor %}
+    volumes:
+    {% for volume in volumes %}
+       - {{ volume }}
+    {% endfor %}
+
+  tasks:
+  - name: Make sure container is stopped
+    service: name=container-{{ container_name }} state=stopped
+
diff --git a/xos/synchronizer/templates/container.conf.j2 b/xos/synchronizer/templates/container.conf.j2
new file mode 100644
index 0000000..7cbb880
--- /dev/null
+++ b/xos/synchronizer/templates/container.conf.j2
@@ -0,0 +1,14 @@
+# Upstart script for container
+description "container"
+author "smbaker@gmail.com"
+start on filesystem and started docker
+stop on runlevel [!2345]
+respawn
+
+script
+  /usr/local/sbin/start-container-{{ container_name }}.sh ATTACH
+end script
+
+post-stop script
+  /usr/local/sbin/stop-container-{{ container_name }}.sh
+end script
\ No newline at end of file
diff --git a/xos/synchronizer/templates/container.service.j2 b/xos/synchronizer/templates/container.service.j2
new file mode 100644
index 0000000..817d6d7
--- /dev/null
+++ b/xos/synchronizer/templates/container.service.j2
@@ -0,0 +1,11 @@
+[Unit]
+Description={{ container_name }}
+After=docker.service
+
+[Service]
+ExecStart=/bin/bash -c "/usr/local/sbin/start-container-{{ container_name }}.sh ATTACH"
+ExecStop=/bin/bash -c "/usr/local/sbin/stop-container-{{ container_name }}.sh"
+SuccessExitStatus=0 137
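+# 137 = 128 + SIGKILL(9): "docker stop" kills the attached process, so systemd
+# should treat that exit code as a clean stop.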
+
+[Install]
+WantedBy=multi-user.target
diff --git a/xos/synchronizer/templates/start-container.sh.j2 b/xos/synchronizer/templates/start-container.sh.j2
new file mode 100644
index 0000000..2fbf478
--- /dev/null
+++ b/xos/synchronizer/templates/start-container.sh.j2
@@ -0,0 +1,136 @@
+#!/bin/bash
+
+iptables -L > /dev/null
+ip6tables -L > /dev/null
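+# These no-op listings likely just force the iptables/ip6tables kernel modules
+# to load before any containers are started (assumption; output is discarded).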
+
+CONTAINER={{ container_name }}
+IMAGE={{ docker_image }}
+
+function mac_to_iface {
+    PARENT_MAC=$1
+    ifconfig|grep $PARENT_MAC| awk '{print $1}'|grep -v '\.'
+}
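+# e.g. "mac_to_iface 02:42:c0:a8:01:02" prints the interface (say, eth1) that
+# ifconfig reports with that MAC (values illustrative).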
+
+function encapsulate_stag {
+    LAN_IFACE=$1
+    STAG=$2
+    ifconfig $LAN_IFACE >> /dev/null
+    if [ "$?" == 0 ]; then
+        STAG_IFACE=$LAN_IFACE.$STAG
+        ifconfig $LAN_IFACE up
+        ifconfig $STAG_IFACE
+        if [ "$?" == 0 ]; then
+            echo $STAG_IFACE is already created
+        else
+            ifconfig $STAG_IFACE >> /dev/null || ip link add link $LAN_IFACE name $STAG_IFACE type vlan id $STAG
+        fi
+            ifconfig $STAG_IFACE up
+    else
+        echo There is no $LAN_IFACE. Aborting.
+        exit -1
+    fi
+}
+
+
+{% if volumes %}
+{% for volume in volumes %}
+DEST_DIR=/var/container_volumes/$CONTAINER/{{ volume }}
+mkdir -p $DEST_DIR
+VOLUME_ARGS="$VOLUME_ARGS -v $DEST_DIR:{{ volume }}"
+{% endfor %}
+{% endif %}
+
+docker inspect $CONTAINER > /dev/null 2>&1
+if [ "$?" == 1 ]
+then
+    docker pull $IMAGE
+{% if network_method=="host" %}
+    docker run -d --name=$CONTAINER --privileged=true --net=host $VOLUME_ARGS $IMAGE
+{% elif network_method=="bridged" %}
+    docker run -d --name=$CONTAINER --privileged=true --net=bridge $VOLUME_ARGS $IMAGE
+{% else %}
+    docker run -d --name=$CONTAINER --privileged=true --net=none $VOLUME_ARGS $IMAGE
+{% endif %}
+else
+    docker start $CONTAINER
+fi
+
+{% if ports %}
+{% for port in ports %}
+
+{% if port.next_hop %}
+NEXTHOP_ARG="@{{ port.next_hop }}"
+{% else %}
+NEXTHOP_ARG=""
+{% endif %}
+
+{% if port.c_tag %}
+CTAG_ARG="@{{ port.c_tag }}"
+{% else %}
+CTAG_ARG=""
+{% endif %}
+
+{% if port.parent_mac %}
+# container-in-VM
+SRC_DEV=$( mac_to_iface "{{ port.parent_mac }}" )
+CMD="docker exec $CONTAINER ifconfig $SRC_DEV >> /dev/null || pipework $SRC_DEV -i {{ port.device }} $CONTAINER {{ port.ip }}/24$NEXTHOP_ARG {{ port.mac }} $CTAG_ARG"
+echo $CMD
+eval $CMD
+
+{% else %}
+# container-on-metal
+IP="{{ port.ip }}"
+{% if port.mac %}
+MAC="{{ port.mac }}"
+{% else %}
+MAC=""
+{% endif %}
+
+DEVICE="{{ port.device }}"
+BRIDGE="{{ port.bridge }}"
+{% if port.s_tag %}
+# This is intended for lan_network. Assume that BRIDGE is set to br_lan. We
+# create a device that strips off the S-TAG.
+STAG="{{ port.s_tag }}"
+encapsulate_stag $BRIDGE $STAG
+SRC_DEV=$STAG_IFACE
+{% else %}
+# This is for a standard neutron private network. We use a donor VM to setup
+# openvswitch for us, and we snoop at its devices and create a tap using the
+# same settings.
+XOS_NETWORK_ID="{{ port.xos_network_id }}"
+INSTANCE_MAC="{{ port.snoop_instance_mac }}"
+INSTANCE_ID="{{ port.snoop_instance_id }}"
+INSTANCE_TAP=`virsh domiflist $INSTANCE_ID | grep -i $INSTANCE_MAC | awk '{print $1}'`
+INSTANCE_TAP=${INSTANCE_TAP:3}
+VLAN_ID=`ovs-vsctl show | grep -i -A 1 port.*$INSTANCE_TAP | grep -i tag | awk '{print $2}'`
+# One tap for all containers per XOS/neutron network. Included the VLAN_ID in the
+# hash, to cover the case where XOS is reinstalled and the XOS network ids
+# get reused.
+TAP="con`echo ${XOS_NETWORK_ID}_$VLAN_ID|md5sum|awk '{print $1}'`"
+TAP=${TAP:0:10}
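+# e.g. XOS_NETWORK_ID=17 and VLAN_ID=103 hash to something like "con3f5a8b9",
+# i.e. "con" plus the first 7 hex digits of the md5 (values illustrative).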
+echo im=$INSTANCE_MAC ii=$INSTANCE_ID it=$INSTANCE_TAP vlan=$VLAN_ID tap=$TAP con=$CONTAINER dev=$DEVICE mac=$MAC
+ovs-vsctl show | grep -i $TAP
+if [[ $? == 1 ]]; then
+    echo creating tap
+    ovs-vsctl add-port $BRIDGE $TAP tag=$VLAN_ID -- set interface $TAP type=internal
+else
+    echo tap exists
+fi
+SRC_DEV=$TAP
+{% endif %}
+
+CMD="docker exec $CONTAINER ifconfig $DEVICE >> /dev/null || pipework $SRC_DEV -i $DEVICE $CONTAINER $IP/24$NEXTHOP_ARG $MAC $CTAG_ARG"
+echo $CMD
+eval $CMD
+{% endif %}
+{% endfor %}
+{% endif %}
+
+# Attach to container
+# (this is only done when using upstart, since upstart expects to be attached
+#  to a running service)
+if [[ "$1" == "ATTACH" ]]; then
+    docker start -a $CONTAINER
+fi
+
diff --git a/xos/synchronizer/templates/stop-container.sh.j2 b/xos/synchronizer/templates/stop-container.sh.j2
new file mode 100644
index 0000000..9cabb00
--- /dev/null
+++ b/xos/synchronizer/templates/stop-container.sh.j2
@@ -0,0 +1,4 @@
+CONTAINER={{ container_name }}
+
+docker stop $CONTAINER
+docker rm $CONTAINER
diff --git a/xos/synchronizer/xos-synchronizer.py b/xos/synchronizer/xos-synchronizer.py
new file mode 100644
index 0000000..0036012
--- /dev/null
+++ b/xos/synchronizer/xos-synchronizer.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+import os
+import argparse
+import sys
+
+sys.path.append('/opt/xos')
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
+from synchronizers.base.backend import Backend
+from xos.config import Config, DEFAULT_CONFIG_FN
+from core.models import Instance,NetworkTemplate
+from xos.logger import Logger, logging, logger
+from django.db import ProgrammingError
+import time
+
+try:
+    from django import setup as django_setup # django 1.7
+except:
+    django_setup = False
+
+config = Config()
+
+# set the driver.
+from openstack_xos.driver import OpenStackDriver
+set_driver(OpenStackDriver())
+
+# after http://www.erlenstar.demon.co.uk/unix/faq_2.html
+def daemon():
+    """Daemonize the current process."""
+    if os.fork() != 0: os._exit(0)
+    os.setsid()
+    if os.fork() != 0: os._exit(0)
+    os.umask(0)
+    devnull = os.open(os.devnull, os.O_RDWR)
+    os.dup2(devnull, 0)
+    # xxx fixme - this is just to make sure that nothing gets stupidly lost - should use devnull
+    logdir=os.path.dirname(config.observer_logfile)
+    # when installed in standalone we might not have httpd installed
+    if not os.path.isdir(logdir): os.mkdir(logdir)
+    crashlog = os.open('%s'%config.observer_logfile, os.O_RDWR | os.O_APPEND | os.O_CREAT, 0644)
+    os.dup2(crashlog, 1)
+    os.dup2(crashlog, 2)
+
+    if hasattr(config, "observer_pidfile"):
+        pidfile = config.get("observer_pidfile")
+    else:
+        pidfile = "/var/run/xosobserver.pid"
+    try:
+        file(pidfile,"w").write(str(os.getpid()))
+    except:
+        print "failed to create pidfile %s" % pidfile
+
+def main():
+    # Generate command line parser
+    parser = argparse.ArgumentParser(usage='%(prog)s [options]')
+    parser.add_argument('-d', '--daemon', dest='daemon', action='store_true', default=False,
+                        help='Run as daemon.')
+    # smbaker: util/config.py parses sys.argv[] directly to get config file name; include the option here to avoid
+    #   throwing unrecognized argument exceptions
+    parser.add_argument('-C', '--config', dest='config_file', action='store', default=DEFAULT_CONFIG_FN,
+                        help='Name of config file.')
+    args = parser.parse_args()
+
+    if args.daemon: daemon()
+
+    if django_setup: # 1.7
+        django_setup()
+
+    models_active = False
+    wait = False
+    while not models_active:
+        try:
+            _ = Instance.objects.first()
+            _ = NetworkTemplate.objects.first()
+            models_active = True
+        except Exception,e:
+            logger.info(str(e))
+            logger.info('Waiting for data model to come up before starting...')
+            time.sleep(10)
+            wait = True
+
+    if (wait):
+        time.sleep(60) # Safety factor, seeing that we stumbled waiting for the data model to come up.
+    backend = Backend()
+    backend.run()
+
+if __name__ == '__main__':
+    main()