Controller replaces Deployment

Rename the Deployment-scoped observer models and sync steps to their
Controller-scoped equivalents: ImageDeployments -> ControllerImages,
NetworkDeployments -> ControllerNetworks, SliceDeployments ->
ControllerSlices, UserDeployments -> ControllerUsers, and SiteDeployments
-> ControllerSites in the affected steps. New Ansible-backed steps
(sync_controller_sites, sync_controller_site_deployments,
sync_controller_slices, sync_controller_users) create Keystone tenants
and users by rendering the accompanying keystone_user playbooks through
observer.ansible.run_template, and admin_driver()/client_driver() now
take a controller= argument instead of deployment=.
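
For reference, a minimal sketch of what a sync step looks like after the
rename (the class name ExampleControllerStep is hypothetical and not part
of this patch; the query and driver calls mirror sync_controller_images.py):

    # Hypothetical example, for illustration only -- not included in this patch.
    from django.db.models import F, Q
    from observer.openstacksyncstep import OpenStackSyncStep
    from core.models import ControllerImages

    class ExampleControllerStep(OpenStackSyncStep):
        provides = [ControllerImages]   # Controller-scoped model, not a *Deployments one
        requested_interval = 0

        def fetch_pending(self, deleted):
            if deleted:
                return []
            # select records whose 'updated' timestamp is newer than 'enacted'
            return ControllerImages.objects.filter(
                Q(enacted__lt=F('updated')) | Q(enacted=None))

        def sync_record(self, controller_image):
            # admin_driver() is now keyed by controller rather than deployment
            driver = self.driver.admin_driver(controller=controller_image.controller.name)
            # ... per-controller OpenStack work happens here
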
diff --git a/planetstack/openstack_observer/steps/__init__.py b/planetstack/openstack_observer/steps/__init__.py
index 0151af3..3f074a5 100644
--- a/planetstack/openstack_observer/steps/__init__.py
+++ b/planetstack/openstack_observer/steps/__init__.py
@@ -1,7 +1,7 @@
 ##from .sync_external_routes import SyncExternalRoutes
 #from .sync_network_slivers import SyncNetworkSlivers
 #from .sync_networks import SyncNetworks
-#from .sync_network_deployments import SyncNetworkDeployments
+#from .sync_controller_networks import SyncControllerNetworks
 #from .sync_site_privileges import SyncSitePrivileges
 #from .sync_sites import SyncSites
 #from .sync_slice_memberships import SyncSliceMemberships
@@ -12,5 +12,5 @@
 #from .sync_roles import SyncRoles
 #from .sync_nodes import SyncNodes
 #from .sync_images import SyncImages
-#from .sync_image_deployments import SyncImageDeployments
+#from .sync_controller_images import SyncControllerImages
 #from .garbage_collector import GarbageCollector
diff --git a/planetstack/openstack_observer/steps/sync_controller_images.py b/planetstack/openstack_observer/steps/sync_controller_images.py
index 20c22a2..67cd7b5 100644
--- a/planetstack/openstack_observer/steps/sync_controller_images.py
+++ b/planetstack/openstack_observer/steps/sync_controller_images.py
@@ -4,74 +4,74 @@
 from django.db.models import F, Q
 from planetstack.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
-from core.models import Deployment
-from core.models import Image, ImageDeployments
+from core.models import Controller
+from core.models import Image, ControllerImages
 from util.logger import Logger, logging
 

 logger = Logger(level=logging.INFO)
 
-class SyncImageDeployments(OpenStackSyncStep):
-    provides=[ImageDeployments]
+class SyncControllerImages(OpenStackSyncStep):
+    provides=[ControllerImages]
     requested_interval=0
 
     def fetch_pending(self, deleted):
         if (deleted):
             return []
-         # smbaker: commented out automatic creation of ImageDeployments as
+         # smbaker: commented out automatic creation of ControllerImages
          #    as they will now be configured in GUI. Not sure if this is
          #    sufficient.
 
-#        # ensure images are available across all deployments
-#        image_deployments = ImageDeployments.objects.all()
+#        # ensure images are available across all controllers
+#        controller_images = ControllerImages.objects.all()
 #        image_deploy_lookup = defaultdict(list)
-#        for image_deployment in image_deployments:
-#            image_deploy_lookup[image_deployment.image].append(image_deployment.deployment)
+#        for controller_image in controller_images:
+#            image_deploy_lookup[controller_image.image].append(controller_image.controller)
 #
-#        all_deployments = Deployment.objects.all()
+#        all_controllers = Controller.objects.all()
 #        for image in Image.objects.all():
-#            expected_deployments = all_deployments
-#            for expected_deployment in expected_deployments:
+#            expected_controllers = all_controllers
+#            for expected_controller in expected_controllers:
 #                if image not in image_deploy_lookup or \
-#                  expected_deployment not in image_deploy_lookup[image]:
-#                    id = ImageDeployments(image=image, deployment=expected_deployment)
+#                  expected_controller not in image_deploy_lookup[image]:
+#                    id = ControllerImages(image=image, controller=expected_controller)
 #                    id.save()
 
         # now we return all images that need to be enacted
-        return ImageDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+        return ControllerImages.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
 
-    def sync_record(self, image_deployment):
-        logger.info("Working on image %s on deployment %s" % (image_deployment.image.name, image_deployment.deployment.name))
-        driver = self.driver.admin_driver(deployment=image_deployment.deployment.name)
+    def sync_record(self, controller_image):
+        logger.info("Working on image %s on controller %s" % (controller_image.image.name, controller_image.controller.name))
+        driver = self.driver.admin_driver(controller=controller_image.controller.name)
         images = driver.shell.glance.get_images()
         glance_image = None
         for image in images:
-            if image['name'] == image_deployment.image.name:
+            if image['name'] == controller_image.image.name:
                 glance_image = image
                 break
         if glance_image:
-            logger.info("Found image %s on deployment %s" % (image_deployment.image.name, image_deployment.deployment.name))
-            image_deployment.glance_image_id = glance_image['id']
-        elif image_deployment.image.path:
+            logger.info("Found image %s on controller %s" % (controller_image.image.name, controller_image.controller.name))
+            controller_image.glance_image_id = glance_image['id']
+        elif controller_image.image.path:
             image = {
-                'name': image_deployment.image.name,
+                'name': controller_image.image.name,
                 'is_public': True,
                 'disk_format': 'raw',
                 'container_format': 'bare',
-                'file': image_deployment.image.path,
+                'file': controller_image.image.path,
             }
 
-            logger.info("Creating image %s on deployment %s" % (image_deployment.image.name, image_deployment.deployment.name))
+            logger.info("Creating image %s on controller %s" % (controller_image.image.name, controller_image.controller.name))
 
-            glance_image = driver.shell.glanceclient.images.create(name=image_deployment.image.name,
+            glance_image = driver.shell.glanceclient.images.create(name=controller_image.image.name,
                                                                    is_public=True,
                                                                    disk_format='raw',
                                                                    container_format='bare')
-            glance_image.update(data=open(image_deployment.image.path, 'rb'))
+            glance_image.update(data=open(controller_image.image.path, 'rb'))
 
             # While the images returned by driver.shell.glance.get_images()
             #   are dicts, the images returned by driver.shell.glanceclient.images.create
             #   are not dicts. We have to use getattr() instead of [] operator.
             if not glance_image or not getattr(glance_image,"id",None):
-                raise Exception, "Add image failed at deployment %s" % image_deployment.deployment.name
-            image_deployment.glance_image_id = getattr(glance_image, "id")
-        image_deployment.save()
+                raise Exception, "Add image failed at controller %s" % controller_image.controller.name
+            controller_image.glance_image_id = getattr(glance_image, "id")
+        controller_image.save()
diff --git a/planetstack/openstack_observer/steps/sync_controller_networks.py b/planetstack/openstack_observer/steps/sync_controller_networks.py
index a6fc389..bb18b39 100644
--- a/planetstack/openstack_observer/steps/sync_controller_networks.py
+++ b/planetstack/openstack_observer/steps/sync_controller_networks.py
@@ -12,21 +12,21 @@
 
 logger = Logger(level=logging.INFO)
 
-class SyncNetworkDeployments(OpenStackSyncStep):
+class SyncControllerNetworks(OpenStackSyncStep):
     requested_interval = 0
-    provides=[Network, NetworkDeployments, Sliver]
+    provides=[Network, ControllerNetworks, Sliver]
 
     def fetch_pending(self, deleted):
         if (deleted):
-            return NetworkDeployments.deleted_objects.all()
+            return ControllerNetworks.deleted_objects.all()
         else:
-            return NetworkDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+            return ControllerNetworks.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
 
-    def get_next_subnet(self, deployment=None):
+    def get_next_subnet(self, controller=None):
         # limit ourself to 10.0.x.x for now
         valid_subnet = lambda net: net.startswith('10.0')
 
-        driver = self.driver.admin_driver(deployment=deployment,tenant='admin')
+        driver = self.driver.admin_driver(controller=controller,tenant='admin')
         subnets = driver.shell.quantum.list_subnets()['subnets']
         ints = [int(IPNetwork(subnet['cidr']).ip) for subnet in subnets \
                 if valid_subnet(subnet['cidr'])]
@@ -39,98 +39,98 @@
         next_network = IPNetwork(str(IPAddress(last_network) + last_network.size) + "/24")
         return next_network
 
-    def save_network_deployment(self, network_deployment):
-        if (not network_deployment.net_id) and network_deployment.network.template.sharedNetworkName:
+    def save_controller_network(self, controller_network):
+        if (not controller_network.net_id) and controller_network.network.template.sharedNetworkName:
             # It's a shared network, try to find the shared network id
 
-            quantum_networks = self.driver.shell.quantum.list_networks(name=network_deployment.network.template.sharedNetworkName)["networks"]
+            quantum_networks = self.driver.shell.quantum.list_networks(name=controller_network.network.template.sharedNetworkName)["networks"]
             if quantum_networks:
                 logger.info("set shared network id %s" % quantum_networks[0]["id"])
-                network_deployment.net_id = quantum_networks[0]["id"]
+                controller_network.net_id = quantum_networks[0]["id"]
             else:
-                logger.info("failed to find shared network id for deployment")
+                logger.info("failed to find shared network id for controller")
                 return
 
         # At this point, it must be a private network, so create it if it does
         # not exist.
 
-        if not network_deployment.net_id:
-            network_name = network_deployment.network.name
+        if not controller_network.net_id:
+            network_name = controller_network.network.name
 
             # create network
             os_network = self.driver.create_network(network_name, shared=True)
-            network_deployment.net_id = os_network['id']
+            controller_network.net_id = os_network['id']
 
             # create router
             #router = self.driver.create_router(network_name)
-            #network_deployment.router_id = router['id']
+            #controller_network.router_id = router['id']
 
             # create subnet
-            next_subnet = self.get_next_subnet(deployment=network_deployment.deployment.name)
+            next_subnet = self.get_next_subnet(controller=controller_network.controller.name)
             cidr = str(next_subnet.cidr)
             ip_version = next_subnet.version
             start = str(next_subnet[2])
             end = str(next_subnet[-2])
             subnet = self.driver.create_subnet(name=network_name,
-                                               network_id = network_deployment.net_id,
+                                               network_id = controller_network.net_id,
                                                cidr_ip = cidr,
                                                ip_version = ip_version,
                                                start = start,
                                                end = end)
-            network_deployment.subnet = cidr
-            network_deployment.subnet_id = subnet['id']
+            controller_network.subnet = cidr
+            controller_network.subnet_id = subnet['id']
             # add subnet as interface to slice's router
             #self.driver.add_router_interface(router['id'], subnet['id'])
             # add external route
             #self.driver.add_external_route(subnet)
-            logger.info("created private subnet (%s) for network: %s" % (cidr, network_deployment.network))
+            logger.info("created private subnet (%s) for network: %s" % (cidr, controller_network.network))
 
         # Now, figure out the subnet and subnet_id for the network. This works
         # for both private and shared networks.
 
-        if (not network_deployment.subnet_id) or (not network_deployment.subnet):
-            (network_deployment.subnet_id, network_deployment.subnet) = self.driver.get_network_subnet(network_deployment.net_id)
-            logger.info("sync'ed subnet (%s) for network: %s" % (network_deployment.subnet, network_deployment.network))
+        if (not controller_network.subnet_id) or (not controller_network.subnet):
+            (controller_network.subnet_id, controller_network.subnet) = self.driver.get_network_subnet(controller_network.net_id)
+            logger.info("sync'ed subnet (%s) for network: %s" % (controller_network.subnet, controller_network.network))
 
-        if (not network_deployment.subnet):
+        if (not controller_network.subnet):
             # this will generate a non-null database constraint error
             #   ... which in turn leads to transaction errors
             # it's probably caused by networks that no longer exist at the
             # quantum level.
 
-            logger.info("null subnet for network %s, skipping save" % network_deployment.network)
+            logger.info("null subnet for network %s, skipping save" % controller_network.network)
             return
 
-        network_deployment.save()
+        controller_network.save()
 
-    def sync_record(self, network_deployment):
-        logger.info("sync'ing network deployment %s for network %s slice %s deployment %s" % (network_deployment, network_deployment.network, str(network_deployment.network.owner), network_deployment.deployment))
+    def sync_record(self, controller_network):
+        logger.info("sync'ing controller network %s for network %s slice %s controller %s" % (controller_network, controller_network.network, str(controller_network.network.owner), controller_network.controller))
 
-        if not network_deployment.deployment.admin_user:
-            logger.info("deployment %r has no admin_user, skipping" % network_deployment.deployment)
+        if not controller_network.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_network.controller)
             return
 
-        self.driver = self.driver.admin_driver(deployment=network_deployment.deployment,tenant='admin')
-        if network_deployment.network.owner and network_deployment.network.owner.creator:
+        self.driver = self.driver.admin_driver(controller=controller_network.controller,tenant='admin')
+        if controller_network.network.owner and controller_network.network.owner.creator:
             try:
                 # update manager context
 		# Bring back
-                self.save_network_deployment(network_deployment)
-                logger.info("saved network deployment: %s" % (network_deployment))
+                self.save_controller_network(controller_network)
+                logger.info("saved controller network: %s" % (controller_network))
             except Exception,e:
-                logger.log_exc("save network deployment failed: %s" % network_deployment)
+                logger.log_exc("save controller network failed: %s" % controller_network)
                 raise e
 
 
-    def delete_record(self, network_deployment):
-        driver = OpenStackDriver().client_driver(caller=network_deployment.network.owner.creator,
-                                                 tenant=network_deployment.network.owner.name,
-                                                 deployment=network_deployment.deployment.name)
-        if (network_deployment.router_id) and (network_deployment.subnet_id):
-            driver.delete_router_interface(network_deployment.router_id, network_deployment.subnet_id)
-        if network_deployment.subnet_id:
-            driver.delete_subnet(network_deployment.subnet_id)
-        if network_deployment.router_id:
-            driver.delete_router(network_deployment.router_id)
-        if network_deployment.net_id:
-            driver.delete_network(network_deployment.net_id)
+    def delete_record(self, controller_network):
+        driver = OpenStackDriver().client_driver(caller=controller_network.network.owner.creator,
+                                                 tenant=controller_network.network.owner.name,
+                                                 controller=controller_network.controller.name)
+        if (controller_network.router_id) and (controller_network.subnet_id):
+            driver.delete_router_interface(controller_network.router_id, controller_network.subnet_id)
+        if controller_network.subnet_id:
+            driver.delete_subnet(controller_network.subnet_id)
+        if controller_network.router_id:
+            driver.delete_router(controller_network.router_id)
+        if controller_network.net_id:
+            driver.delete_network(controller_network.net_id)
diff --git a/planetstack/openstack_observer/steps/sync_controller_site_deployments.py b/planetstack/openstack_observer/steps/sync_controller_site_deployments.py
new file mode 100644
index 0000000..89bea2c
--- /dev/null
+++ b/planetstack/openstack_observer/steps/sync_controller_site_deployments.py
@@ -0,0 +1,37 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import *
+from observer.ansible import *
+
+class SyncControllerSiteDeployments(OpenStackSyncStep):
+    requested_interval=0
+    provides=[ControllerSiteDeployments, SiteDeployments]
+
+    def sync_record(self, controller_site_deployment):
+
+	template = os_template_env.get_template('sync_controller_site_deployments.yaml')
+	tenant_fields = {'endpoint':controller_site_deployment.controller.auth_url,
+		         'admin_user': controller_site_deployment.controller.admin_user,
+		         'admin_password': controller_site_deployment.controller.admin_password,
+		         'admin_tenant': 'admin',
+		         'tenant': controller_site_deployment.site_deployment.site.login_base,
+		         'tenant_description': controller_site_deployment.site_deployment.site.name}
+
+	rendered = template.render(tenant_fields)
+	res = run_template('sync_controller_site_deployments.yaml', tenant_fields)
+
+	if (len(res)==1):
+		controller_site_deployment.tenant_id = res[0]['id']
+        	controller_site_deployment.save()
+	elif (len(res)):
+		raise Exception('Unexpected result while syncing tenant %s'%tenant_fields['tenant'])
+	else:
+		raise Exception('Could not create or update tenant %s'%tenant_fields['tenant'])
+            
+    def delete_record(self, controller_site_deployment):
+        if controller_site_deployment.tenant_id:
+            driver = self.driver.admin_driver(controller=controller_site_deployment.controller)
+            driver.delete_tenant(controller_site_deployment.tenant_id)
diff --git a/planetstack/openstack_observer/steps/sync_controller_site_deployments.yaml b/planetstack/openstack_observer/steps/sync_controller_site_deployments.yaml
new file mode 100644
index 0000000..4129802
--- /dev/null
+++ b/planetstack/openstack_observer/steps/sync_controller_site_deployments.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}"
diff --git a/planetstack/openstack_observer/steps/sync_controller_sites.py b/planetstack/openstack_observer/steps/sync_controller_sites.py
new file mode 100644
index 0000000..7e76cc4
--- /dev/null
+++ b/planetstack/openstack_observer/steps/sync_controller_sites.py
@@ -0,0 +1,37 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import *
+from observer.ansible import *
+
+class SyncControllerSites(OpenStackSyncStep):
+    requested_interval=0
+    provides=[Controller, Site]
+
+    def sync_record(self, controller):
+
+	template = os_template_env.get_template('sync_controller_sites.yaml')
+	tenant_fields = {'endpoint':controller.auth_url,
+		         'admin_user': controller.admin_user,
+		         'admin_password': controller.admin_password,
+		         'admin_tenant': 'admin',
+		         'tenant': controller.site_deployment.site.login_base,
+		         'tenant_description': controller.site_deployment.site.name}
+
+	rendered = template.render(tenant_fields)
+	res = run_template('sync_controller_sites.yaml', tenant_fields)
+
+	if (len(res)==1):
+		controller.tenant_id = res[0]['id']
+        	controller.save()
+	elif (len(res)):
+		raise Exception('Unexpected result while syncing tenant %s'%tenant_fields['tenant'])
+	else:
+		raise Exception('Could not create or update tenant %s'%tenant_fields['tenant'])
+            
+    def delete_record(self, controller):
+        if controller.tenant_id:
+            driver = self.driver.admin_driver(controller=controller)
+            driver.delete_tenant(controller.tenant_id)
diff --git a/planetstack/openstack_observer/steps/sync_controller_sites.yaml b/planetstack/openstack_observer/steps/sync_controller_sites.yaml
new file mode 100644
index 0000000..4129802
--- /dev/null
+++ b/planetstack/openstack_observer/steps/sync_controller_sites.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}"
diff --git a/planetstack/openstack_observer/steps/sync_controller_slices.py b/planetstack/openstack_observer/steps/sync_controller_slices.py
new file mode 100644
index 0000000..e51ba7e
--- /dev/null
+++ b/planetstack/openstack_observer/steps/sync_controller_slices.py
@@ -0,0 +1,105 @@
+import os
+import base64
+from collections import defaultdict
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import Controller, ControllerSites
+from core.models.slice import Slice, ControllerSlices
+from core.models.usercontrollers import ControllerUsers
+from util.logger import Logger, logging
+from observer.ansible import *
+
+logger = Logger(level=logging.INFO)
+
+class SyncControllerSlices(OpenStackSyncStep):
+    provides=[ControllerSlices]
+    requested_interval=0
+
+    def fetch_pending(self, deleted):
+        if (deleted):
+            return ControllerSlices.deleted_objects.all()
+        else:
+            return ControllerSlices.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def get_next_subnet(self, controller=None):
+        # limit ourself to 10.0.x.x for now
+        valid_subnet = lambda net: net.startswith('10.0')
+        driver = self.driver.admin_driver(controller=controller)
+        subnets = driver.shell.quantum.list_subnets()['subnets']
+        ints = [int(IPNetwork(subnet['cidr']).ip) for subnet in subnets \
+                if valid_subnet(subnet['cidr'])]
+        ints.sort()
+        if ints:
+            last_ip = IPAddress(ints[-1])
+        else:
+            last_ip = IPAddress('10.0.0.1')
+
+        last_network = IPNetwork(str(last_ip) + "/24")
+        next_network = IPNetwork(str(IPAddress(last_network) + last_network.size) + "/24")
+        return next_network
+
+
+    def sync_record(self, controller_slice):
+        logger.info("sync'ing slice controller %s" % controller_slice)
+
+        if not controller_slice.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_slice.controller)
+            return
+
+	controller_users = ControllerUsers.objects.filter(user=controller_slice.slice.creator,
+                                                             controller=controller_slice.controller)            
+    	if not controller_users:
+	    logger.info("slice creator %s has no account at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
+	    roles = []
+    	else:
+	    controller_user = controller_users[0]
+	    roles = ['admin']
+	    
+	max_instances=int(controller_slice.slice.max_slivers)
+	tenant_fields = {'endpoint':controller_slice.controller.auth_url,
+		         'admin_user': controller_slice.controller.admin_user,
+		         'admin_password': controller_slice.controller.admin_password,
+		         'admin_tenant': 'admin',
+		         'tenant': controller_slice.slice.name,
+		         'tenant_description': controller_slice.slice.description,
+			 'roles':roles,
+			 'name':controller_slice.slice.creator.email,
+			 'max_instances':max_instances}
+
+	res = run_template('sync_controller_slices.yaml', tenant_fields)
+	expected_num = len(roles)+1
+	if (len(res)!=expected_num):
+	    raise Exception('Could not sync tenants for slice %s'%controller_slice.slice.name)
+	else:
+	    tenant_id = res[0]['id']
+	    if (not controller_slice.tenant_id):
+	        handle = os.popen('nova quota-update --instances %d %s'%(max_instances,tenant_id))
+		output = handle.read()
+		result = handle.close()
+		if (result):
+		    logger.info('Could not update quota for %s'%controller_slice.slice.name)
+		controller_slice.tenant_id = tenant_id
+		controller_slice.save()
+			
+
+
+    def delete_record(self, controller_slice):
+        user = controller_slice.slice.creator
+        driver = OpenStackDriver().admin_driver(controller=controller_slice.controller.name)
+        client_driver = driver.client_driver(caller=user,
+                                             tenant=controller_slice.slice.name,
+                                             controller=controller_slice.controller.name)
+
+        if controller_slice.router_id and controller_slice.subnet_id:
+            client_driver.delete_router_interface(controller_slice.router_id, controller_slice.subnet_id)
+        if controller_slice.subnet_id:
+            client_driver.delete_subnet(controller_slice.subnet_id)
+        if controller_slice.router_id:    
+            client_driver.delete_router(controller_slice.router_id)
+        if controller_slice.network_id:
+            client_driver.delete_network(controller_slice.network_id)
+        if controller_slice.tenant_id:
+            driver.delete_tenant(controller_slice.tenant_id)
+        
diff --git a/planetstack/openstack_observer/steps/sync_controller_slices.yaml b/planetstack/openstack_observer/steps/sync_controller_slices.yaml
new file mode 100644
index 0000000..de1caf4
--- /dev/null
+++ b/planetstack/openstack_observer/steps/sync_controller_slices.yaml
@@ -0,0 +1,8 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}"
+  {% for role in roles %}
+  - keystone_user: endpoint={{ endpoint}} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} user="{{ name }}" role={{ role }} tenant={{ tenant }}
+  {% endfor %}
diff --git a/planetstack/openstack_observer/steps/sync_controller_users.py b/planetstack/openstack_observer/steps/sync_controller_users.py
new file mode 100644
index 0000000..63f5ca3
--- /dev/null
+++ b/planetstack/openstack_observer/steps/sync_controller_users.py
@@ -0,0 +1,86 @@
+import os
+import base64
+import hashlib
+from collections import defaultdict
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import ControllerSites, Controller
+from core.models.user import User
+from core.models.usercontrollers import ControllerUsers
+from util.logger import Logger, logging
+
+from observer.ansible import *
+
+logger = Logger(level=logging.INFO)
+
+class SyncControllerUsers(OpenStackSyncStep):
+    provides=[ControllerUsers, User]
+    requested_interval=0
+
+    def fetch_pending(self, deleted):
+
+        if (deleted):
+            return ControllerUsers.deleted_objects.all()
+        else:
+            return ControllerUsers.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None)) 
+
+    def sync_record(self, controller_user):
+        logger.info("sync'ing user %s at controller %s" % (controller_user.user, controller_user.controller))
+
+        if not controller_user.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_user.controller)
+            return
+
+	template = os_template_env.get_template('sync_controller_users.yaml')
+	
+        name = controller_user.user.email[:controller_user.user.email.find('@')]
+
+	roles = []
+	# setup user controller home site roles  
+        if controller_user.user.site:
+            site_controllers = ControllerSites.objects.filter(site=controller_user.user.site,
+                                                              controller=controller_user.controller)
+            if site_controllers:
+                # need the correct tenant id for site at the controller
+                tenant_id = site_controllers[0].tenant_id  
+		tenant_name = site_controllers[0].site.login_base
+
+		roles.append('user')
+                if controller_user.user.is_admin:
+                    roles.append('admin')
+	    else:
+		raise Exception('Internal error. Missing ControllerSites for user %s'%controller_user.user.email)
+	else:
+	    raise Exception('Siteless user %s'%controller_user.user.email)
+
+
+        user_fields = {'endpoint':controller_user.controller.auth_url,
+		       'name': controller_user.user.email,
+                       'email': controller_user.user.email,
+                       'password': hashlib.md5(controller_user.user.password).hexdigest()[:6],
+                       'admin_user': controller_user.controller.admin_user,
+		       'admin_password': controller_user.controller.admin_password,
+		       'admin_tenant': 'admin',
+		       'roles':roles,
+		       'tenant':tenant_name}    
+	
+	rendered = template.render(user_fields)
+	res = run_template('sync_controller_users.yaml', user_fields)
+
+	# results is an array in which each element corresponds to an 
+	# "ok" string received per operation. If we get as many oks as
+	# the number of operations we issued, that means a grand success.
+	# Otherwise, the number of oks tells us which operation failed.
+	expected_length = len(roles) + 1
+	if (len(res)==expected_length):
+        	controller_user.save()
+	elif (len(res)):
+		raise Exception('Could not assign roles for user %s'%user_fields['name'])
+	else:
+		raise Exception('Could not create or update user %s'%user_fields['name'])
+
+    def delete_record(self, controller_user):
+        if controller_user.kuser_id:
+            driver = self.driver.admin_driver(controller=controller_user.controller)
+            driver.delete_user(controller_user.kuser_id)
diff --git a/planetstack/openstack_observer/steps/sync_controller_users.yaml b/planetstack/openstack_observer/steps/sync_controller_users.yaml
new file mode 100644
index 0000000..95cdba3
--- /dev/null
+++ b/planetstack/openstack_observer/steps/sync_controller_users.yaml
@@ -0,0 +1,16 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - keystone_user: 
+       endpoint={{ endpoint }} 
+       user="{{ name }}" 
+       email={{ email }} 
+       password={{ password }} 
+       login_user={{ admin_user }} 
+       login_password={{ admin_password }} 
+       login_tenant_name={{ admin_tenant }} 
+       tenant={{ tenant }}
+  {% for role in roles %}
+  - keystone_user: endpoint={{ endpoint}} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} user="{{ name }}" role={{ role }} tenant={{ tenant }}
+  {% endfor %}
diff --git a/planetstack/openstack_observer/steps/sync_external_routes.py b/planetstack/openstack_observer/steps/sync_external_routes.py
deleted file mode 100644
index 28d24cc..0000000
--- a/planetstack/openstack_observer/steps/sync_external_routes.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import os
-import base64
-from planetstack.config import Config
-from observer.openstacksyncstep import OpenStackSyncStep
-from core.models.site import Deployment
-
-class SyncExternalRoutes(OpenStackSyncStep):
-    # XXX what does this provide?
-    provides=[]
-    requested_interval = 86400 # This step is slow like a pig. Let's run it infrequently
-
-    def call(self, **args):
-	deployments = Deployment.objects.all()
-	self.driver = self.driver.admin_driver(deployment=deployments[0],tenant='admin')
-        routes = self.driver.get_external_routes()
-        subnets = self.driver.shell.quantum.list_subnets()['subnets']
-        for subnet in subnets:
-            try:
-                self.driver.add_external_route(subnet, routes)
-            except:
-                logger.log_exc("failed to add external route for subnet %s" % subnet)
diff --git a/planetstack/openstack_observer/steps/sync_network_slivers.py b/planetstack/openstack_observer/steps/sync_network_slivers.py
index 06ee856..22cfa82 100644
--- a/planetstack/openstack_observer/steps/sync_network_slivers.py
+++ b/planetstack/openstack_observer/steps/sync_network_slivers.py
@@ -3,6 +3,7 @@
 from django.db.models import F, Q
 from planetstack.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
+from core.models import Controller
 from core.models.network import *
 from util.logger import Logger, logging
 
@@ -29,7 +30,7 @@
         networks = Network.objects.all()
         networks_by_id = {}
         for network in networks:
-            for nd in network.networkdeployments.all():
+            for nd in network.controllernetworks.all():
                 networks_by_id[nd.net_id] = network
 
         #logger.info("networks_by_id = ")
@@ -41,18 +42,18 @@
         for sliver in slivers:
             slivers_by_instance_id[sliver.instance_id] = sliver
 
-        # Get all ports in all deployments
+        # Get all ports in all controllers
 
         ports_by_id = {}
-        for deployment in Deployment.objects.all():
-            if not deployment.admin_tenant:
-                logger.info("deployment %s has no admin_tenant" % deployment.name)
+        for controller in Controller.objects.all():
+            if not controller.admin_tenant:
+                logger.info("controller %s has no admin_tenant" % controller)
                 continue
             try:
-                driver = self.driver.admin_driver(deployment=deployment.name,tenant='admin')
+                driver = self.driver.admin_driver(controller = controller,tenant='admin')
                 ports = driver.shell.quantum.list_ports()["ports"]
             except:
-                logger.log_exc("failed to get ports from deployment %s" % deployment.name)
+                logger.log_exc("failed to get ports from controller %s" % controller)
                 continue
 
             for port in ports:
@@ -139,7 +140,7 @@
             if (neutron_nat_list != nat_list):
                 logger.info("Setting nat:forward_ports for port %s network %s sliver %s to %s" % (str(networkSliver.port_id), str(networkSliver.network.id), str(networkSliver.sliver), str(nat_list)))
                 try:
-                    driver = self.driver.admin_driver(deployment=networkSliver.sliver.node.deployment,tenant='admin')
+                    driver = self.driver.admin_driver(controller=networkSliver.sliver.node.site_deployment.controller,tenant='admin')
                     driver.shell.quantum.update_port(networkSliver.port_id, {"port": {"nat:forward_ports": nat_list}})
                 except:
                     logger.log_exc("failed to update port with nat_list %s" % str(nat_list))
diff --git a/planetstack/openstack_observer/steps/sync_networks.py b/planetstack/openstack_observer/steps/sync_networks.py
index 99f2c01..a4e9552 100644
--- a/planetstack/openstack_observer/steps/sync_networks.py
+++ b/planetstack/openstack_observer/steps/sync_networks.py
@@ -5,7 +5,7 @@
 from observer.openstacksyncstep import OpenStackSyncStep
 from core.models.network import *
 from util.logger import Logger, logging
-from observer.steps.sync_network_deployments import *
+from observer.steps.sync_controller_networks import *
 
 logger = Logger(level=logging.INFO)
 
@@ -17,10 +17,10 @@
         network.save()
 
     def delete_record(self, network):
-        network_deployment_deleter = SyncNetworkDeployments().delete_record
-        for network_deployment in NetworkDeployments.objects.filter(network=network):
+        controller_network_deleter = SyncControllerNetworks().delete_record
+        for controller_network in ControllerNetworks.objects.filter(network=network):
             try:
-                network_deployment_deleter(network_deployment)    
+                controller_network_deleter(controller_network)    
             except Exception,e:
-                logger.log_exc("Failed to delete network deployment %s" % network_deployment)
+                logger.log_exc("Failed to delete controller network %s" % controller_network)
                 raise e
diff --git a/planetstack/openstack_observer/steps/sync_nodes.py b/planetstack/openstack_observer/steps/sync_nodes.py
index 3936311..abd5b98 100644
--- a/planetstack/openstack_observer/steps/sync_nodes.py
+++ b/planetstack/openstack_observer/steps/sync_nodes.py
@@ -6,7 +6,7 @@
 from planetstack.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
 from core.models.node import Node
-from core.models.site import Site, Deployment
+from core.models.site import SiteDeployments, Controller
 from util.logger import Logger, logging
 
 logger = Logger(level=logging.INFO)
@@ -22,28 +22,28 @@
             return []
 
         # collect local nodes
-        sites = Site.objects.all()
+        site_deployments = SiteDeployments.objects.all()
         nodes = Node.objects.all()
         node_hostnames = [node.name for node in nodes]
 
-        # fetch all nodes from each deployment
-        deployments = Deployment.objects.all()
+        # fetch all nodes from each controller
+        controllers = Controller.objects.all()
         new_nodes = []
-        for deployment in deployments:
+        for controller in controllers:
             try:
-                driver = self.driver.admin_driver(deployment=deployment.name)
+                driver = self.driver.admin_driver(controller=controller.name)
                 compute_nodes = driver.shell.nova.hypervisors.list()
             except:
-                logger.log_exc("Failed to get nodes from deployment %s" % str(deployment))
+                logger.log_exc("Failed to get nodes from controller %s" % str(controller))
                 continue
 
             for compute_node in compute_nodes:
                 if compute_node.hypervisor_hostname not in node_hostnames:
                     # XX TODO:figure out how to correctly identify a node's site.
                     # XX pick a random site to add the node to for now
-                    site_index = random.randint(0, len(sites))
+                    site_index = random.randint(0, len(site_deployments) - 1)
                     node = Node(name=compute_node.hypervisor_hostname,
-                                site=sites[site_index], deployment=deployment)
+                                site_deployment=site_deployments[site_index], controller=controller)
                     new_nodes.append(node)
 
         return new_nodes    
diff --git a/planetstack/openstack_observer/steps/sync_roles.py b/planetstack/openstack_observer/steps/sync_roles.py
index bb837db..91c0abb 100644
--- a/planetstack/openstack_observer/steps/sync_roles.py
+++ b/planetstack/openstack_observer/steps/sync_roles.py
@@ -4,7 +4,7 @@
 from planetstack.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
 from core.models.role import Role
-from core.models.site import SiteRole, Deployment, DeploymentRole
+from core.models.site import SiteRole, Controller, ControllerRole
 from core.models.slice import SliceRole
 
 class SyncRoles(OpenStackSyncStep):
@@ -18,24 +18,24 @@
 
         site_roles = SiteRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
         slice_roles = SliceRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
-        deployment_roles = DeploymentRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+        controller_roles = ControllerRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
 
         roles = []
         for site_role in site_roles:
             roles.append(site_role)
         for slice_role in slice_roles:
             roles.append(slice_role)
-        for deployment_role in deployment_roles:
-            roles.append(deployment_role)
+        for controller_role in controller_roles:
+            roles.append(controller_role)
 
         return roles
 
 
     def sync_record(self, role):
         if not role.enacted:
-            deployments = Deployment.objects.all()
-       	    for deployment in deployments:
-                driver = self.driver.admin_driver(deployment=deployment.name)
+            controllers = Controller.objects.all()
+       	    for controller in controllers:
+                driver = self.driver.admin_driver(controller=controller.name)
                 driver.create_role(role.role)
             role.save()
     
diff --git a/planetstack/openstack_observer/steps/sync_site_privileges.py b/planetstack/openstack_observer/steps/sync_site_privileges.py
index d07b279..2378b88 100644
--- a/planetstack/openstack_observer/steps/sync_site_privileges.py
+++ b/planetstack/openstack_observer/steps/sync_site_privileges.py
@@ -3,7 +3,7 @@
 from django.db.models import F, Q
 from planetstack.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
-from core.models import User, UserDeployments, SitePrivilege, SiteDeployments   
+from core.models import User, ControllerUsers, SitePrivilege, ControllerSites   
 
 class SyncSitePrivileges(OpenStackSyncStep):
     requested_interval=0
@@ -17,13 +17,13 @@
         return SitePrivilege.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
 
     def sync_record(self, site_priv):
-        # sync site privileges at all site deployments
-        site_deployments = SiteDeployments.objects.filter(site=site_priv.site)
-        for site_deployment in site_deployments:
-            user_deployments = UserDeployments.objects.filter(deployment=site_deployment.deployment)
-            if user_deployments:
-                kuser_id  = user_deployments[0].kuser_id
-                driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)
+        # sync site privileges at all site controllers
+        controller_sites = ControllerSites.objects.filter(site=site_priv.site)
+        for controller_site in controller_sites:
+            controller_users = ControllerUsers.objects.filter(controller=controller_site.controller)
+            if controller_users:
+                kuser_id  = controller_users[0].kuser_id
+                driver = self.driver.admin_driver(controller=controller_site.controller)
                 driver.add_user_role(kuser_id,
-                                     site_deployment.tenant_id,
+                                     controller_site.tenant_id,
                                      site_priv.role.role)
diff --git a/planetstack/openstack_observer/steps/sync_sites.py b/planetstack/openstack_observer/steps/sync_sites.py
index c560a6a..530301a 100644
--- a/planetstack/openstack_observer/steps/sync_sites.py
+++ b/planetstack/openstack_observer/steps/sync_sites.py
@@ -4,7 +4,7 @@
 from planetstack.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
 from core.models.site import Site
-from observer.steps.sync_site_deployments import *
+from observer.steps.sync_controller_sites import *
 
 class SyncSites(OpenStackSyncStep):
     provides=[Site]
@@ -14,7 +14,7 @@
         site.save()
 
     def delete_record(self, site):
-        site_deployments = SiteDeployments.objects.filter(site=site)
-        site_deployment_deleter = SyncSiteDeployments().delete_record
-        for site_deployment in site_deployments:
-            site_deployment_deleter(site_deployment)
+        controller_sites = ControllerSites.objects.filter(site=site)
+        controller_site_deleter = SyncControllerSites().delete_record
+        for controller_site in controller_sites:
+            controller_site_deleter(controller_site)
diff --git a/planetstack/openstack_observer/steps/sync_slice_memberships.py b/planetstack/openstack_observer/steps/sync_slice_memberships.py
index b1cd223..600f012 100644
--- a/planetstack/openstack_observer/steps/sync_slice_memberships.py
+++ b/planetstack/openstack_observer/steps/sync_slice_memberships.py
@@ -4,7 +4,7 @@
 from planetstack.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
 from core.models.slice import *
-from core.models.userdeployments import UserDeployments
+from core.models.controllerusers import ControllerUsers
 from util.logger import Logger, logging
 
 logger = Logger(level=logging.INFO)
@@ -20,17 +20,17 @@
         return SlicePrivilege.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
 
     def sync_record(self, slice_memb):
-        # sync slice memberships at all slice deployments 
+        # sync slice memberships at all slice controllers 
         logger.info("syncing slice privilege: %s %s" % (slice_memb.slice.name, slice_memb.user.email))
-        slice_deployments = SliceDeployments.objects.filter(slice=slice_memb.slice)
-        for slice_deployment in slice_deployments:
-            if not slice_deployment.tenant_id:
+        slice_controllers = ControllerSlices.objects.filter(slice=slice_memb.slice)
+        for slice_controller in slice_controllers:
+            if not slice_controller.tenant_id:
                 continue
-            user_deployments = UserDeployments.objects.filter(deployment=slice_deployment.deployment,
+            controller_users = ControllerUsers.objects.filter(controller=slice_controller.controller,
                                                               user=slice_memb.user)
-            if user_deployments:
-                kuser_id  = user_deployments[0].kuser_id
-                driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
+            if controller_users:
+                kuser_id  = controller_users[0].kuser_id
+                driver = self.driver.admin_driver(controller=slice_controller.controller.name)
                 driver.add_user_role(kuser_id,
-                                     slice_deployment.tenant_id,
+                                     slice_controller.tenant_id,
                                      slice_memb.role.role)
diff --git a/planetstack/openstack_observer/steps/sync_slices.py b/planetstack/openstack_observer/steps/sync_slices.py
index a6073b6..a792b48 100644
--- a/planetstack/openstack_observer/steps/sync_slices.py
+++ b/planetstack/openstack_observer/steps/sync_slices.py
@@ -4,9 +4,9 @@
 from django.db.models import F, Q
 from planetstack.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
-from core.models.slice import Slice, SliceDeployments
+from core.models.slice import Slice, ControllerSlices
 from util.logger import Logger, logging
-from observer.steps.sync_slice_deployments import *
+from observer.steps.sync_controller_slices import *
 
 logger = Logger(level=logging.INFO)
 
@@ -15,16 +15,16 @@
     requested_interval=0
 
     def sync_record(self, slice):
-        for slice_deployment in SliceDeployments.objects.filter(slice=slice):
+        for controller_slice in ControllerSlices.objects.filter(slice=slice):
             # bump the 'updated' timestamp and trigger observer to update
-            # slice across all deployments 
-            slice_deployment.save()    
+            # slice across all controllers 
+            controller_slice.save()    
 
     def delete_record(self, slice):
-        slice_deployment_deleter = SyncSliceDeployments().delete_record
-        for slice_deployment in SliceDeployments.objects.filter(slice=slice):
+        controller_slice_deleter = SyncControllerSlices().delete_record
+        for controller_slice in ControllerSlices.objects.filter(slice=slice):
             try:
-                slice_deployment_deleter(slice_deployment)
+                controller_slice_deleter(controller_slice)
             except Exception,e:
-                logger.log_exc("Failed to delete slice_deployment %s" % slice_deployment) 
+                logger.log_exc("Failed to delete controller_slice %s" % controller_slice) 
                 raise e
diff --git a/planetstack/openstack_observer/steps/sync_slivers.py b/planetstack/openstack_observer/steps/sync_slivers.py
index cf41446..de2c919 100644
--- a/planetstack/openstack_observer/steps/sync_slivers.py
+++ b/planetstack/openstack_observer/steps/sync_slivers.py
@@ -4,8 +4,8 @@
 from planetstack.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
 from core.models.sliver import Sliver
-from core.models.slice import Slice, SlicePrivilege, SliceDeployments
-from core.models.network import Network, NetworkSlice, NetworkDeployments
+from core.models.slice import Slice, SlicePrivilege, ControllerSlices
+from core.models.network import Network, NetworkSlice, ControllerNetworks
 from util.logger import Logger, logging
 from observer.ansible import *
 
@@ -24,7 +24,7 @@
         return userdata
 
     def sync_record(self, sliver):
-        logger.info("sync'ing sliver:%s slice:%s deployment:%s " % (sliver, sliver.slice.name, sliver.node.deployment))
+        logger.info("sync'ing sliver:%s slice:%s controller:%s " % (sliver, sliver.slice.name, sliver.node.site_controller))
 
         metadata_update = {}
         if (sliver.numberCores):
@@ -43,87 +43,74 @@
         if sliver.slice.creator.public_key:
             pubkeys.add(sliver.slice.creator.public_key)
 
-        nics = []
-        networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]
-        network_deployments = NetworkDeployments.objects.filter(network__in=networks,
-                                                                deployment=sliver.node.deployment)
+	nics = []
+	networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]   
+	controller_networks = ControllerNetworks.objects.filter(network__in=networks, 
+								controller=sliver.node.site_controller.controller)
 
-        for network_deployment in network_deployments:
-            if network_deployment.network.template.visibility == 'private' and \
-               network_deployment.network.template.translation == 'none' and network_deployment.net_id:
-                nics.append(network_deployment.net_id)
+	for controller_network in controller_networks:
+	    if controller_network.network.template.visibility == 'private' and \
+	       controller_network.network.template.translation == 'none' and controller_network.net_id: 
+		nics.append(controller_network.net_id)
 
         # now include network template
         network_templates = [network.template.sharedNetworkName for network in networks \
                              if network.template.sharedNetworkName]
 
-        #driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, deployment=sliver.deploymentNetwork)
-        driver = self.driver.admin_driver(tenant='admin', deployment=sliver.deploymentNetwork)
-        nets = driver.shell.quantum.list_networks()['networks']
-        for net in nets:
-            if net['name'] in network_templates:
-                nics.append(net['id'])
+        #driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, controller=sliver.controllerNetwork)
+        driver = self.driver.admin_driver(tenant='admin', controller=sliver.controllerNetwork)
+	nets = driver.shell.quantum.list_networks()['networks']
+	for net in nets:
+	    if net['name'] in network_templates: 
+		nics.append(net['id']) 
 
-        if (not nics):
-            for net in nets:
-                if net['name']=='public':
-                    nics.append(net['id'])
+	if (not nics):
+	    for net in nets:
+	        if net['name']=='public':
+	    	    nics.append(net['id'])
 
-        # look up image id
-        deployment_driver = self.driver.admin_driver(deployment=sliver.deploymentNetwork.name)
-        image_id = None
-        images = deployment_driver.shell.glanceclient.images.list()
-        for image in images:
-            if image.name == sliver.image.name or not image_id:
-                image_id = image.id
+	# look up image id
+	controller_driver = self.driver.admin_driver(controller=sliver.controllerNetwork.name)
+	image_id = None
+	images = controller_driver.shell.glanceclient.images.list()
+	for image in images:
+	    if image.name == sliver.image.name or not image_id:
+		image_id = image.id
+		
+	# look up key name at the controller
+	# create/fetch keypair
+	keyname = None
+	keyname = sliver.creator.email.lower().replace('@', 'AT').replace('.', '') +\
+		  sliver.slice.name
+	key_fields =  {'name': keyname,
+		       'public_key': sliver.creator.public_key}
+	    
 
-        # look up key name at the deployment
-        # create/fetch keypair
-        keyname = None
-        keyname = sliver.creator.email.lower().replace('@', 'AT').replace('.', '') +\
-                  sliver.slice.name
-        key_fields =  {'name': keyname,
-                       'public_key': sliver.creator.public_key}
+	userData = self.get_userdata(sliver)
+	if sliver.userData:
+	    userData = sliver.userData
+	    
+	sliver_name = '@'.join([sliver.slice.name,sliver.node.name])
+	tenant_fields = {'endpoint':sliver.node.controller.auth_url,
+		     'admin_user': sliver.node.controller.admin_user,
+		     'admin_password': sliver.node.controller.admin_password,
+		     'admin_tenant': 'admin',
+		     'tenant': sliver.slice.name,
+		     'tenant_description': sliver.slice.description,
+		     'name':sliver_name,
+		     'image_id':image_id,
+		     'key_name':keyname,
+		     'flavor_id':1,
+		     'nics':nics,
+		     'meta':metadata_update,
+		     'key':key_fields,
+		     'user_data':r'%s'%escape(userData)}
 
-
-        userData = self.get_userdata(sliver)
-        if sliver.userData:
-            userData = sliver.userData
-
-        try:
-            legacy = Config().observer_legacy
-        except:
-            legacy = False
-
-        if (legacy):
-            host_filter = sliver.node.name.split('.',1)[0]
-        else:
-            host_filter = sliver.node.name
-
-        availability_zone_filter = 'nova:%s'%host_filter
-        sliver_name = '@'.join([sliver.slice.name,sliver.node.name])
-        tenant_fields = {'endpoint':sliver.node.deployment.auth_url,
-                     'admin_user': sliver.node.deployment.admin_user,
-                     'admin_password': sliver.node.deployment.admin_password,
-                     'admin_tenant': 'admin',
-                     'tenant': sliver.slice.name,
-                     'availability_zone': availability_zone_filter,
-                     'tenant_description': sliver.slice.description,
-                     'name':sliver_name,
-                     'ansible_tag':sliver_name,
-                     'image_id':image_id,
-                     'key_name':keyname,
-                     'flavor_id':3,
-                     'nics':nics,
-                     'meta':metadata_update,
-                     'key':key_fields,
-                     'user_data':r'%s'%escape(userData)}
-
-        res = run_template('sync_slivers.yaml', tenant_fields, path='slivers')
-        if (len(res)!=2):
-            raise Exception('Could not sync sliver %s'%sliver.slice.name)
-        else:
-            sliver_id = res[1]['id'] # 0 is for the key
+	res = run_template('sync_slivers.yaml', tenant_fields)
+	if (len(res)!=2):
+	    raise Exception('Could not sync sliver %s'%sliver.slice.name)
+	else:
+	    sliver_id = res[1]['id'] # 0 is for the key
 
             sliver.instance_id = sliver_id
             sliver.instance_name = sliver_name
@@ -135,4 +122,3 @@
                          'ansible_tag':sliver_name
                         }
         res = run_template('delete_slivers.yaml', tenant_fields, path='slivers')
-
diff --git a/planetstack/openstack_observer/steps/sync_users.py b/planetstack/openstack_observer/steps/sync_users.py
index a22c213..3c69cad 100644
--- a/planetstack/openstack_observer/steps/sync_users.py
+++ b/planetstack/openstack_observer/steps/sync_users.py
@@ -5,20 +5,20 @@
 from planetstack.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
 from core.models.user import User
-from core.models.userdeployments import  UserDeployments
-from observer.steps.sync_user_deployments import SyncUserDeployments
+from core.models.controllerusers import  ControllerUsers
+from observer.steps.sync_controller_users import SyncControllerUsers
 
 class SyncUsers(OpenStackSyncStep):
     provides=[User]
     requested_interval=0
 
     def sync_record(self, user):
-        for user_deployment in UserDeployments.objects.filter(user=user):
+        for controller_user in ControllerUsers.objects.filter(user=user):
             # bump the 'updated' field so user account are updated across 
-            # deployments.
-            user_deployment.save()
+            # controllers.
+            controller_user.save()
 
     def delete_record(self, user):
-        user_deployment_deleter = SyncUserDeployments().delete_record
-        for user_deployment in UserDeployments.objects.filter(user=user):
-            user_deployment_deleter(user_deployment)
+        controller_user_deleter = SyncControllerUsers().delete_record
+        for controller_user in ControllerUsers.objects.filter(user=user):
+            controller_user_deleter(controller_user)