observer now supports remote openstack deployments
diff --git a/planetstack/observer/deleters/site_deleter.py b/planetstack/observer/deleters/site_deleter.py
index bb29c94..a558eaf 100644
--- a/planetstack/observer/deleters/site_deleter.py
+++ b/planetstack/observer/deleters/site_deleter.py
@@ -1,5 +1,5 @@
 from core.models import Site
-from observer.delete import Deleter
+from observer.deleter import Deleter
 
 class SiteDeleter(Deleter):
     model='Site'
diff --git a/planetstack/observer/steps/garbage_collector.py b/planetstack/observer/steps/garbage_collector.py
index a13fd22..34ecd77 100644
--- a/planetstack/observer/steps/garbage_collector.py
+++ b/planetstack/observer/steps/garbage_collector.py
@@ -6,6 +6,7 @@
 from planetstack.config import Config
 from util.logger import Logger, logging
 from observer.openstacksyncstep import OpenStackSyncStep
+from deployment_auth import deployment_auth
 from core.models import *
 
 logger = Logger(level=logging.INFO)
@@ -69,17 +70,20 @@
             slice_dict[slice.name] = slice
 
         # delete keystone tenants that don't have a site record
-        tenants = self.driver.shell.keystone.tenants.findall()
-        system_tenants = ['admin','service', 'invisible_to_admin']
-        for tenant in tenants:
-            if tenant.name in system_tenants: 
-                continue
-            if tenant.name not in site_dict and tenant.name not in slice_dict:
-                try:
-                    self.driver.delete_tenant(tenant.id)
-                    logger.info("deleted tenant: %s" % (tenant))
-                except:
-                    logger.log_exc("delete tenant failed: %s" % tenant)
+        for deployment in deployment_auth:
+            driver = self.driver.admin_driver(deployment=deployment)
+            tenants = driver.shell.keystone.tenants.findall()
+
+            system_tenants = ['admin','service', 'invisible_to_admin']
+            for tenant in tenants:
+                if tenant.name in system_tenants: 
+                    continue
+                if tenant.name not in site_dict and tenant.name not in slice_dict:
+                    try:
+                        driver.delete_tenant(tenant.id)
+                        logger.info("deleted tenant: %s" % (tenant))
+                    except:
+                        logger.log_exc("delete tenant failed: %s" % tenant)
 
 
     def gc_users(self):
@@ -96,16 +100,18 @@
 
         # delete keystone users that don't have a user record
         system_users = ['admin', 'nova', 'quantum', 'glance', 'cinder', 'swift', 'service', 'demo']
-        users = self.driver.shell.keystone.users.findall()
-        for user in users:
-            if user.name in system_users:
-                continue
-            if user.id not in user_dict:
-                try:
-                    self.driver.delete_user(user.id)
-                    logger.info("deleted user: %s" % user)
-                except:
-                    logger.log_exc("delete user failed: %s" % user)
+        for deployment in deployment_auth:
+            driver = self.driver.admin_driver(deployment=deployment)
+            users = driver.shell.keystone.users.findall()
+            for user in users:
+                if user.name in system_users:
+                    continue
+                if user.id not in user_dict:
+                    try:
+                        self.driver.delete_user(user.id)
+                        logger.info("deleted user: %s" % user)
+                    except:
+                        logger.log_exc("delete user failed: %s" % user)
                     
 
     def gc_user_tenant_roles(self):
@@ -126,39 +132,43 @@
         # 2. Never remove a user's role at a slice they've created.
         # Keep track of all roles that must be preserved.     
         users = User.objects.all()
-        preserved_roles = {}
-        for user in users:
-            tenant_ids = [s['tenant_id'] for s in user.slices.values()]
-            if user.site:
-                tenant_ids.append(user.site.tenant_id) 
-            preserved_roles[user.kuser_id] = tenant_ids
+        for deployment in deployment_auth:
+            driver = self.driver.admin_driver(deployment=deployment)
+            tenants = driver.shell.keystone.tenants.list() 
+            for user in users:
+                # skip admin roles
+                if user.kuser_id == self.driver.admin_user.id:
+                    continue
+     
+                ignore_tenant_ids = []
+                k_user = driver.shell.keystone.users.find(id=user.kuser_id)
+                ignore_tenant_ids = [s['tenant_id'] for s in user.slices.values()]
+                if user.site:
+                    ignore_tenant_ids.append(user.site.tenant_id) 
 
- 
-        # begin removing user tenant roles from keystone. This is stored in the 
-        # Metadata table.
-        for metadata in self.driver.shell.keystone_db.get_metadata():
-            # skip admin roles
-            if metadata.user_id == self.driver.admin_user.id:
-                continue
-            # skip preserved tenant ids
-            if metadata.user_id in preserved_roles and \
-               metadata.tenant_id in preserved_roles[metadata.user_id]: 
-                continue           
-            # get roles for user at this tenant
-            user_tenant_role_ids = user_tenant_roles.get((metadata.user_id, metadata.tenant_id), [])
+                # get user roles in keystone
+                for tenant in tenants:
+                    # skip preserved tenant ids
+                    if tenant.id in ignore_tenant_ids: 
+                        continue          
+                    # compare user tenant roles
+                    user_tenant_role_ids = user_tenant_roles.get((user.kuser_id, tenant.id), [])
 
-            if user_tenant_role_ids:
-                # The user has roles at the tenant. Check if roles need to 
-                # be updated.
-                user_keystone_role_ids = metadata.data.get('roles', [])
-                for role_id in user_keystone_role_ids:
-                    if role_id not in user_tenant_role_ids: 
-                        user_keystone_role_ids.pop(user_keystone_role_ids.index(role_id))
-            else:
-                # The user has no roles at this tenant. 
-                metadata.data['roles'] = [] 
-            #session.add(metadata)
-            logger.info("pruning metadata for %s at %s" % (metadata.user_id, metadata.tenant_id))
+                    if user_tenant_role_ids:
+                        # The user has roles at the tenant. Check if roles need to 
+                        # be updated.
+                        k_user_roles =  driver.shell.keystone.roles.roles_for_user(k_user, tenant)
+                        for k_user_role in k_user_roles:
+                            if k_user_role.role_id not in user_tenant_role_ids: 
+                                driver.shell.keystone.remove_user_role(k_user, k_user_role, tenant) 
+                                logger.info("removed user role %s for %s at %s" % \
+                                           (k_user_role, k_user.username, tenant.name))
+                    else:
+                        # remove all roles the user has at the tenant. 
+                        for k_user_role in driver.shell.keystone.roles.roles_for_user(k_user, tenant):
+                            driver.shell.keystone.remove_user_role(k_user, k_user_role, tenant) 
+                            logger.info("removed user role %s for %s at %s" % \
+                                       (k_user_role, k_user.username, tenant.name))
  
     def gc_slivers(self):
         """
@@ -172,20 +182,17 @@
         for sliver in slivers:
             sliver_dict[sliver.instance_id] = sliver
 
-        # delete sliver that don't have a sliver record
-        ctx = self.driver.shell.nova_db.ctx 
-        instances = self.driver.shell.nova_db.instance_get_all(ctx)
-        for instance in instances:
-            if instance.uuid not in sliver_dict:
-                try:
-                    # lookup tenant and update context  
-                    tenant = self.driver.shell.keystone.tenants.find(id=instance.project_id)
-                    driver = self.driver.client_driver(tenant=tenant.name) 
-                    driver.destroy_instance(instance.uuid)
-                    logger.info("destroyed sliver: %s" % (instance))
-                except:
-                    logger.log_exc("destroy sliver failed: %s" % instance) 
-                
+        for tenant in self.driver.shell.keystone.tenants.list():
+            # delete sliver that don't have a sliver record
+            tenant_driver = self.driver.client_driver(tenant=tenant.name, deployment=sliver.node.deployment)
+            for instance in tenant_driver.nova.servers.list():
+                if instance.uuid not in sliver_dict:
+                    try:
+                        tenant_driver.destroy_instance(instance.uuid)
+                        logger.info("destroyed sliver: %s" % (instance))
+                    except:
+                        logger.log_exc("destroy sliver failed: %s" % instance)
+               
 
     def gc_sliver_ips(self):
         """
@@ -195,7 +202,8 @@
         slivers = Sliver.objects.filter(ip=None)
         for sliver in slivers:
             # update connection
-            driver = self.driver.client_driver(tenant=sliver.slice.name)
+            
+            driver = self.driver.client_driver(tenant=sliver.slice.name, deployment=sliver.node.deployment)
             servers = driver.shell.nova.servers.findall(id=sliver.instance_id)
             if not servers:
                 continue
@@ -217,10 +225,12 @@
             nodes_dict[node.name] = node
 
         # collect nova nodes:
-        compute_nodes = self.client.nova.hypervisors.list()
         compute_nodes_dict = {}
-        for compute_node in compute_nodes:
-            compute_nodes_dict[compute_node.hypervisor_hostname] = compute_node
+        for deployment in deployment_auth:
+            driver = self.driver.admin_driver(deployment=deployment) 
+            compute_nodes = driver.nova.hypervisors.list()
+            for compute_node in compute_nodes:
+                compute_nodes_dict[compute_node.hypervisor_hostname] = compute_node
 
         # remove old nodes
         old_node_names = set(nodes_dict.keys()).difference(compute_nodes_dict.keys())
@@ -234,10 +244,12 @@
             images_dict[image.name] = image
 
         # collect glance images
-        glance_images = self.driver.shell.glance.get_images()
         glance_images_dict = {}
-        for glance_image in glance_images:
-            glance_images_dict[glance_image['name']] = glance_image
+        for deployment in deployment_auth:
+            driver = self.driver.admin_driver(deployment=deployment)
+            glance_images = driver.shell.glance.get_images()
+            for glance_image in glance_images:
+                glance_images_dict[glance_image['name']] = glance_image
 
         # remove old images
         old_image_names = set(images_dict.keys()).difference(glance_images_dict.keys())
diff --git a/planetstack/observer/steps/sync_network_slivers.py b/planetstack/observer/steps/sync_network_slivers.py
index 09dc7ed..7e69330 100644
--- a/planetstack/observer/steps/sync_network_slivers.py
+++ b/planetstack/observer/steps/sync_network_slivers.py
@@ -30,7 +30,8 @@
         for sliver in slivers:
             slivers_by_instance_id[sliver.instance_id] = sliver
 
-        ports = self.driver.shell.quantum.list_ports()["ports"]
+        driver = self.driver.admin_driver(caller=sliver.creator, tenant=sliver.slice.name, deployment=sliver.node.deployment.name)
+        ports = driver.shell.quantum.list_ports()["ports"]
         for port in ports:
             if port["id"] in networkSlivers_by_port:
                 # we already have it
diff --git a/planetstack/observer/steps/sync_nodes.py b/planetstack/observer/steps/sync_nodes.py
index a1f0803..bef0ca0 100644
--- a/planetstack/observer/steps/sync_nodes.py
+++ b/planetstack/observer/steps/sync_nodes.py
@@ -14,28 +14,27 @@
     requested_interval=0
 
     def fetch_pending(self):
-        config = Config()
-        deployment = Deployment.objects.filter(name=config.plc_deployment)[0]
-        login_bases = ['princeton', 'stanford', 'gt', 'uw', 'mpisws']
-        sites = Site.objects.filter(login_base__in=login_bases)
-        
         # collect local nodes
+        sites = Site.objects.all()
         nodes = Node.objects.all()
         node_hostnames  = [node.name for node in nodes]
 
-        # collect nova nodes
-        # generate list of new nodes
-        new_nodes = []
-        compute_nodes = self.driver.shell.nova.hypervisors.list()
-        for compute_node in compute_nodes:
-            if compute_node.hypervisor_hostname not in node_hostnames:
-                # pick a random site to add the node to for now
-                site_index = random.randint(0, len(sites))
-                node = Node(name=compute_node.hypervisor_hostname, 
-                            site=sites[site_index], deployment=deployment)
-                new_nodes.append(node) 
-        
-        return new_nodes
+        # fetch all nodes from each deployment
+        new_nodes = []
+        deployments = Deployment.objects.all()
+        for deployment in deployments:
+            driver = self.driver.admin_driver(deployment=deployment.name)
+            compute_nodes = driver.shell.nova.hypervisors.list()
+            for compute_node in compute_nodes:
+                if compute_node.hypervisor_hostname not in node_hostnames:
+                    # XX TODO:figure out how to correctly identify a node's site.
+                    # XX pick a random site to add the node to for now
+                    site_index = random.randint(0, len(sites) - 1)
+                    node = Node(name=compute_node.hypervisor_hostname,
+                                site=sites[site_index], deployment=deployment)
+                    new_nodes.append(node)
+
+        return new_nodes
 
     def sync_record(self, node):
         node.save()
diff --git a/planetstack/observer/steps/sync_site_deployments.py b/planetstack/observer/steps/sync_site_deployments.py
new file mode 100644
index 0000000..a996c85
--- /dev/null
+++ b/planetstack/observer/steps/sync_site_deployments.py
@@ -0,0 +1,28 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import *
+
+class SyncSiteDeployments(OpenStackSyncStep):
+    requested_interval=0
+    provides=[Site, SiteDeployments]
+
+    def fetch_pending(self):
+        return SiteDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def sync_record(self, site_deployment):
+        if not site_deployment.tenant_id:
+            driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)
+            tenant = driver.create_tenant(tenant_name=site_deployment.site.login_base,
+                                               description=site_deployment.site.name,
+                                               enabled=site_deployment.site.enabled)
+            site_deployment.tenant_id = tenant.id
+            site_deployment.save()
+        elif site_deployment.site.id and site_deployment.tenant_id:
+            driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)
+            driver.update_tenant(site_deployment.tenant_id,
+                                 description=site_deployment.site.name,
+                                 enabled=site_deployment.site.enabled)
+            
diff --git a/planetstack/observer/steps/sync_site_privileges.py b/planetstack/observer/steps/sync_site_privileges.py
index 8287d44..922f579 100644
--- a/planetstack/observer/steps/sync_site_privileges.py
+++ b/planetstack/observer/steps/sync_site_privileges.py
@@ -4,6 +4,7 @@
 from planetstack.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
 from core.models.site import *
+from core.models.user import UserDeployments    
 
 class SyncSitePrivileges(OpenStackSyncStep):
     requested_interval=0
@@ -17,3 +18,14 @@
             self.driver.add_user_role(site_priv.user.kuser_id,
                                       site_priv.site.tenant_id,
                                       site_priv.role.role) 
+
+        # sync site privileges at all site deployments
+        site_deployments = SiteDeployments.objects.filter(site=site_priv.site)
+        for site_deployment in site_deployments:
+            user_deployments = UserDeployments.objects.filter(deployment=site_deployment.deployment)
+            if user_deployments:
+                kuser_id  = user_deployments[0].kuser_id
+                driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)
+                driver.add_user_role(kuser_id,
+                                     site_deployment.tenant_id,
+                                     site_priv.role.role)
diff --git a/planetstack/observer/steps/sync_sites.py b/planetstack/observer/steps/sync_sites.py
index 2013c6d..e128e9a 100644
--- a/planetstack/observer/steps/sync_sites.py
+++ b/planetstack/observer/steps/sync_sites.py
@@ -13,22 +13,5 @@
         return Site.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
 
     def sync_record(self, site):
-        save_site = False
-        if not site.tenant_id:
-            tenant = self.driver.create_tenant(tenant_name=site.login_base,
-                                               description=site.name,
-                                               enabled=site.enabled)
-            site.tenant_id = tenant.id
-            save_site = True
-            # XXX - What's caller?
-            # self.driver.add_user_role(self.caller.kuser_id, tenant.id, 'admin')
-
-        # update the record
-        if site.id and site.tenant_id:
-            self.driver.update_tenant(site.tenant_id,
-                                      description=site.name,
-                                      enabled=site.enabled)
-
-        if (save_site):
-            site.save() # 
+        site.save()
 
diff --git a/planetstack/observer/steps/sync_slice_deployments.py b/planetstack/observer/steps/sync_slice_deployments.py
new file mode 100644
index 0000000..0f92796
--- /dev/null
+++ b/planetstack/observer/steps/sync_slice_deployments.py
@@ -0,0 +1,105 @@
+import os
+import base64
+from collections import defaultdict
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import SiteDeployments
+from core.models.slice import Slice, SliceDeployments
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncSliceDeployments(OpenStackSyncStep):
+    provides=[Slice, SliceDeployments]
+    requested_interval=0
+
+    def fetch_pending(self):
+        # slice deployments are not visible to users. We must ensure
+        # slices are deployed at all deploymets available to their site.
+        site_deployments = SiteDeployments.objects.all()
+        site_deploy_lookup = defaultdict(list)
+        for site_deployment in site_deployments:
+            site_deploy_lookup[site_deployment.site].append(site_deployment.deployment)
+        
+        slice_deployments = SliceDeployments.objects.all()
+        slice_deploy_lookup = defaultdict(list)
+        for slice_deployment in slice_deployments:
+            slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
+        
+        for slice in Slice.objects.all():
+            expected_deployments = site_deploy_lookup[slice.site]
+            for expected_deployment in expected_deployments:
+                if slice not in slice_deploy_lookup or \
+                   expected_deployment not in slice_deploy_lookup[slice]:
+                    sd = SliceDeployments(slice=slice, deployment=expected_deployment)
+                    sd.save()
+
+        # now we can return all slice deployments that need to be enacted   
+        return SliceDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def get_next_subnet(self, deployment=None):
+        # limit ourself to 10.0.x.x for now
+        valid_subnet = lambda net: net.startswith('10.0')
+        driver = self.driver.admin_driver(deployment=deployment)
+        subnets = driver.shell.quantum.list_subnets()['subnets']
+        ints = [int(IPNetwork(subnet['cidr']).ip) for subnet in subnets \
+                if valid_subnet(subnet['cidr'])]
+        ints.sort()
+        last_ip = IPAddress(ints[-1])
+        last_network = IPNetwork(str(last_ip) + "/24")
+        next_network = IPNetwork(str(IPAddress(last_network) + last_network.size) + "/24")
+        return next_network
+
+    def sync_record(self, slice_deployment):
+        logger.info("sync'ing slice deployment %s" % slice_deployment)
+        if not slice_deployment.tenant_id:
+            nova_fields = {'tenant_name': slice_deployment.slice.name,
+                   'description': slice_deployment.slice.description,
+                   'enabled': slice_deployment.slice.enabled}
+            driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
+            tenant = driver.create_tenant(**nova_fields)
+            slice_deployment.tenant_id = tenant.id
+
+            # XXX give caller an admin role at the tenant they've created
+            driver.add_user_role(slice_deployment.slice.creator.kuser_id, tenant.id, 'admin')
+
+            # refresh credentials using this tenant
+            client_driver = self.driver.client_driver(tenant=tenant.name, 
+                                                      deployment=slice_deployment.deployment.name)
+
+            # create network
+            network = client_driver.create_network(slice_deployment.slice.name)
+            slice_deployment.network_id = network['id']
+
+            # create router
+            router = client_driver.create_router(slice_deployment.slice.name)
+            slice_deployment.router_id = router['id']
+
+            # create subnet for slice's private network
+            next_subnet = self.get_next_subnet(deployment=slice_deployment.deployment.name)
+            cidr = str(next_subnet.cidr)
+            ip_version = next_subnet.version
+            start = str(next_subnet[2])
+            end = str(next_subnet[-2]) 
+            subnet = client_driver.create_subnet(name=slice_deployment.slice.name,
+                                               network_id = network['id'],
+                                               cidr_ip = cidr,
+                                               ip_version = ip_version,
+                                               start = start,
+                                               end = end)
+            slice_deployment.subnet_id = subnet['id']
+            # add subnet as interface to slice's router
+            client_driver.add_router_interface(router['id'], subnet['id'])
+            # add external route
+            client_driver.add_external_route(subnet)
+
+
+        if slice_deployment.id and slice_deployment.tenant_id:
+            driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
+            driver.update_tenant(slice_deployment.tenant_id,
+                                 description=slice_deployment.slice.description,
+                                 enabled=slice_deployment.slice.enabled)   
+
+        slice_deployment.save()
diff --git a/planetstack/observer/steps/sync_slice_memberships.py b/planetstack/observer/steps/sync_slice_memberships.py
index ffc6eb8..6def794 100644
--- a/planetstack/observer/steps/sync_slice_memberships.py
+++ b/planetstack/observer/steps/sync_slice_memberships.py
@@ -4,6 +4,7 @@
 from planetstack.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
 from core.models.slice import *
+from core.models.user import UserDeployments
 
 class SyncSliceMemberships(OpenStackSyncStep):
     requested_interval=0
@@ -17,3 +18,14 @@
                 self.driver.add_user_role(slice_memb.user.kuser_id,
                                           slice_memb.slice.tenant_id,
                                           slice_memb.role.role)
+
+        # sync slice memberships at all slice deployments 
+        slice_deployments = SliceDeployments.objects.filter(slice=slice_memb.slice)
+        for slice_deployment in slice_deployments:
+            user_deployments = UserDeployments.objects.filter(deployment=slice_deployment.deployment)
+            if user_deployments:
+                kuser_id  = user_deployments[0].kuser_id
+                driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
+                driver.add_user_role(kuser_id,
+                                     slice_deployment.tenant_id,
+                                     slice_memb.role.role)
diff --git a/planetstack/observer/steps/sync_slices.py b/planetstack/observer/steps/sync_slices.py
index f91c0fc..cc1220c 100644
--- a/planetstack/observer/steps/sync_slices.py
+++ b/planetstack/observer/steps/sync_slices.py
@@ -16,63 +16,5 @@
     def fetch_pending(self):
         return Slice.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
 
-    def get_next_subnet(self):
-        # limit ourself to 10.0.x.x for now
-        valid_subnet = lambda net: net.startswith('10.0')
-        subnets = self.driver.shell.quantum.list_subnets()['subnets']
-        ints = [int(IPNetwork(subnet['cidr']).ip) for subnet in subnets \
-                if valid_subnet(subnet['cidr'])]
-        ints.sort()
-        last_ip = IPAddress(ints[-1])
-        last_network = IPNetwork(str(last_ip) + "/24")
-        next_network = IPNetwork(str(IPAddress(last_network) + last_network.size) + "/24")
-        return next_network
-
     def sync_record(self, slice):
-        logger.info("sync'ing slice %s" % slice.name)
-        if not slice.tenant_id:
-            nova_fields = {'tenant_name': slice.name,
-                   'description': slice.description,
-                   'enabled': slice.enabled}
-            tenant = self.driver.create_tenant(**nova_fields)
-            slice.tenant_id = tenant.id
-
-            # XXX give caller an admin role at the tenant they've created
-            self.driver.add_user_role(slice.creator.kuser_id, tenant.id, 'admin')
-
-            # refresh credentials using this tenant
-            client_driver = self.driver.client_driver(tenant=tenant.name)
-
-            # create network
-            network = client_driver.create_network(slice.name)
-            slice.network_id = network['id']
-
-            # create router
-            router = client_driver.create_router(slice.name)
-            slice.router_id = router['id']
-
-            # create subnet for slice's private network
-            next_subnet = self.get_next_subnet()
-            cidr = str(next_subnet.cidr)
-            ip_version = next_subnet.version
-            start = str(next_subnet[2])
-            end = str(next_subnet[-2]) 
-            subnet = client_driver.create_subnet(name=slice.name,
-                                               network_id = network['id'],
-                                               cidr_ip = cidr,
-                                               ip_version = ip_version,
-                                               start = start,
-                                               end = end)
-            slice.subnet_id = subnet['id']
-            # add subnet as interface to slice's router
-            client_driver.add_router_interface(router['id'], subnet['id'])
-            # add external route
-            client_driver.add_external_route(subnet)
-
-
-        if slice.id and slice.tenant_id:
-            self.driver.update_tenant(slice.tenant_id,
-                                      description=slice.description,
-                                      enabled=slice.enabled)   
-
         slice.save()
diff --git a/planetstack/observer/steps/sync_sliver_ips.py b/planetstack/observer/steps/sync_sliver_ips.py
index 2d7f1f8..83e33eb 100644
--- a/planetstack/observer/steps/sync_sliver_ips.py
+++ b/planetstack/observer/steps/sync_sliver_ips.py
@@ -14,7 +14,8 @@
         return slivers
 
     def sync_record(self, sliver):
-        driver = self.driver.client_driver(tenant=sliver.slice.name)  
+        driver = self.driver.client_driver(tenant=sliver.slice.name, 
+                                           deployment=sliver.node.deployment.name)  
         servers = driver.shell.nova.servers.findall(id=sliver.instance_id)
         if not servers:
             return
diff --git a/planetstack/observer/steps/sync_slivers.py b/planetstack/observer/steps/sync_slivers.py
index a3f423c..03383d9 100644
--- a/planetstack/observer/steps/sync_slivers.py
+++ b/planetstack/observer/steps/sync_slivers.py
@@ -40,7 +40,7 @@
             slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
             pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
             pubkeys.append(sliver.creator.public_key)
-            driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name)
+            driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, deployment=sliver.node.deployment.name)
             instance = driver.spawn_instance(name=sliver.name,
                                 key_name = sliver.creator.keyname,
                                 image_id = sliver.image.image_id,
diff --git a/planetstack/observer/steps/sync_user_deployments.py b/planetstack/observer/steps/sync_user_deployments.py
new file mode 100644
index 0000000..e7454b8
--- /dev/null
+++ b/planetstack/observer/steps/sync_user_deployments.py
@@ -0,0 +1,77 @@
+import os
+import hashlib
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import SiteDeployments
+from core.models.user import User, UserDeployments
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncUserDeployments(OpenStackSyncStep):
+    provides=[User, UserDeployments]
+    requested_interval=0
+
+    def fetch_pending(self):
+        # user deployments are not visible to users. We must ensure
+        # user are deployed at all deploymets available to their sites.
+        site_deployments = SiteDeployments.objects.all()
+        site_deploy_lookup = defaultdict(list)
+        for site_deployment in site_deployments:
+            site_deploy_lookup[site_deployment.site].append(site_deployment.deployment)
+        
+        user_deployments = UserDeployments.objects.all()
+        user_deploy_lookup = defaultdict(list)
+        for user_deployment in user_deployments:
+            user_deploy_lookup[user_deployment.user].append(user_deployment.deployment)
+        
+        for user in User.objects.all():
+            expected_deployments = site_deploy_lookup[user.site]
+            for expected_deployment in expected_deployments:
+                if expected_deployment not in user_deploy_lookup[user]:
+                    ud = UserDeployments(user=user, deployment=expected_deployment)
+                    ud.save()
+
+        # now we can return all slice deployments that need to be enacted   
+        return UserDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def sync_record(self, user_deployment):
+        logger.info("sync'ing user deployment %s" % user_deployment)
+        name = user_deployment.user.email[:user_deployment.user.email.find('@')]
+        user_fields = {'name': name,
+                       'email': user_deployment.user.email,
+                       'password': hashlib.md5(user_deployment.user.password).hexdigest()[:6],
+                       'enabled': True}    
+        driver = self.driver.admin_driver(deployment=user_deployment.deployment.name)
+        if not user_deployment.kuser_id:
+            keystone_user = driver.create_user(**user_fields)
+            user_deployment.kuser_id = keystone_user.id
+        else:
+            driver.update_user(user_deployment.kuser_id, user_fields)
+
+        # setup user deployment site roles  
+        if user_deployment.user.site:
+            site_deployments = SiteDeployments.objects.filter(site=user_deployment.user.site,
+                                                              deployment=user_deployment.deployment)
+            if site_deployments:
+                # need the correct tenant id for site at the deployment
+                tenant_id = site_deployments[0].tenant_id  
+                driver.add_user_role(user_deployment.kuser_id, 
+                                     tenant_id, 'user')
+                if user_deployment.user.is_admin:
+                    driver.add_user_role(user_deployment.kuser_id, tenant_id, 'admin')
+                else:
+                    # may have admin role so attempt to remove it
+                    driver.delete_user_role(user_deployment.kuser_id, tenant_id, 'admin')
+
+        if user_deployment.user.public_key:
+            user_driver = self.driver.client_driver(caller=user_deployment.user, 
+                                                    tenant=user_deployment.user.site.login_base, 
+                                                    deployment=user_deployment.deployment.name)
+            key_fields =  {'name': user_deployment.user.keyname,
+                           'public_key': user_deployment.user.public_key}
+            user_driver.create_keypair(**key_fields)
+
+        user_deployment.save()
diff --git a/planetstack/observer/steps/sync_users.py b/planetstack/observer/steps/sync_users.py
index 25f093e..4bd2826 100644
--- a/planetstack/observer/steps/sync_users.py
+++ b/planetstack/observer/steps/sync_users.py
@@ -14,29 +14,4 @@
         return User.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
 
     def sync_record(self, user):
-        name = user.email[:user.email.find('@')]
-        user_fields = {'name': name,
-                       'email': user.email,
-                       'password': hashlib.md5(user.password).hexdigest()[:6],
-                       'enabled': True}
-        if not user.kuser_id:
-            keystone_user = self.driver.create_user(**user_fields)
-            user.kuser_id = keystone_user.id
-        else:
-            self.driver.update_user(user.kuser_id, user_fields)        
-
-        if user.site:
-            self.driver.add_user_role(user.kuser_id, user.site.tenant_id, 'user')
-            if user.is_admin:
-                self.driver.add_user_role(user.kuser_id, user.site.tenant_id, 'admin')
-            else:
-                # may have admin role so attempt to remove it
-                self.driver.delete_user_role(user.kuser_id, user.site.tenant_id, 'admin')
-
-        if user.public_key:
-            driver = self.driver.client_driver(caller=user, tenant=user.site.login_base) 
-            key_fields =  {'name': user.keyname,
-                           'public_key': user.public_key}
-            driver.create_keypair(**key_fields)
-
         user.save()