Merged ansible branch into master
diff --git a/planetstack/core/models/credential.py b/planetstack/core/models/credential.py
index 13bc1c8..b74e540 100644
--- a/planetstack/core/models/credential.py
+++ b/planetstack/core/models/credential.py
@@ -6,7 +6,7 @@
 from core.models import Deployment,DeploymentLinkManager,DeploymentLinkDeletionManager
 
 class UserCredential(PlCoreBase):
-    user = models.ForeignKey(User, related_name='credentials', help_text="The User this credential is associated with")
+    user = models.ForeignKey(User, related_name='usercredentials', help_text="The User this credential is associated with")
 
     name = models.SlugField(help_text="The credential type, e.g. ec2", max_length=128)
     key_id = models.CharField(help_text="The backend id of this credential", max_length=1024)
@@ -17,7 +17,7 @@
         return self.name
 
 class SiteCredential(PlCoreBase):
-    site = models.ForeignKey(Site, related_name='credentials', help_text="The User this credential is associated with")
+    site = models.ForeignKey(Site, related_name='sitecredentials', help_text="The User this credential is associated with")
 
     name = models.SlugField(help_text="The credential type, e.g. ec2", max_length=128)
     key_id = models.CharField(help_text="The backend id of this credential", max_length=1024)
@@ -28,7 +28,7 @@
         return self.name
 
 class SliceCredential(PlCoreBase):
-    slice = models.ForeignKey(Slice, related_name='credentials', help_text="The User this credential is associated with")
+    slice = models.ForeignKey(Slice, related_name='slicecredentials', help_text="The User this credential is associated with")
 
     name = models.SlugField(help_text="The credential type, e.g. ec2", max_length=128)
     key_id = models.CharField(help_text="The backend id of this credential", max_length=1024)
@@ -41,7 +41,7 @@
 class DeploymentCredential(PlCoreBase):
     objects = DeploymentLinkManager()
     deleted_objects = DeploymentLinkDeletionManager()
-    deployment = models.ForeignKey(Deployment, related_name='credentials', help_text="The User this credential is associated with")
+    deployment = models.ForeignKey(Deployment, related_name='deploymentcredentials', help_text="The User this credential is associated with")
 
     name = models.SlugField(help_text="The credential type, e.g. ec2", max_length=128)
     key_id = models.CharField(help_text="The backend id of this credential", max_length=1024)
diff --git a/planetstack/core/models/flavor.py b/planetstack/core/models/flavor.py
index 27b2642..04747af 100644
--- a/planetstack/core/models/flavor.py
+++ b/planetstack/core/models/flavor.py
@@ -32,7 +32,7 @@
     parameter = models.ForeignKey(FlavorParameterType, related_name="parameters", help_text="The type of the parameter")
     value = models.CharField(help_text="The value of this parameter", max_length=1024)
 
-    flavor = models.ForeignKey(Flavor)
+    flavor = models.ForeignKey(Flavor,related_name='flavorparameter')
 
     def __unicode__(self):
         return self.parameter.name
diff --git a/planetstack/core/models/image.py b/planetstack/core/models/image.py
index 752dfe6..fdeb2cc 100644
--- a/planetstack/core/models/image.py
+++ b/planetstack/core/models/image.py
@@ -17,8 +17,8 @@
 class ImageDeployments(PlCoreBase):
     objects = DeploymentLinkManager()
     deleted_objects = DeploymentLinkDeletionManager()
-    image = models.ForeignKey(Image)
-    deployment = models.ForeignKey(Deployment)
+    image = models.ForeignKey(Image,related_name='imagedeployments')
+    deployment = models.ForeignKey(Deployment,related_name='imagedeployments')
     glance_image_id = models.CharField(null=True, blank=True, max_length=200, help_text="Glance image id") 
 
     def __unicode__(self):  return u'%s %s' % (self.image, self.deployment)
diff --git a/planetstack/core/models/slice.py b/planetstack/core/models/slice.py
index 7412fe2..ed10caf 100644
--- a/planetstack/core/models/slice.py
+++ b/planetstack/core/models/slice.py
@@ -124,8 +124,8 @@
     objects = DeploymentLinkManager()
     deleted_objects = DeploymentLinkDeletionManager()
 
-    slice = models.ForeignKey(Slice)
-    deployment = models.ForeignKey(Deployment)
+    slice = models.ForeignKey(Slice, related_name='slicedeployments')
+    deployment = models.ForeignKey(Deployment, related_name='slicedeployments')
     tenant_id = models.CharField(null=True, blank=True, max_length=200, help_text="Keystone tenant id")
     network_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum network")
     router_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum router id")
diff --git a/planetstack/core/models/sliver.py b/planetstack/core/models/sliver.py
index c694a1e..d9d2028 100644
--- a/planetstack/core/models/sliver.py
+++ b/planetstack/core/models/sliver.py
@@ -1,7 +1,8 @@
 import os
 from django.db import models
+from django.db.models import Q
 from django.core import exceptions
-from core.models import PlCoreBase
+from core.models import PlCoreBase,PlCoreBaseManager,PlCoreBaseDeletionManager
 from core.models import Image
 from core.models import Slice
 from core.models import Node
@@ -11,6 +12,9 @@
 from core.models import Tag
 from core.models import Flavor
 from django.contrib.contenttypes import generic
+from planetstack.config import Config
+
+config = Config()
 
 def get_default_flavor(deployment = None):
     # Find a default flavor that can be used for a sliver. This is particularly
@@ -31,8 +35,49 @@
 
     return flavors[0]
 
+class SliverDeletionManager(PlCoreBaseDeletionManager):
+    def get_queryset(self):
+        parent=super(SliverDeletionManager, self)
+        try:
+            backend_type = config.observer_backend_type
+        except AttributeError:
+            backend_type = None
+
+        parent_queryset = parent.get_queryset() if hasattr(parent, "get_queryset") else parent.get_query_set()
+        if (backend_type):
+            return parent_queryset.filter(Q(node__deployment__backend_type=backend_type))
+        else:
+            return parent_queryset
+
+    # deprecated in django 1.7 in favor of get_queryset().
+    def get_query_set(self):
+        return self.get_queryset()
+
+
+class SliverManager(PlCoreBaseManager):
+    def get_queryset(self):
+        parent=super(SliverManager, self)
+
+        try:
+            backend_type = config.observer_backend_type
+        except AttributeError:
+            backend_type = None
+
+        parent_queryset = parent.get_queryset() if hasattr(parent, "get_queryset") else parent.get_query_set()
+
+        if backend_type:
+            return parent_queryset.filter(Q(node__deployment__backend_type=backend_type))
+        else:
+            return parent_queryset
+
+    # deprecated in django 1.7 in favor of get_queryset().
+    def get_query_set(self):
+        return self.get_queryset()
+
 # Create your models here.
 class Sliver(PlCoreBase):
+    objects = SliverManager()
+    deleted_objects = SliverDeletionManager()
     instance_id = models.CharField(null=True, blank=True, max_length=200, help_text="Nova instance id")
     name = models.CharField(max_length=200, help_text="Sliver name")
     instance_name = models.CharField(blank=True, null=True, max_length=200, help_text="OpenStack generated name")
diff --git a/planetstack/model_policy.py b/planetstack/model_policy.py
index 9224c50..38cc4df 100644
--- a/planetstack/model_policy.py
+++ b/planetstack/model_policy.py
@@ -2,6 +2,7 @@
 from django.dispatch import receiver
 import pdb
 from core.models import *
+from dependency_walker import *
 import model_policies
 
 modelPolicyEnabled = True
@@ -10,18 +11,33 @@
     global modelPolicyEnabled
     modelPolicyEnabled = x
 
+def update_dep(d, o):
+	if (d.updated < o.updated):
+		d.save(update_fields=['updated'])
+	
+def delete_if_inactive(d, o):
+	#print "Deleting %s (%s)"%(d,d.__class__.__name__)
+	d.delete()	
+	return
+
 @receiver(post_save)
 def post_save_handler(sender, instance, **kwargs):
-	sender_name = sender.__name__
-	policy_name = 'model_policy_%s'%sender_name
-
         if not modelPolicyEnabled:
             return
+
+	sender_name = sender.__name__
+	policy_name = 'model_policy_%s'%sender_name
 	
 	if (not kwargs['update_fields']):
+		# Automatic dirtying
+		walk_inv_deps(update_dep, instance)
+
 		try:
 			policy_handler = getattr(model_policies, policy_name, None)
 			if policy_handler is not None:
 				policy_handler.handle(instance)
 		except:
 			pass
+	elif 'deleted' in kwargs['update_fields']:
+		walk_inv_deps(delete_if_inactive, instance)
+	
diff --git a/planetstack/observer b/planetstack/observer
index 10522a2..ae75af5 120000
--- a/planetstack/observer
+++ b/planetstack/observer
@@ -1 +1 @@
-ec2_observer
\ No newline at end of file
+openstack_observer
\ No newline at end of file
diff --git a/planetstack/openstack/client.py b/planetstack/openstack/client.py
index af91387..6974dad 100644
--- a/planetstack/openstack/client.py
+++ b/planetstack/openstack/client.py
@@ -1,15 +1,12 @@
 import urlparse
 try:
     from keystoneclient.v2_0 import client as keystone_client
-    from glance import client as glance_client
+    #from glance import client as glance_client
     import glanceclient
     from novaclient.v1_1 import client as nova_client
-    from quantumclient.v2_0 import client as quantum_client
+    from neutronclient.v2_0 import client as quantum_client
     from nova.db.sqlalchemy import api as nova_db_api 
     from nova.context import get_admin_context
-    from keystone.common.sql import core  
-    core.CONF(args=[], project='keystone', default_config_files=['/etc/keystone/keystone.conf'])
-    from keystone.identity.backends.sql import Metadata
     has_openstack = True
 except:
     has_openstack = False
@@ -70,17 +67,6 @@
         #if '@' in self.username:
         #    self.username = self.username[:self.username.index('@')]
 
-class KeystoneDB:
-    @require_enabled
-    def get_session(self):
-        return core.Base().get_session()
-
-    @require_enabled
-    def get_metadata(self):
-        session = self.get_session()
-        return session.query(Metadata).all()     
-
-
 class KeystoneClient(Client):
     def __init__(self, *args, **kwds):
         Client.__init__(self, *args, **kwds)
@@ -104,7 +90,7 @@
     def __init__(self, *args, **kwds):
         Client.__init__(self, *args, **kwds)
         if has_openstack:
-            self.client = glance_client.get_client(host='0.0.0.0',
+            self.client = glanceclient.get_client(host='0.0.0.0',
                                                    username=self.username,
                                                    password=self.password,
                                                    tenant=self.tenant,
@@ -190,12 +176,11 @@
         url_parsed = urlparse.urlparse(self.keystone.url)
         hostname = url_parsed.netloc.split(':')[0]
         token = self.keystone.client.tokens.authenticate(username=self.keystone.username, password=self.keystone.password, tenant_name=self.keystone.tenant)
-        self.keystone_db = KeystoneDB()
-        self.glance = GlanceClient(*args, **kwds)
+        #self.glance = GlanceClient(*args, **kwds)
         
         self.glanceclient = GlanceClientNew('1', endpoint='http://%s:9292' % hostname, token=token.id, **kwds)
         self.nova = NovaClient(*args, **kwds)
-        self.nova_db = NovaDB(*args, **kwds)
+        # self.nova_db = NovaDB(*args, **kwds)
         self.quantum = QuantumClient(*args, **kwds)
     
 
diff --git a/planetstack/openstack/driver.py b/planetstack/openstack/driver.py
index 093ab5a..6fb81a5 100644
--- a/planetstack/openstack/driver.py
+++ b/planetstack/openstack/driver.py
@@ -28,18 +28,18 @@
         self.admin_user = None
 
     def client_driver(self, caller=None, tenant=None, deployment=None):
-        admin_driver = self.admin_driver(tenant=tenant, deployment=deployment)
         if caller:
             auth = {'username': caller.email,
                     'password': hashlib.md5(caller.password).hexdigest()[:6],
                     'tenant': tenant}
-            client = OpenStackClient(deployment=admin_driver.deployment, **auth)
+            client = OpenStackClient(deployment=deployment, **auth)
         else:
+            admin_driver = self.admin_driver(tenant=tenant, deployment=deployment)
             client = OpenStackClient(tenant=tenant, deployment=admin_driver.deployment)
 
         driver = OpenStackDriver(client=client)
-        driver.admin_user = admin_driver.admin_user
-        driver.deployment = admin_driver.deployment
+        #driver.admin_user = admin_driver.admin_user
+        #driver.deployment = admin_driver.deployment
         return driver
 
     def admin_driver(self, tenant=None, deployment=None):
diff --git a/planetstack/openstack_observer/event_loop.py b/planetstack/openstack_observer/event_loop.py
index 12965bb..500c0e0 100644
--- a/planetstack/openstack_observer/event_loop.py
+++ b/planetstack/openstack_observer/event_loop.py
@@ -254,10 +254,13 @@
 		except KeyError:
 			has_deps = False
 
+		go = False
+
 		if (has_deps):
 			for d in deps:
                                 if d==step.__name__:
                                     logger.info("   step %s self-wait skipped" % step.__name__)
+				    go = True
                                     continue
 
 				cond = self.step_conditions[d]
@@ -266,7 +269,7 @@
                                         logger.info("  step %s wait on dep %s" % (step.__name__, d))
 					cond.wait()
 				cond.release()
-			go = self.step_status[d] == STEP_STATUS_OK
+			go = go or self.step_status[d] == STEP_STATUS_OK
 		else:
 			go = True
 
diff --git a/planetstack/openstack_observer/steps/sync_external_routes.py b/planetstack/openstack_observer/steps/sync_external_routes.py
index 334d19d..28d24cc 100644
--- a/planetstack/openstack_observer/steps/sync_external_routes.py
+++ b/planetstack/openstack_observer/steps/sync_external_routes.py
@@ -2,6 +2,7 @@
 import base64
 from planetstack.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import Deployment
 
 class SyncExternalRoutes(OpenStackSyncStep):
     # XXX what does this provide?
@@ -9,6 +10,8 @@
     requested_interval = 86400 # This step is slow like a pig. Let's run it infrequently
 
     def call(self, **args):
+	deployments = Deployment.objects.all()
+	self.driver = self.driver.admin_driver(deployment=deployments[0],tenant='admin')
         routes = self.driver.get_external_routes()
         subnets = self.driver.shell.quantum.list_subnets()['subnets']
         for subnet in subnets:
diff --git a/planetstack/openstack_observer/steps/sync_network_deployments.py b/planetstack/openstack_observer/steps/sync_network_deployments.py
index 77d3a3a..0312f3a 100644
--- a/planetstack/openstack_observer/steps/sync_network_deployments.py
+++ b/planetstack/openstack_observer/steps/sync_network_deployments.py
@@ -25,7 +25,8 @@
     def get_next_subnet(self, deployment=None):
         # limit ourself to 10.0.x.x for now
         valid_subnet = lambda net: net.startswith('10.0')
-        driver = self.driver.admin_driver(deployment=deployment)
+
+        driver = self.driver.admin_driver(deployment=deployment,tenant='admin')
         subnets = driver.shell.quantum.list_subnets()['subnets']
         ints = [int(IPNetwork(subnet['cidr']).ip) for subnet in subnets \
                 if valid_subnet(subnet['cidr'])]
@@ -107,15 +108,12 @@
             logger.info("deployment %r has no admin_user, skipping" % network_deployment.deployment)
             return
 
+        self.driver = self.driver.admin_driver(deployment=network_deployment.deployment,tenant='admin')
         if network_deployment.network.owner and network_deployment.network.owner.creator:
             try:
                 # update manager context
-                real_driver = self.driver
-                self.driver = self.driver.client_driver(caller=network_deployment.network.owner.creator,
-                                                        tenant=network_deployment.network.owner.name,
-                                                        deployment=network_deployment.deployment.name)
+		# Bring back
                 self.save_network_deployment(network_deployment)
-                self.driver = real_driver
                 logger.info("saved network deployment: %s" % (network_deployment))
             except Exception,e:
                 logger.log_exc("save network deployment failed: %s" % network_deployment)
diff --git a/planetstack/openstack_observer/steps/sync_network_slivers.py b/planetstack/openstack_observer/steps/sync_network_slivers.py
index c003ba8..3e85e05 100644
--- a/planetstack/openstack_observer/steps/sync_network_slivers.py
+++ b/planetstack/openstack_observer/steps/sync_network_slivers.py
@@ -47,7 +47,7 @@
                 logger.info("deployment %s has no admin_tenant" % deployment.name)
                 continue
             try:
-                driver = self.driver.admin_driver(deployment=deployment.name)
+                driver = self.driver.admin_driver(deployment=deployment.name,tenant='admin')
                 ports = driver.shell.quantum.list_ports()["ports"]
             except:
                 logger.log_exc("failed to get ports from deployment %s" % deployment.name)
@@ -137,7 +137,7 @@
             if (neutron_nat_list != nat_list):
                 logger.info("Setting nat:forward_ports for port %s network %s sliver %s to %s" % (str(networkSliver.port_id), str(networkSliver.network.id), str(networkSliver.sliver), str(nat_list)))
                 try:
-                    driver = self.driver.client_driver(caller=networkSliver.sliver.creator, tenant=networkSliver.sliver.slice.name, deployment=networkSliver.sliver.node.deployment.name)
+                    driver = self.driver.admin_driver(deployment=networkSliver.sliver.node.deployment,tenant='admin')
                     driver.shell.quantum.update_port(networkSliver.port_id, {"port": {"nat:forward_ports": nat_list}})
                 except:
                     logger.log_exc("failed to update port with nat_list %s" % str(nat_list))
diff --git a/planetstack/openstack_observer/steps/sync_nodes.py b/planetstack/openstack_observer/steps/sync_nodes.py
index d648b7d..3936311 100644
--- a/planetstack/openstack_observer/steps/sync_nodes.py
+++ b/planetstack/openstack_observer/steps/sync_nodes.py
@@ -24,7 +24,7 @@
         # collect local nodes
         sites = Site.objects.all()
         nodes = Node.objects.all()
-        node_hostnames  = [node.name for node in nodes]
+        node_hostnames = [node.name for node in nodes]
 
         # fetch all nodes from each deployment
         deployments = Deployment.objects.all()
diff --git a/planetstack/openstack_observer/steps/sync_site_deployments.py b/planetstack/openstack_observer/steps/sync_site_deployments.py
index a8a00f6..b5e9f9a 100644
--- a/planetstack/openstack_observer/steps/sync_site_deployments.py
+++ b/planetstack/openstack_observer/steps/sync_site_deployments.py
@@ -4,24 +4,32 @@
 from planetstack.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
 from core.models.site import *
+from observer.ansible import *
 
 class SyncSiteDeployments(OpenStackSyncStep):
     requested_interval=0
     provides=[SiteDeployments, Site]
 
     def sync_record(self, site_deployment):
-        if not site_deployment.tenant_id:
-            driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)
-            tenant = driver.create_tenant(tenant_name=site_deployment.site.login_base,
-                                               description=site_deployment.site.name,
-                                               enabled=site_deployment.site.enabled)
-            site_deployment.tenant_id = tenant.id
-            site_deployment.save()
-        elif site_deployment.site.id and site_deployment.tenant_id:
-            driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)
-            driver.update_tenant(site_deployment.tenant_id,
-                                 description=site_deployment.site.name,
-                                 enabled=site_deployment.site.enabled)
+
+	template = os_template_env.get_template('sync_site_deployments.yaml')
+	tenant_fields = {'endpoint':site_deployment.deployment.auth_url,
+		         'admin_user': site_deployment.deployment.admin_user,
+		         'admin_password': site_deployment.deployment.admin_password,
+		         'admin_tenant': 'admin',
+		         'tenant': site_deployment.site.login_base,
+		         'tenant_description': site_deployment.site.name}
+
+	rendered = template.render(tenant_fields)
+	res = run_template('sync_site_deployments.yaml', tenant_fields)
+
+	if (len(res)==1):
+		site_deployment.tenant_id = res[0]['id']
+        	site_deployment.save()
+	elif (len(res)):
+		raise Exception('Could not assign roles for user %s'%tenant_fields['tenant'])
+	else:
+		raise Exception('Could not create or update user %s'%tenant_fields['tenant'])
             
     def delete_record(self, site_deployment):
         if site_deployment.tenant_id:
diff --git a/planetstack/openstack_observer/steps/sync_slice_deployments.py b/planetstack/openstack_observer/steps/sync_slice_deployments.py
index 03ea2ca..97196d6 100644
--- a/planetstack/openstack_observer/steps/sync_slice_deployments.py
+++ b/planetstack/openstack_observer/steps/sync_slice_deployments.py
@@ -9,6 +9,7 @@
 from core.models.slice import Slice, SliceDeployments
 from core.models.userdeployments import UserDeployments
 from util.logger import Logger, logging
+from observer.ansible import *
 
 logger = Logger(level=logging.INFO)
 
@@ -47,46 +48,41 @@
             logger.info("deployment %r has no admin_user, skipping" % slice_deployment.deployment)
             return
 
-        if not slice_deployment.tenant_id:
-            nova_fields = {'tenant_name': slice_deployment.slice.name,
-                   'description': slice_deployment.slice.description,
-                   'enabled': slice_deployment.slice.enabled}
-            driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
-            tenant = driver.create_tenant(**nova_fields)
-            slice_deployment.tenant_id = tenant.id
-
-            # XXX give caller an admin role at the tenant they've created
-            deployment_users = UserDeployments.objects.filter(user=slice_deployment.slice.creator,
+	deployment_users = UserDeployments.objects.filter(user=slice_deployment.slice.creator,
                                                              deployment=slice_deployment.deployment)            
-            if not deployment_users:
-                logger.info("slice createor %s has not accout at deployment %s" % (slice_deployment.slice.creator, slice_deployment.deployment.name))
-            else:
-                deployment_user = deployment_users[0]
-                # lookup user id at this deployment
-                kuser= driver.shell.keystone.users.find(email=slice_deployment.slice.creator.email)
+    	if not deployment_users:
+	    logger.info("slice createor %s has not accout at deployment %s" % (slice_deployment.slice.creator, slice_deployment.deployment.name))
+	    roles = []
+    	else:
+	    deployment_user = deployment_users[0]
+	    roles = ['admin']
+	    
+	max_instances=int(slice_deployment.slice.max_slivers)
+	tenant_fields = {'endpoint':slice_deployment.deployment.auth_url,
+		         'admin_user': slice_deployment.deployment.admin_user,
+		         'admin_password': slice_deployment.deployment.admin_password,
+		         'admin_tenant': 'admin',
+		         'tenant': slice_deployment.slice.name,
+		         'tenant_description': slice_deployment.slice.description,
+			 'roles':roles,
+			 'name':deployment_user.email,
+			 'max_instances':max_instances}
 
-                # add required roles at the slice's tenant 
-                driver.add_user_role(kuser.id, tenant.id, 'admin')
-                    
-                # refresh credentials using this tenant
-                client_driver = self.driver.client_driver(caller=deployment_user.user,
-                                                          tenant=tenant.name, 
-                                                          deployment=slice_deployment.deployment.name)
-
-
-        if slice_deployment.id and slice_deployment.tenant_id:
-            # update existing tenant
-            driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
-            driver.update_tenant(slice_deployment.tenant_id,
-                                 description=slice_deployment.slice.description,
-                                 enabled=slice_deployment.slice.enabled)  
-
-        if slice_deployment.tenant_id:
-            # update slice/tenant quota
-            driver = self.driver.client_driver(deployment=slice_deployment.deployment.name, tenant=slice_deployment.slice.name)
-            driver.shell.nova.quotas.update(tenant_id=slice_deployment.tenant_id, instances=int(slice_deployment.slice.max_slivers)) 
-
-        slice_deployment.save()
+	res = run_template('sync_slice_deployments.yaml', tenant_fields)
+	expected_num = len(roles)+1
+	if (len(res)!=expected_num):
+	    raise Exception('Could not sync tenants for slice %s'%slice_deployment.slice.name)
+	else:
+	    tenant_id = res[0]['id']
+	    if (not slice_deployment.tenant_id):
+	        handle = os.popen('nova quota-update --instances %d %s'%(max_instances,tenant_id))
+		output = handle.read()
+		result = handle.close()
+		if (result):
+		    logging.info('Could not update quota for %s'%slice_deployment.slice.name)
+		slice_deployment.tenant_id = tenant_id
+		slice_deployment.save()
+			
 
 
     def delete_record(self, slice_deployment):
@@ -106,11 +102,4 @@
             client_driver.delete_network(slice_deployment.network_id)
         if slice_deployment.tenant_id:
             driver.delete_tenant(slice_deployment.tenant_id)
-        # delete external route
-        #subnet = None
-        #subnets = client_driver.shell.quantum.list_subnets()['subnets']
-        #for snet in subnets:
-        #    if snet['id'] == slice_deployment.subnet_id:
-        #        subnet = snet
-        #if subnet:
-        #    driver.delete_external_route(subnet)
+        
diff --git a/planetstack/openstack_observer/steps/sync_slivers.py b/planetstack/openstack_observer/steps/sync_slivers.py
index dcedd1d..4f33bba 100644
--- a/planetstack/openstack_observer/steps/sync_slivers.py
+++ b/planetstack/openstack_observer/steps/sync_slivers.py
@@ -7,9 +7,14 @@
 from core.models.slice import Slice, SlicePrivilege, SliceDeployments
 from core.models.network import Network, NetworkSlice, NetworkDeployments
 from util.logger import Logger, logging
+from observer.ansible import *
 
 logger = Logger(level=logging.INFO)
 
+def escape(s):
+    s = s.replace('\n',r'\n').replace('"',r'\"')
+    return s
+    
 class SyncSlivers(OpenStackSyncStep):
     provides=[Sliver]
     requested_interval=0
@@ -20,81 +25,97 @@
 
     def sync_record(self, sliver):
         logger.info("sync'ing sliver:%s deployment:%s " % (sliver, sliver.node.deployment))
+
         metadata_update = {}
-        if ("numberCores" in sliver.changed_fields):
+	if (sliver.numberCores):
             metadata_update["cpu_cores"] = str(sliver.numberCores)
 
         for tag in sliver.slice.tags.all():
             if tag.name.startswith("sysctl-"):
                 metadata_update[tag.name] = tag.value
 
-        if not sliver.instance_id:
-            driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, deployment=sliver.deploymentNetwork.name)
-            # public keys
-            slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
-            pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
-            if sliver.creator.public_key:
-                pubkeys.append(sliver.creator.public_key)
-            if sliver.slice.creator.public_key:
-                pubkeys.append(sliver.slice.creator.public_key) 
-            # netowrks
-            # include all networks available to the slice and/or associated network templates
-            nics = []
-            networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]   
-            network_deployments = NetworkDeployments.objects.filter(network__in=networks, 
-                                                                    deployment=sliver.node.deployment)
-            # Gather private networks first. This includes networks with a template that has
-            # visibility = private and translation = none
-            for network_deployment in network_deployments:
-                if network_deployment.network.template.visibility == 'private' and \
-                   network_deployment.network.template.translation == 'none': 
-                    nics.append({'net-id': network_deployment.net_id})
-    
-            # now include network template
-            network_templates = [network.template.sharedNetworkName for network in networks \
-                                 if network.template.sharedNetworkName]
-            #logger.info("%s %s %s %s" % (driver.shell.quantum.username, driver.shell.quantum.password, driver.shell.quantum.tenant, driver.shell.quantum.url))
-            for net in driver.shell.quantum.list_networks()['networks']:
-                if net['name'] in network_templates: 
-                    nics.append({'net-id': net['id']}) 
+        # public keys
+        slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
+        pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
+    	if sliver.creator.public_key:
+	    pubkeys.add(sliver.creator.public_key)
 
-            # look up image id
-            deployment_driver = self.driver.admin_driver(deployment=sliver.deploymentNetwork.name)
-            image_id = None
-            images = deployment_driver.shell.glance.get_images()
-            for image in images:
-                if image['name'] == sliver.image.name:
-                    image_id = image['id']
-                    
-            # look up key name at the deployment
-            # create/fetch keypair
-            keyname = None
-            if sliver.creator.public_key:
-                keyname = sliver.creator.email.lower().replace('@', 'AT').replace('.', '') +\
-                          sliver.slice.name
-                key_fields =  {'name': keyname,
-                               'public_key': sliver.creator.public_key}
-                driver.create_keypair(**key_fields)
+        if sliver.slice.creator.public_key:
+            pubkeys.add(sliver.slice.creator.public_key) 
 
-            userData = self.get_userdata(sliver)
-            if sliver.userData:
-                userData = sliver.userData
+	nics = []
+	networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]   
+	network_deployments = NetworkDeployments.objects.filter(network__in=networks, 
+								deployment=sliver.node.deployment)
 
-            instance = driver.spawn_instance(name=sliver.name,
-                                key_name = keyname,
-                                image_id = image_id,
-                                hostname = sliver.node.name,
-                                pubkeys = pubkeys,
-                                nics = nics,
-                                userdata = userData,
-                                flavor_name = sliver.flavor.flavor )
-            sliver.instance_id = instance.id
-            sliver.instance_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
+	for network_deployment in network_deployments:
+	    if network_deployment.network.template.visibility == 'private' and \
+	       network_deployment.network.template.translation == 'none' and network_deployment.net_id: 
+		nics.append(network_deployment.net_id)
+
+	# now include network template
+	network_templates = [network.template.sharedNetworkName for network in networks \
+			     if network.template.sharedNetworkName]
+
+        #driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, deployment=sliver.deploymentNetwork)
+        driver = self.driver.admin_driver(tenant='admin', deployment=sliver.deploymentNetwork)
+	nets = driver.shell.quantum.list_networks()['networks']
+	for net in nets:
+	    if net['name'] in network_templates: 
+		nics.append(net['id']) 
+
+	if (not nics):
+	    for net in nets:
+	        if net['name']=='public':
+	    	    nics.append(net['id'])
+
+	# look up image id
+	deployment_driver = self.driver.admin_driver(deployment=sliver.deploymentNetwork.name)
+	image_id = None
+	images = deployment_driver.shell.glanceclient.images.list()
+	for image in images:
+	    if image.name == sliver.image.name or not image_id:
+		image_id = image.id
+		
+	# look up key name at the deployment
+	# create/fetch keypair
+	keyname = None
+	keyname = sliver.creator.email.lower().replace('@', 'AT').replace('.', '') +\
+		  sliver.slice.name
+	key_fields =  {'name': keyname,
+		       'public_key': sliver.creator.public_key}
+	    
+
+	userData = self.get_userdata(sliver)
+	if sliver.userData:
+	    userData = sliver.userData
+	    
+	sliver_name = '@'.join([sliver.slice.name,sliver.node.name])
+	tenant_fields = {'endpoint':sliver.node.deployment.auth_url,
+		     'admin_user': sliver.node.deployment.admin_user,
+		     'admin_password': sliver.node.deployment.admin_password,
+		     'admin_tenant': 'admin',
+		     'tenant': sliver.slice.name,
+		     'tenant_description': sliver.slice.description,
+		     'name':sliver_name,
+		     'image_id':image_id,
+		     'key_name':keyname,
+		     'flavor_id':1,
+		     'nics':nics,
+		     'meta':metadata_update,
+		     'key':key_fields,
+		     'user_data':r'%s'%escape(userData)}
+
+	res = run_template('sync_slivers.yaml', tenant_fields)
+	if (len(res)!=2):
+	    raise Exception('Could not sync sliver %s'%sliver.slice.name)
+	else:
+	    sliver_id = res[1]['id'] # 0 is for the key
+
+            sliver.instance_id = sliver_id
+            sliver.instance_name = sliver_name
             sliver.save()    
 
-        if sliver.instance_id and metadata_update:
-            driver.update_instance_metadata(sliver.instance_id, metadata_update)
-
     def delete_record(self, sliver):
         if sliver.instance_id:
             driver = self.driver.client_driver(caller=sliver.creator, 
diff --git a/planetstack/openstack_observer/steps/sync_user_deployments.py b/planetstack/openstack_observer/steps/sync_user_deployments.py
index 0c28392..f7e41a0 100644
--- a/planetstack/openstack_observer/steps/sync_user_deployments.py
+++ b/planetstack/openstack_observer/steps/sync_user_deployments.py
@@ -10,6 +10,8 @@
 from core.models.userdeployments import UserDeployments
 from util.logger import Logger, logging
 
+from observer.ansible import *
+
 logger = Logger(level=logging.INFO)
 
 class SyncUserDeployments(OpenStackSyncStep):
@@ -30,50 +32,55 @@
             logger.info("deployment %r has no admin_user, skipping" % user_deployment.deployment)
             return
 
+	template = os_template_env.get_template('sync_user_deployments.yaml')
+	
         name = user_deployment.user.email[:user_deployment.user.email.find('@')]
-        user_fields = {'name': user_deployment.user.email,
-                       'email': user_deployment.user.email,
-                       'password': hashlib.md5(user_deployment.user.password).hexdigest()[:6],
-                       'enabled': True}    
-        driver = self.driver.admin_driver(deployment=user_deployment.deployment.name)
-        if not user_deployment.kuser_id:
-            keystone_user = driver.create_user(**user_fields)
-            user_deployment.kuser_id = keystone_user.id
-        else:
-            driver.update_user(user_deployment.kuser_id, user_fields)
 
-        # setup user deployment home site roles  
+	roles = []
+	# setup user deployment home site roles  
         if user_deployment.user.site:
             site_deployments = SiteDeployments.objects.filter(site=user_deployment.user.site,
                                                               deployment=user_deployment.deployment)
             if site_deployments:
                 # need the correct tenant id for site at the deployment
                 tenant_id = site_deployments[0].tenant_id  
-                driver.add_user_role(user_deployment.kuser_id, 
-                                     tenant_id, 'user')
+		tenant_name = site_deployments[0].site.login_base
+
+		roles.append('user')
                 if user_deployment.user.is_admin:
-                    driver.add_user_role(user_deployment.kuser_id, tenant_id, 'admin')
-                else:
-                    # may have admin role so attempt to remove it
-                    driver.delete_user_role(user_deployment.kuser_id, tenant_id, 'admin')
+                    roles.append('admin')
+	    else:
+		raise Exception('Internal error. Missing SiteDeployment for user %s'%user_deployment.user.email)
+	else:
+	    raise Exception('Siteless user %s'%user_deployment.user.email)
 
-        #if user_deployment.user.public_key:
-        #    if not user_deployment.user.keyname:
-        #        keyname = user_deployment.user.email.lower().replace('@', 'AT').replace('.', '')
-        #        user_deployment.user.keyname = keyname
-        #        user_deployment.user.save()
-        #    
-        #    user_driver = driver.client_driver(caller=user_deployment.user, 
-        #                                       tenant=user_deployment.user.site.login_base, 
-        #                                       deployment=user_deployment.deployment.name)
-        #    key_fields =  {'name': user_deployment.user.keyname,
-        #                   'public_key': user_deployment.user.public_key}
-        #    user_driver.create_keypair(**key_fields)
 
-        user_deployment.save()
+        user_fields = {'endpoint':user_deployment.deployment.auth_url,
+		       'name': user_deployment.user.email,
+                       'email': user_deployment.user.email,
+                       'password': hashlib.md5(user_deployment.user.password).hexdigest()[:6],
+                       'admin_user': user_deployment.deployment.admin_user,
+		       'admin_password': user_deployment.deployment.admin_password,
+		       'admin_tenant': 'admin',
+		       'roles':roles,
+		       'tenant':tenant_name}    
+	
+	rendered = template.render(user_fields)
+	res = run_template('sync_user_deployments.yaml', user_fields)
+
+	# res is a list in which each element corresponds to an
+	# "ok" string received per operation. If we get as many oks as
+	# the number of operations we issued, that means a grand success.
+	# Otherwise, the number of oks tells us which operation failed.
+	expected_length = len(roles) + 1
+	if (len(res)==expected_length):
+        	user_deployment.save()
+	elif (len(res)):
+		raise Exception('Could not assign roles for user %s'%user_fields['name'])
+	else:
+		raise Exception('Could not create or update user %s'%user_fields['name'])
 
     def delete_record(self, user_deployment):
         # Tear down the Keystone user that backs this UserDeployments record.
         # kuser_id is only set once the user has actually been created at the
         # deployment, so a falsy value means there is nothing to delete.
         if user_deployment.kuser_id:
             # Obtain an admin-scoped driver for the target deployment
             # (selected by deployment name) and delete the backend user.
             driver = self.driver.admin_driver(deployment=user_deployment.deployment.name)
             driver.delete_user(user_deployment.kuser_id)
-