resolve merge conflict
diff --git a/xos.spec b/xos.spec
index 435285c..c585e7e 100644
--- a/xos.spec
+++ b/xos.spec
@@ -1,7 +1,7 @@
 Summary: OpenCloud core services
 Name: xos
 Version: 1.2.0
-Release: 4
+Release: 5
 License: GPL+
 Group: Development/Tools
 Source0: %{_tmppath}/%{name}-%{version}.tar.gz
diff --git a/xos/cord/models.py b/xos/cord/models.py
index 86beba1..346fcc9 100644
--- a/xos/cord/models.py
+++ b/xos/cord/models.py
@@ -24,24 +24,24 @@
 t.caller = User.objects.all()[0]
 t.save()
 
-for v in VOLTTenant.objects.all():
+for v in VOLTTenant.get_tenant_objects().all():
     v.caller = User.objects.all()[0]
     v.delete()
 
-for v in VCPETenant.objects.all():
+for v in VCPETenant.get_tenant_objects().all():
     v.caller = User.objects.all()[0]
     v.delete()
 
-for v in VOLTTenant.objects.all():
+for v in VOLTTenant.get_tenant_objects().all():
     v.caller = User.objects.all()[0]
     v.delete()
 
-for v in VOLTTenant.objects.all():
+for v in VOLTTenant.get_tenant_objects().all():
     if not v.creator:
         v.creator= User.objects.all()[0]
         v.save()
 
-for v in VCPETenant.objects.all():
+for v in VCPETenant.get_tenant_objects().all():
     if not v.creator:
         v.creator= User.objects.all()[0]
         v.save()
@@ -350,7 +350,7 @@
 
         if self.sliver is None:
             if not self.provider_service.slices.count():
-                raise XOSConfigurationError("The VCPE service has no slicers")
+                raise XOSConfigurationError("The VCPE service has no slices")
 
             node =self.pick_node()
             sliver = Sliver(slice = self.provider_service.slices.all()[0],
@@ -404,10 +404,10 @@
         if not self.creator:
             if not getattr(self, "caller", None):
                 # caller must be set when creating a vCPE since it creates a slice
-                raise XOSProgrammingError("VBNGTenant's self.caller was not set")
+                raise XOSProgrammingError("VCPETenant's self.caller was not set")
             self.creator = self.caller
             if not self.creator:
-                raise XOSProgrammingError("VCPETenant's self.caller was not set")
+                raise XOSProgrammingError("VCPETenant's self.creator was not set")
 
         super(VCPETenant, self).save(*args, **kwargs)
         self.manage_sliver()
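
The save() logic above requires either creator or caller to be set before a VCPETenant is persisted (caller is copied into creator). A minimal sketch of caller-aware creation, assuming an existing VCPEService and User; the lookups below are illustrative only:

    from core.models import User
    from cord.models import VCPEService, VCPETenant

    vcpe_service = VCPEService.objects.all()[0]   # any existing vCPE service
    t = VCPETenant(provider_service=vcpe_service)
    t.caller = User.objects.all()[0]   # save() copies caller into creator when creator is unset
    t.save()                           # also calls manage_sliver()
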
diff --git a/xos/core/admin.py b/xos/core/admin.py
index 69cae61..1ded815 100644
--- a/xos/core/admin.py
+++ b/xos/core/admin.py
@@ -57,6 +57,31 @@
                            flatatt(final_attrs),
                            force_text(value))
 
+class SliderWidget(forms.HiddenInput):
+    def render(self, name, value,  attrs=None):
+        if value is None:
+            value = '0'
+        final_attrs = self.build_attrs(attrs, name=name)
+        attrs = dict(attrs) if attrs else {}   # copy before adding name/value below
+        attrs["name"] = name
+        attrs["value"] = value
+        html = """<div style="width:640px"><span id="%(id)s_label">%(value)s</span><div id="%(id)s_slider" style="float:right;width:610px;margin-top:5px"></div></div>
+                              <script>
+                                  $(function() {
+                                      $("#%(id)s_slider").slider({
+                                         value: %(value)s,
+                                         slide: function(event, ui) { $("#%(id)s").val( ui.value ); $("#%(id)s_label").html(ui.value); },
+                                         });
+                                  });
+                              </script>
+                              <input type="hidden" id="%(id)s" name="%(name)s" value="%(value)s"></input>
+                           """ % attrs
+        html = html.replace("{","{{").replace("}","}}")
+        return format_html(html,
+                           flatatt(final_attrs),
+                           force_text(value))
+
+
 class PlainTextWidget(forms.HiddenInput):
     input_type = 'hidden'
 
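
The new SliderWidget is attached to an ordinary form field; the hpc admin change later in this patch wires it up like this (trimmed from hpc/admin.py below):

    from django import forms
    from core.admin import SliderWidget

    class HpcServiceForm(forms.ModelForm):
        # renders a jQuery UI slider backed by the hidden input emitted above
        scale = forms.IntegerField(widget=SliderWidget, required=False)
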
diff --git a/xos/core/models/plcorebase.py b/xos/core/models/plcorebase.py
index b9a2345..5e3e287 100644
--- a/xos/core/models/plcorebase.py
+++ b/xos/core/models/plcorebase.py
@@ -185,7 +185,8 @@
             if (not self.write_protect):
                 self.deleted = True
                 self.enacted=None
-                self.save(update_fields=['enacted','deleted'], silent=silent)
+                self.policed=None
+                self.save(update_fields=['enacted','deleted','policed'], silent=silent)
 
 
     def save(self, *args, **kwargs):
diff --git a/xos/core/models/service.py b/xos/core/models/service.py
index 038d71a..0b75ce3 100644
--- a/xos/core/models/service.py
+++ b/xos/core/models/service.py
@@ -32,6 +32,78 @@
     def can_update(self, user):
         return user.can_update_service(self, allow=['admin'])
      
+    def get_scalable_nodes(self, slice, max_per_node=None, exclusive_slices=[]):
+        """
+             Get a list of nodes that can be used to scale up a slice.
+
+                slice - slice to scale up
+                max_per_node - maximum numbers of slivers that 'slice' can have on a single node
+                exclusive_slices - list of slices that must have no nodes in common with 'slice'.
+        """
+
+        from core.models import Node, Sliver # late import to get around order-of-imports constraint in __init__.py
+
+        nodes = list(Node.objects.all())
+
+        conflicting_slivers = Sliver.objects.filter(slice__in = exclusive_slices)
+        conflicting_nodes = Node.objects.filter(slivers__in = conflicting_slivers)
+
+        nodes = [x for x in nodes if x not in conflicting_nodes]
+
+        # If max_per_node is set, then limit the number of slivers this slice
+        # can have on a single node.
+        if max_per_node:
+            acceptable_nodes = []
+            for node in nodes:
+                existing_count = node.slivers.filter(slice=slice).count()
+                if existing_count < max_per_node:
+                    acceptable_nodes.append(node)
+            nodes = acceptable_nodes
+
+        return nodes
+
+    def pick_node(self, slice, max_per_node=None, exclusive_slices=[]):
+        # Pick the best node to scale up a slice.
+
+        nodes = self.get_scalable_nodes(slice, max_per_node, exclusive_slices)
+        nodes = sorted(nodes, key=lambda node: node.slivers.all().count())
+        if not nodes:
+            return None
+        return nodes[0]
+
+    def adjust_scale(self, slice_hint, scale, max_per_node=None, exclusive_slices=[]):
+        from core.models import Sliver # late import to get around order-of-imports constraint in __init__.py
+
+        slices = [x for x in self.slices.all() if slice_hint in x.name]
+        for slice in slices:
+            while slice.slivers.all().count() > scale:
+                s = slice.slivers.all()[0]
+                # print "drop sliver", s
+                s.delete()
+
+            while slice.slivers.all().count() < scale:
+                node = self.pick_node(slice, max_per_node, exclusive_slices)
+                if not node:
+                    # no more available nodes
+                    break
+
+                image = slice.default_image
+                if not image:
+                    raise XOSConfigurationError("No default_image for slice %s" % slice.name)
+
+                flavor = slice.default_flavor
+                if not flavor:
+                    raise XOSConfigurationError("No default_flavor for slice %s" % slice.name)
+
+                s = Sliver(slice=slice,
+                           node=node,
+                           creator=slice.creator,
+                           image=image,
+                           flavor=flavor,
+                           deployment=node.site_deployment.deployment)
+                s.save()
+
+                # print "add sliver", s
 
 class ServiceAttribute(PlCoreBase):
     name = models.SlugField(help_text="Attribute Name", max_length=128)
@@ -93,11 +165,11 @@
 
     kind = StrippedCharField(max_length=30, default=KIND)
     provider_service = models.ForeignKey(Service, related_name='tenants')
-    subscriber_service = models.ForeignKey(Service, related_name='subscriptions', blank=True, null=True)      # can we drop this ?
+    subscriber_service = models.ForeignKey(Service, related_name='subscriptions', blank=True, null=True)
     subscriber_tenant = models.ForeignKey("Tenant", related_name='subscriptions', blank=True, null=True)
     subscriber_user = models.ForeignKey("User", related_name='subscriptions', blank=True, null=True)
-    service_specific_id = StrippedCharField(max_length=30)
-    service_specific_attribute = models.TextField()
+    service_specific_id = StrippedCharField(max_length=30, blank=True, null=True)
+    service_specific_attribute = models.TextField(blank=True, null=True)
     connect_method = models.CharField(null=False, blank=False, max_length=30, choices=CONNECTIVITY_CHOICES, default="na")
 
     def __init__(self, *args, **kwargs):
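
adjust_scale() is the entry point for the scaling helpers added above: it picks slices by a name hint and converges their sliver count, deleting extras or creating new slivers via pick_node(). A short sketch mirroring the HpcService.save() usage later in this patch; the hint, target count, and lookup are illustrative:

    from hpc.models import HpcService

    service = HpcService.objects.all()[0]    # any Service subclass works
    exclude = [s for s in service.slices.all() if "cmi" in s.name]
    # grow/shrink the "hpc" slices to 5 slivers, avoiding nodes used by "cmi" slices
    service.adjust_scale(slice_hint="hpc", scale=5, exclusive_slices=exclude)
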
diff --git a/xos/core/models/slice.py b/xos/core/models/slice.py
index 4fc8489..0649d6f 100644
--- a/xos/core/models/slice.py
+++ b/xos/core/models/slice.py
@@ -99,6 +99,7 @@
             qs = Slice.objects.filter(id__in=slice_ids)
         return qs
 
+    """
     def delete(self, *args, **kwds):
         # delete networks associated with this slice
         from core.models.network import Network
@@ -112,6 +113,7 @@
         slice_privileges.delete() 
         # continue with normal delete
         super(Slice, self).delete(*args, **kwds) 
+    """
          
 
 class SliceRole(PlCoreBase):
diff --git a/xos/core/models/sliver.py b/xos/core/models/sliver.py
index e45152f..ff1e9b4 100644
--- a/xos/core/models/sliver.py
+++ b/xos/core/models/sliver.py
@@ -5,7 +5,7 @@
 from core.models import PlCoreBase,PlCoreBaseManager,PlCoreBaseDeletionManager
 from core.models.plcorebase import StrippedCharField
 from core.models import Image
-from core.models import Slice
+from core.models import Slice, SlicePrivilege
 from core.models import Node
 from core.models import Site
 from core.models import Deployment
@@ -170,3 +170,18 @@
             return None
         else:
             return 'ssh -o "ProxyCommand ssh -q %s@%s" ubuntu@%s' % (self.instance_id, self.node.name, self.instance_name)
+
+    def get_public_keys(self):
+        slice_memberships = SlicePrivilege.objects.filter(slice=self.slice)
+        pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
+
+        if self.creator.public_key:
+            pubkeys.add(self.creator.public_key)
+
+        if self.slice.creator.public_key:
+            pubkeys.add(self.slice.creator.public_key)
+
+        if self.slice.service and self.slice.service.public_key:
+            pubkeys.add(self.slice.service.public_key)
+
+        return pubkeys
diff --git a/xos/core/static/xos.css b/xos/core/static/xos.css
index 513dc06..282c9f4 100644
--- a/xos/core/static/xos.css
+++ b/xos/core/static/xos.css
@@ -182,7 +182,7 @@
   float: right;
   border: 2px darkGrey;
 }
-.ui-state-default, .ui-widget-content .ui-state-default, .ui-widget-header .ui-state-default{
+.ui-state-default #hometabs, .ui-widget-content .ui-state-default #hometabs, .ui-widget-header .ui-state-default {
 background: none !important;
 border-top: 0px !important;
 border-left: 0px !important;
diff --git a/xos/core/xoslib/methods/cordsubscriber.py b/xos/core/xoslib/methods/cordsubscriber.py
index 2337b21..08aa9d9 100644
--- a/xos/core/xoslib/methods/cordsubscriber.py
+++ b/xos/core/xoslib/methods/cordsubscriber.py
@@ -32,7 +32,7 @@
         cdn_enable = serializers.BooleanField()
         sliver_name = ReadOnlyField()
         image_name = ReadOnlyField()
-        routeable_subnet = serializers.CharField()
+        routeable_subnet = serializers.CharField(required=False)
 
         humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
 
diff --git a/xos/core/xoslib/methods/sshkeys.py b/xos/core/xoslib/methods/sshkeys.py
new file mode 100644
index 0000000..a714212
--- /dev/null
+++ b/xos/core/xoslib/methods/sshkeys.py
@@ -0,0 +1,56 @@
+from rest_framework.decorators import api_view
+from rest_framework.response import Response
+from rest_framework.reverse import reverse
+from rest_framework import serializers
+from rest_framework import generics
+from rest_framework.views import APIView
+from core.models import *
+from django.forms import widgets
+from syndicate_storage.models import Volume
+from django.core.exceptions import PermissionDenied
+from xos.exceptions import XOSNotFound
+
+class SSHKeyList(APIView):
+    method_kind = "list"
+    method_name = "sshkeys"
+
+    def get(self, request, format=None):
+        instances=[]
+        for sliver in self.get_queryset().all():
+            if sliver.instance_id:
+                instances.append( {"id": sliver.instance_id,
+                                   "public_keys": sliver.get_public_keys(),
+                                   "node_name": sliver.node.name } )
+
+        return Response(instances)
+
+    def get_queryset(self):
+        queryset = Sliver.objects.all()
+
+        node_name = self.request.QUERY_PARAMS.get('node_name', None)
+        if node_name is not None:
+            queryset = queryset.filter(node__name = node_name)
+
+        return queryset
+
+class SSHKeyDetail(APIView):
+    method_kind = "detail"
+    method_name = "sshkeys"
+
+    def get(self, request, format=None, pk=0):
+        slivers = self.get_queryset().filter(instance_id=pk)
+        if not slivers:
+            raise XOSNotFound("didn't find sliver for instance %s" % pk)
+        return Response( [ {"id": slivers[0].instance_id,
+                            "public_keys": slivers[0].get_public_keys(),
+                            "node_name": slivers[0].node.name } ])
+
+    def get_queryset(self):
+        queryset = Sliver.objects.all()
+
+        node_name = self.request.QUERY_PARAMS.get('node_name', None)
+        if node_name is not None:
+            queryset = queryset.filter(node__name = node_name)
+
+        return queryset
+
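
Assuming xoslib exposes this view under a path derived from method_name above (something like /xoslib/sshkeys/; the URL registration itself is not part of this patch), a consumer on a compute node could fetch the keys for its slivers roughly like this. Host, credentials, and node name are placeholders:

    import requests

    r = requests.get("http://localhost:8000/xoslib/sshkeys/",
                     params={"node_name": "node1.example.org"},   # filters by the sliver's node
                     auth=("admin@example.org", "password"))      # placeholder credentials
    for entry in r.json():
        print entry["id"], entry["node_name"], entry["public_keys"]
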
diff --git a/xos/core/xoslib/templates/xosCordSubscriber.html b/xos/core/xoslib/templates/xosCordSubscriber.html
index dbd4cfe..1a189c4 100644
--- a/xos/core/xoslib/templates/xosCordSubscriber.html
+++ b/xos/core/xoslib/templates/xosCordSubscriber.html
@@ -20,10 +20,10 @@
   <tr><td class="xos-label-cell xos-cord-label">Image:</td><td><%= model.attributes.image_name %></td></tr>
   <tr><td class="xos-label-cell xos-cord-label">Sliver Id:</td><td><%= model.attributes.sliver %></td></tr>
   <tr><td class="xos-label-cell xos-cord-label">Firewall:</td><td><input type="checkbox" name="firewall_enable" <% if (model.attributes.firewall_enable) print("checked"); %>>Enable<br>
-                                                                  <textarea name="firewall_rules"><%= model.attributes.firewall_rules %></textarea></td></tr>
+                                                                  <textarea name="firewall_rules" style="width:320px; height:80px"><%= model.attributes.firewall_rules %></textarea></td></tr>
   <tr><td class="xos-label-cell xos-cord-label">URL Filter:</td><td><input type="checkbox" name="url_filter_enable" <% if (model.attributes.url_filter_enable) print("checked"); %>>Enable<br>
-                                                                  <textarea name="url_filter_rules"><%= model.attributes.url_filter_rules %></textarea></td></tr>
-  <tr><td class="xos-label-cell xos-cord-label">CDN:</td><td><input type="checkbox" name="cdn_enable" <% if (model.attributes.firewall_enable) print("checked"); %>>Enable</td></tr>
+                                                                  <textarea name="url_filter_rules" style="width:320px; height:80px"><%= model.attributes.url_filter_rules %></textarea></td></tr>
+  <tr><td class="xos-label-cell xos-cord-label">CDN:</td><td><input type="checkbox" name="cdn_enable" <% if (model.attributes.cdn_enable) print("checked"); %>>Enable</td></tr>
   </table>
   </div>
 
diff --git a/xos/dependency_walker.py b/xos/dependency_walker.py
index 0b23136..ba9de11 100644
--- a/xos/dependency_walker.py
+++ b/xos/dependency_walker.py
@@ -71,9 +71,10 @@
 			except AttributeError:
 				if not missing_links.has_key(model+'.'+link):
 					print "Model %s missing link for dependency %s"%(model, link)
-                                        logger.log_exc("Model %s missing link for dependency %s"%(model, link))
+                                        logger.log_exc("WARNING: Model %s missing link for dependency %s."%(model, link))
 					missing_links[model+'.'+link]=True
 
+
 		if (peer):
 			try:
 				peer_objects = peer.all()
@@ -83,11 +84,13 @@
 				peer_objects = []
 
 			for o in peer_objects:
-				fn(o, object)
+				#if (isinstance(o,PlCoreBase)):
+				if (hasattr(o,'updated')):
+					fn(o, object)
 				# Uncomment the following line to enable recursion
 				# walk_inv_deps(fn, o)
 
-def p(x):
+def p(x,source):
 	print x,x.__class__.__name__
 	return
 
diff --git a/xos/hpc/admin.py b/xos/hpc/admin.py
index 20364b6..08a1cdb 100644
--- a/xos/hpc/admin.py
+++ b/xos/hpc/admin.py
@@ -10,7 +10,7 @@
 from django.utils import timezone
 from django.contrib.contenttypes import generic
 from suit.widgets import LinkedSelect
-from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline
+from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline, SliderWidget
 
 from functools import update_wrapper
 from django.contrib.admin.views.main import ChangeList
@@ -106,15 +106,30 @@
        # filtered_change_view rather than the default change_view.
        return FilteredChangeList
 
+class HpcServiceForm(forms.ModelForm):
+    scale = forms.IntegerField(widget = SliderWidget, required=False)
+
+    def __init__(self, *args, **kwargs):
+        super(HpcServiceForm, self).__init__(*args, **kwargs)
+        self.fields['scale'].initial = kwargs["instance"].scale if kwargs.get("instance") else None
+
+    def save(self, *args, **kwargs):
+        if self.cleaned_data['scale']:
+             self.instance.scale = self.cleaned_data['scale']
+
+        return super(HpcServiceForm, self).save(*args, **kwargs)
+
+
 class HpcServiceAdmin(ReadOnlyAwareAdmin):
     model = HpcService
     verbose_name = "HPC Service"
     verbose_name_plural = "HPC Service"
     list_display = ("backend_status_icon", "name","enabled")
     list_display_links = ('backend_status_icon', 'name', )
-    fieldsets = [(None, {'fields': ['backend_status_text', 'name','enabled','versionNumber', 'description', "cmi_hostname"], 'classes':['suit-tab suit-tab-general']})]
+    fieldsets = [(None, {'fields': ['backend_status_text', 'name','scale','enabled','versionNumber', 'description', "cmi_hostname"], 'classes':['suit-tab suit-tab-general']})]
     readonly_fields = ('backend_status_text', )
     inlines = [SliceInline,ServiceAttrAsTabInline]
+    form = HpcServiceForm
 
     extracontext_registered_admins = True
 
diff --git a/xos/hpc/models.py b/xos/hpc/models.py
index e915fbc..a3b7c90 100644
--- a/xos/hpc/models.py
+++ b/xos/hpc/models.py
@@ -17,6 +17,26 @@
 
     cmi_hostname = StrippedCharField(max_length=254, null=True, blank=True)
 
+    @property
+    def scale(self):
+        hpc_slices = [x for x in self.slices.all() if "hpc" in x.name]
+        if not hpc_slices:
+            return 0
+        return hpc_slices[0].slivers.count()
+
+    @scale.setter
+    def scale(self, value):
+        self.set_scale = value
+
+    def save(self, *args, **kwargs):
+        super(HpcService, self).save(*args, **kwargs)
+
+        # scale up/down
+        scale = getattr(self, "set_scale", None)
+        if scale is not None:
+            exclude_slices = [x for x in self.slices.all() if "cmi" in x.name]
+            self.adjust_scale(slice_hint="hpc", scale=scale, exclusive_slices = exclude_slices)
+
 class ServiceProvider(PlCoreBase):
     class Meta:
         app_label = "hpc"
diff --git a/xos/model_policies/model_policy_Slice.py b/xos/model_policies/model_policy_Slice.py
index a9936bd..b610601 100644
--- a/xos/model_policies/model_policy_Slice.py
+++ b/xos/model_policies/model_policy_Slice.py
@@ -1,16 +1,33 @@
+def handle_delete(slice):
+    from core.models import Controller, ControllerSlice, SiteDeployment, Network, NetworkSlice,NetworkTemplate, Slice
+    from collections import defaultdict
+
+    public_nets = []
+    private_net = None
+    networks = Network.objects.filter(owner=slice)
+
+    for n in networks:
+        n.delete()	
+    
+    # Note that sliceprivileges and slicecontrollers are autodeleted, through the dependency graph
 
 def handle(slice):
     from core.models import Controller, ControllerSlice, SiteDeployment, Network, NetworkSlice,NetworkTemplate, Slice
     from collections import defaultdict
 
+    print "MODEL POLICY: slice", slice
+
     # slice = Slice.get(slice_id)
 
     controller_slices = ControllerSlice.objects.filter(slice=slice)
     existing_controllers = [cs.controller for cs in controller_slices] 
         
+    print "MODEL POLICY: slice existing_controllers=", existing_controllers
+
     all_controllers = Controller.objects.all() 
     for controller in all_controllers:
         if controller not in existing_controllers:
+            print "MODEL POLICY: slice adding controller", controller
             sd = ControllerSlice(slice=slice, controller=controller)
             sd.save()
 
diff --git a/xos/model_policy.py b/xos/model_policy.py
index 3fa7218..9333adc 100644
--- a/xos/model_policy.py
+++ b/xos/model_policy.py
@@ -11,62 +11,105 @@
 from django.db.models import F, Q
 
 modelPolicyEnabled = True
+bad_instances=[]
 
 def EnableModelPolicy(x):
     global modelPolicyEnabled
     modelPolicyEnabled = x
 
+def update_wp(d, o):
+    try:
+        save_fields = []
+        if (d.write_protect != o.write_protect):
+            d.write_protect = o.write_protect
+            save_fields.append('write_protect')
+        if (save_fields):
+            d.save(update_fields=save_fields)
+    except AttributeError,e:
+        raise e
+
 def update_dep(d, o):
-	try:
-		if (d.updated < o.updated):
-			d.save(update_fields=['updated'])
-	except AttributeError,e:
-		raise e
-	
+    try:
+        print 'Trying to update %s'%d
+        save_fields = []
+        if (d.updated < o.updated):
+            save_fields = ['updated']
+
+        if (save_fields):
+            d.save(update_fields=save_fields)
+    except AttributeError,e:
+        raise e
+
 def delete_if_inactive(d, o):
-	#print "Deleting %s (%s)"%(d,d.__class__.__name__)
-	# d.delete()	
-	return
+    try:
+        d.delete()
+        print "Deleted %s (%s)"%(d,d.__class__.__name__)
+    except:
+        pass
+    return
+
 
 @atomic
 def execute_model_policy(instance, deleted):
-	# Automatic dirtying
-	walk_inv_deps(update_dep, instance)
+    # Automatic dirtying
+    if (instance in bad_instances):
+        return
 
-	sender_name = instance.__class__.__name__
-	policy_name = 'model_policy_%s'%sender_name
-	noargs = False
+    # These are the models whose children get deleted when they are deleted
+    delete_policy_models = ['Slice','Sliver','Network']
+    sender_name = instance.__class__.__name__
+    policy_name = 'model_policy_%s'%sender_name
+    noargs = False
 
-	if deleted:
-		walk_inv_deps(delete_if_inactive, instance)
-	else:
-		try:
-			policy_handler = getattr(model_policies, policy_name, None)
-			logger.error("POLICY HANDLER: %s %s" % (policy_name, policy_handler))                       
-			if policy_handler is not None:
-				policy_handler.handle(instance)
-		except:
-			logger.log_exc("Model Policy Error:") 
-			print "Policy Exceution Error"
+    if (not deleted):
+        walk_inv_deps(update_dep, instance)
+        walk_deps(update_wp, instance)
+    elif (sender_name in delete_policy_models):
+        walk_inv_deps(delete_if_inactive, instance)
 
-	instance.policed=datetime.now()
+
+
+    try:
+        policy_handler = getattr(model_policies, policy_name, None)
+        logger.error("POLICY HANDLER: %s %s" % (policy_name, policy_handler))
+        if policy_handler is not None:
+            if (deleted):
+                try:
+                    policy_handler.handle_delete(instance)
+                except AttributeError:
+                    pass
+            else:
+                policy_handler.handle(instance)
+    except:
+        logger.log_exc("Model Policy Error:")
+
+    try:
+        instance.policed=datetime.now()
         instance.save(update_fields=['policed'])
+    except:
+        logging.error('Object %r is defective'%instance)
+        bad_instances.append(instance)
 
 def run_policy():
-        from core.models import Sliver,Slice,Controller,Network,User,SlicePrivilege,Site,SitePrivilege,Image,ControllerSlice,ControllerUser,ControllerSite
-	while (True):
-		start = time.time()
-		models = [Sliver,Slice, Controller, Network, User, SlicePrivilege, Site, SitePrivilege, Image, ControllerSlice, ControllerSite, ControllerUser]
-		objects = []
-		
-		for m in models:
-        		res = m.objects.filter(Q(policed__lt=F('updated')) | Q(policed=None))
-			objects.extend(res)	
+    from core.models import Sliver,Slice,Controller,Network,User,SlicePrivilege,Site,SitePrivilege,Image,ControllerSlice,ControllerUser,ControllerSite
+    while (True):
+        start = time.time()
+        models = [Sliver,Slice, Controller, Network, User, SlicePrivilege, Site, SitePrivilege, Image, ControllerSlice, ControllerSite, ControllerUser]
+        objects = []
+        deleted_objects = []
 
-		for o in objects:
-			print "Working on %r"%o
-			execute_model_policy(o, False)
-		
-		
-		if (time.time()-start<1):
-			time.sleep(1)	
+        for m in models:
+            res = m.objects.filter(Q(policed__lt=F('updated')) | Q(policed=None))
+            objects.extend(res)
+            res = m.deleted_objects.filter(Q(policed__lt=F('updated')) | Q(policed=None))
+            deleted_objects.extend(res)
+
+        for o in objects:
+            execute_model_policy(o, o.deleted)
+
+        for o in deleted_objects:
+            execute_model_policy(o, True)
+
+
+        if (time.time()-start<1):
+            time.sleep(1)
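
execute_model_policy() resolves its handler module by name (model_policy_<ModelName>) and calls handle() for updates or, when the module defines it, handle_delete() for deletions. A new policy module therefore only needs the right filename and these two hooks, along these lines (Widget is a hypothetical model name):

    # xos/model_policies/model_policy_Widget.py  (hypothetical model)

    def handle(widget):
        # called when a Widget has policed < updated (or policed is None)
        pass

    def handle_delete(widget):
        # optional: called when a deleted Widget is policed
        pass
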
diff --git a/xos/observers/vbng/model-deps b/xos/observers/vbng/model-deps
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/xos/observers/vbng/model-deps
@@ -0,0 +1 @@
+{}
diff --git a/xos/observers/vbng/run.sh b/xos/observers/vbng/run.sh
new file mode 100755
index 0000000..7829ac0
--- /dev/null
+++ b/xos/observers/vbng/run.sh
@@ -0,0 +1,6 @@
+if [[ ! -e ./vbng-observer.py ]]; then
+    ln -s ../../xos-observer.py vbng-observer.py
+fi
+
+export XOS_DIR=/opt/xos
+python vbng-observer.py  -C $XOS_DIR/observers/vbng/vbng_observer_config
diff --git a/xos/observers/vbng/start.sh b/xos/observers/vbng/start.sh
new file mode 100755
index 0000000..5ceff27
--- /dev/null
+++ b/xos/observers/vbng/start.sh
@@ -0,0 +1,6 @@
+if [[ ! -e ./vbng-observer.py ]]; then
+    ln -s ../../xos-observer.py vbng-observer.py
+fi
+
+export XOS_DIR=/opt/xos
+nohup python vbng-observer.py  -C $XOS_DIR/observers/vbng/vbng_observer_config > /dev/null 2>&1 &
diff --git a/xos/observers/vbng/steps/sync_vbngtenant.py b/xos/observers/vbng/steps/sync_vbngtenant.py
new file mode 100644
index 0000000..64494f8
--- /dev/null
+++ b/xos/observers/vbng/steps/sync_vbngtenant.py
@@ -0,0 +1,85 @@
+import os
+import requests
+import socket
+import sys
+import base64
+from django.db.models import F, Q
+from xos.config import Config
+from observer.syncstep import SyncStep
+from observer.ansible import run_template_ssh
+from core.models import Service
+from cord.models import VCPEService, VCPETenant, VBNGTenant, VBNGService
+from hpc.models import HpcService, CDNPrefix
+from util.logger import Logger, logging
+
+VBNG_API = "http://<vnbg-addr>/onos/virtualbng/privateip/"
+
+# hpclibrary will be in steps/..
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncVBNGTenant(SyncStep):
+    provides=[VCPETenant]
+    observes=VCPETenant
+    requested_interval=0
+
+    def __init__(self, **args):
+        SyncStep.__init__(self, **args)
+
+    def fetch_pending(self, deleted):
+        if (not deleted):
+            objs = VBNGTenant.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+        else:
+            objs = VBNGTenant.get_deleted_tenant_objects()
+
+        return objs
+
+    def defer_sync(self, o, reason):
+        o.backend_register="{}"
+        o.backend_status = "2 - " + reason
+        o.save(update_fields=['enacted','backend_status','backend_register'])
+        logger.info("defer object %s due to %s" % (str(o), reason))
+
+    def sync_record(self, o):
+        logger.info("sync'ing VBNGTenant %s" % str(o))
+
+        vcpes = VCPETenant.get_tenant_objects().all()
+        vcpes = [x for x in vcpes if (x.vbng is not None) and (x.vbng.id == o.id)]
+        if not vcpes:
+            raise Exception("No vCPE tenant is associated with vBNG %s" % str(o.id))
+        if len(vcpes)>1:
+            raise Exception("More than one vCPE tenant is associated with vBNG %s" % str(o.id))
+
+        vcpe = vcpes[0]
+        sliver = vcpe.sliver
+
+        if not sliver:
+            raise Exception("No sliver associated with vBNG %s" % str(o.id))
+
+        external_ns = None
+        for ns in sliver.networkslivers.all():
+            if (ns.ip) and (ns.network.template.visibility=="private") and (ns.network.template.translation=="none"):
+                # need some logic here to find the right network
+                external_ns = ns
+
+        if not external_ns:
+            self.defer_sync(o, "private network is not filled in yet")
+            return
+
+        private_ip = external_ns.ip
+
+        if not o.routeable_subnet:
+            print "This is where we would call Pingping's API"
+            o.routeable_subnet = "placeholder-from-observer"
+
+            # r = requests.post(VBNG_API + "%s" % private_ip, )
+            # public_ip = r.json()
+            # o.routeable_subnet = public_ip
+
+        o.save()
+
+    def delete_record(self, m):
+        pass
+
diff --git a/xos/observers/vbng/stop.sh b/xos/observers/vbng/stop.sh
new file mode 100755
index 0000000..d49591e
--- /dev/null
+++ b/xos/observers/vbng/stop.sh
@@ -0,0 +1 @@
+pkill -9 -f vbng-observer.py
diff --git a/xos/observers/vbng/vbng_observer_config b/xos/observers/vbng/vbng_observer_config
new file mode 100644
index 0000000..217ce71
--- /dev/null
+++ b/xos/observers/vbng/vbng_observer_config
@@ -0,0 +1,37 @@
+
+[plc]
+name=plc
+deployment=VICCI
+
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+[api]
+host=128.112.171.237
+port=8000
+ssl_key=None
+ssl_cert=None
+ca_ssl_cert=None
+ratelimit_enabled=0
+omf_enabled=0
+mail_support_address=support@localhost
+nova_enabled=True
+
+[observer]
+name=vbng
+dependency_graph=/opt/xos/observers/vbng/model-deps
+steps_dir=/opt/xos/observers/vbng/steps
+sys_dir=/opt/xos/observers/vbng/sys
+deleters_dir=/opt/xos/observers/vbng/deleters
+log_file=console
+#/var/log/hpc.log
+driver=None
+pretend=False
+
+[feefie]
+client_id='vicci_dev_central'
+user_id='pl'
diff --git a/xos/observers/vcpe/model-deps b/xos/observers/vcpe/model-deps
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/xos/observers/vcpe/model-deps
@@ -0,0 +1 @@
+{}
diff --git a/xos/observers/vcpe/run.sh b/xos/observers/vcpe/run.sh
old mode 100644
new mode 100755
diff --git a/xos/observers/vcpe/start.sh b/xos/observers/vcpe/start.sh
old mode 100644
new mode 100755
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.py b/xos/observers/vcpe/steps/sync_vcpetenant.py
index cc1d520..3fc9310 100644
--- a/xos/observers/vcpe/steps/sync_vcpetenant.py
+++ b/xos/observers/vcpe/steps/sync_vcpetenant.py
@@ -57,10 +57,15 @@
                             except:
                                 pass
 
-        volts = [x for x in VOLTTenant.get_tenant_objects() if x.vcpe.id==o.id]
+        cdn_prefixes = []
+        for prefix in CDNPrefix.objects.all():
+            cdn_prefixes.append(prefix.prefix)
+
+        volts = [x for x in VOLTTenant.get_tenant_objects() if (x.vcpe is not None) and (x.vcpe.id==o.id)]
         vlan_ids = [x.vlan_id for x in volts]
         return {"vlan_ids": vlan_ids,
-                "dnsdemux_ip": dnsdemux_ip}
+                "dnsdemux_ip": dnsdemux_ip,
+                "cdn_prefixes": cdn_prefixes}
 
     def get_sliver(self, o):
         # We need to know what slivers is associated with the object.
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.yaml b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
index d45b38a..6c2bad4 100644
--- a/xos/observers/vcpe/steps/sync_vcpetenant.yaml
+++ b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
@@ -16,6 +16,10 @@
         {% for firewall_rule in firewall_rules.split("\n") %}
         - {{ firewall_rule }}
         {% endfor %}
+      cdn_prefixes:
+        {% for prefix in cdn_prefixes %}
+        - {{ prefix }}
+        {% endfor %}
 
   tasks:
     - name: make sure /etc/dnsmasq.d exists
diff --git a/xos/observers/vcpe/stop.sh b/xos/observers/vcpe/stop.sh
old mode 100644
new mode 100755
diff --git a/xos/observers/vcpe/templates/dnsmasq_servers.j2 b/xos/observers/vcpe/templates/dnsmasq_servers.j2
index ac97035..359070a 100644
--- a/xos/observers/vcpe/templates/dnsmasq_servers.j2
+++ b/xos/observers/vcpe/templates/dnsmasq_servers.j2
@@ -3,7 +3,9 @@
 
 {% if cdn_enable %}
 # CDN 
-server=/foo.com/{{ dnsdemux_ip }}
+{% for prefix in cdn_prefixes %}
+server=/{{ prefix }}/{{ dnsdemux_ip }}
+{% endfor %}
 {% endif %}
 
 {% if url_filter_enable %}
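
With cdn_enable set, the loop above now emits one dnsmasq server= line per CDNPrefix gathered in sync_vcpetenant.py instead of the hard-coded foo.com entry. Rendering the fragment by hand shows the effect (prefixes and IP are made up):

    from jinja2 import Template

    frag = Template("{% for prefix in cdn_prefixes %}"
                    "server=/{{ prefix }}/{{ dnsdemux_ip }}\n"
                    "{% endfor %}")
    print frag.render(cdn_prefixes=["cdn.example.com", "video.example.com"],
                      dnsdemux_ip="10.0.0.5")
    # server=/cdn.example.com/10.0.0.5
    # server=/video.example.com/10.0.0.5
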
diff --git a/xos/openstack_observer/ansible.py b/xos/openstack_observer/ansible.py
index cf6106a..17504b3 100644
--- a/xos/openstack_observer/ansible.py
+++ b/xos/openstack_observer/ansible.py
@@ -115,13 +115,13 @@
     (hosts_handle, hosts_pathname) = tempfile.mkstemp()
 
     try:
-        proxy_command = "ProxyCommand ssh -q -i %s %s@%s" % (private_key_pathname, instance_id, hostname)
+        proxy_command = "ProxyCommand ssh -q -i %s -o StrictHostKeyChecking=no %s@%s" % (private_key_pathname, instance_id, hostname)
 
         os.write(private_key_handle, private_key)
         os.close(private_key_handle)
 
         os.write(config_handle, "[ssh_connection]\n")
-        os.write(config_handle, 'ssh_args = -o "%s"\n' % proxy_command)
+        os.write(config_handle, 'ssh_args = -o "%s" -o StrictHostKeyChecking=no\n' % proxy_command)
         os.write(config_handle, 'scp_if_ssh = True\n')
         os.close(config_handle)
 
diff --git a/xos/openstack_observer/backend.py b/xos/openstack_observer/backend.py
index 23ec352..5657491 100644
--- a/xos/openstack_observer/backend.py
+++ b/xos/openstack_observer/backend.py
@@ -33,7 +33,6 @@
         #event_manager_thread = threading.Thread(target=event_manager.run)
         #event_manager_thread.start()
 
-        print "entering keyboard wait loop"
         while True:
             try:
                 time.sleep(1000)
@@ -42,6 +41,6 @@
                 # TODO: See about setting the threads as daemons
                 observer_thread._Thread__stop()
                 if model_policy_thread:
-                    model_policy_thread._Threat__stop()
+                    model_policy_thread._Thread__stop()
                 sys.exit(1)
 
diff --git a/xos/openstack_observer/event_loop.py b/xos/openstack_observer/event_loop.py
index 04b5c97..a63ff3c 100644
--- a/xos/openstack_observer/event_loop.py
+++ b/xos/openstack_observer/event_loop.py
@@ -193,7 +193,8 @@
 		pp.pprint(step_graph)
 		self.ordered_steps = toposort(self.dependency_graph, map(lambda s:s.__name__,self.sync_steps))
 		#self.ordered_steps = ['SyncRoles', 'SyncControllerSites', 'SyncControllerSitePrivileges','SyncImages', 'SyncControllerImages','SyncControllerUsers','SyncControllerUserSitePrivileges','SyncControllerSlices', 'SyncControllerSlicePrivileges', 'SyncControllerUserSlicePrivileges', 'SyncControllerNetworks','SyncSlivers']
-		#self.ordered_steps = ['SyncControllerSites','SyncControllerUsers','SyncControllerSlices','SyncControllerNetworks']
+		#self.ordered_steps = ['SyncControllerSites','SyncRoles','SyncControllerUsers','SyncControllerSlices','SyncControllerNetworks']
+		#self.ordered_steps = ['SyncControllerNetworks']
 		#self.ordered_steps = ['SyncSlivers','SyncNetworkSlivers']
 
 		print "Order of steps=",self.ordered_steps
diff --git a/xos/openstack_observer/run_ansible_verbose b/xos/openstack_observer/run_ansible_verbose
new file mode 100755
index 0000000..d17cad7
--- /dev/null
+++ b/xos/openstack_observer/run_ansible_verbose
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+source /opt/ansible/hacking/env-setup >> /dev/null
+ansible-playbook -vvv "$@"
diff --git a/xos/openstack_observer/steps/sync_controller_images.py b/xos/openstack_observer/steps/sync_controller_images.py
index 19009dd..94b18a0 100644
--- a/xos/openstack_observer/steps/sync_controller_images.py
+++ b/xos/openstack_observer/steps/sync_controller_images.py
@@ -8,6 +8,7 @@
 from core.models import Image, ControllerImages
 from util.logger import observer_logger as logger 
 from observer.ansible import *
+import json
 
 class SyncControllerImages(OpenStackSyncStep):
     provides=[ControllerImages]
@@ -23,6 +24,11 @@
 
     def sync_record(self, controller_image):
         logger.info("Working on image %s on controller %s" % (controller_image.image.name, controller_image.controller))
+
+	controller_register = json.loads(controller_image.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_image.controller.name)
+
         image_fields = {'endpoint':controller_image.controller.auth_url,
                         'admin_user':controller_image.controller.admin_user,
                         'admin_password':controller_image.controller.admin_password,
diff --git a/xos/openstack_observer/steps/sync_controller_networks.py b/xos/openstack_observer/steps/sync_controller_networks.py
index 8866e53..d327b7b 100644
--- a/xos/openstack_observer/steps/sync_controller_networks.py
+++ b/xos/openstack_observer/steps/sync_controller_networks.py
@@ -11,6 +11,7 @@
 from util.logger import observer_logger as logger
 from observer.ansible import *
 from openstack.driver import OpenStackDriver
+import json
 
 import pdb
 
@@ -44,7 +45,8 @@
                     'name':network_name,
                     'subnet_name':subnet_name,
                     'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
-                    'cidr':cidr
+                    'cidr':cidr,
+                    'delete':False	
                     }
 
         res = run_template('sync_controller_networks.yaml', network_fields, path = 'controller_networks',expected_num=2)
@@ -61,6 +63,10 @@
     def sync_record(self, controller_network):
         logger.info("sync'ing network controller %s for network %s slice %s controller %s" % (controller_network, controller_network.network, str(controller_network.network.owner), controller_network.controller))
 
+	controller_register = json.loads(controller_network.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_network.controller.name)
+
         if not controller_network.controller.admin_user:
             logger.info("controller %r has no admin_user, skipping" % controller_network.controller)
             return
@@ -70,6 +76,32 @@
 	    logger.info("saved network controller: %s" % (controller_network))
 
     def delete_record(self, controller_network):
+	controller_register = json.loads(controller_network.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_network.controller.name)
+
+	try:
+        	slice = controller_network.network.owner # XXX: FIXME!!
+        except:
+                raise Exception('Could not get slice for Network %s'%controller_network.network.name)
+
+	network_name = controller_network.network.name
+        subnet_name = '%s-%d'%(network_name,controller_network.pk)
+	cidr = controller_network.subnet
+	network_fields = {'endpoint':controller_network.controller.auth_url,
+                    'admin_user':slice.creator.email, # XXX: FIXME
+                    'tenant_name':slice.name, # XXX: FIXME
+                    'admin_password':slice.creator.remote_password,
+                    'name':network_name,
+                    'subnet_name':subnet_name,
+                    'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
+                    'cidr':cidr,
+		    'delete':True	
+                    }
+
+        res = run_template('sync_controller_networks.yaml', network_fields, path = 'controller_networks',expected_num=1)
+
+	"""
         driver = OpenStackDriver().client_driver(caller=controller_network.network.owner.creator,
                                                  tenant=controller_network.network.owner.name,
                                                  controller=controller_network.controller.name)
@@ -81,3 +113,4 @@
             driver.delete_router(controller_network.router_id)
         if controller_network.net_id:
             driver.delete_network(controller_network.net_id)
+	"""
diff --git a/xos/openstack_observer/steps/sync_controller_networks.yaml b/xos/openstack_observer/steps/sync_controller_networks.yaml
index 8f0d4c1..6754c47 100644
--- a/xos/openstack_observer/steps/sync_controller_networks.yaml
+++ b/xos/openstack_observer/steps/sync_controller_networks.yaml
@@ -15,6 +15,7 @@
         state=present
         {% endif %}
         shared=true
+  {% if not delete %}
   - quantum_subnet:
         auth_url={{ endpoint }} 
         login_username={{ admin_user }}
@@ -30,3 +31,4 @@
         no_gateway=true 
         cidr={{ cidr }}
         {% endif %}
+  {% endif %}
diff --git a/xos/openstack_observer/steps/sync_controller_site_privileges.py b/xos/openstack_observer/steps/sync_controller_site_privileges.py
index 499a0ff..6a13736 100644
--- a/xos/openstack_observer/steps/sync_controller_site_privileges.py
+++ b/xos/openstack_observer/steps/sync_controller_site_privileges.py
@@ -9,6 +9,7 @@
 from core.models.controlleruser import ControllerUser, ControllerSitePrivilege
 from util.logger import observer_logger as logger
 from observer.ansible import *
+import json
 
 class SyncControllerSitePrivileges(OpenStackSyncStep):
     provides=[SitePrivilege]
@@ -25,6 +26,11 @@
     def sync_record(self, controller_site_privilege):
         logger.info("sync'ing controler_site_privilege %s at controller %s" % (controller_site_privilege, controller_site_privilege.controller))
 
+	controller_register = json.loads(controller_site_privilege.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_site_privilege.controller.name)
+
+
         if not controller_site_privilege.controller.admin_user:
             logger.info("controller %r has no admin_user, skipping" % controller_site_privilege.controller)
             return
@@ -68,6 +74,10 @@
             controller_site_privilege.save()
 
     def delete_record(self, controller_site_privilege):
+	controller_register = json.loads(controller_site_privilege.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_site_privilege.controller.name)
+
         if controller_site_privilege.role_id:
             driver = self.driver.admin_driver(controller=controller_site_privilege.controller)
             user = ControllerUser.objects.get(
diff --git a/xos/openstack_observer/steps/sync_controller_sites.py b/xos/openstack_observer/steps/sync_controller_sites.py
index acb6ba9..f101315 100644
--- a/xos/openstack_observer/steps/sync_controller_sites.py
+++ b/xos/openstack_observer/steps/sync_controller_sites.py
@@ -6,6 +6,7 @@
 from core.models.site import *
 from observer.ansible import *
 from util.logger import observer_logger as logger
+import json
 
 class SyncControllerSites(OpenStackSyncStep):
     requested_interval=0
@@ -17,6 +18,10 @@
         return pending.filter(controller__isnull=False)
 
     def sync_record(self, controller_site):
+	controller_register = json.loads(controller_site.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_site.controller.name)
+
 	template = os_template_env.get_template('sync_controller_sites.yaml')
 	tenant_fields = {'endpoint':controller_site.controller.auth_url,
 		         'admin_user': controller_site.controller.admin_user,
@@ -34,6 +39,10 @@
         controller_site.save()
             
     def delete_record(self, controller_site):
+	controller_register = json.loads(controller_site.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_site.controller.name)
+
 	if controller_site.tenant_id:
             driver = self.driver.admin_driver(controller=controller_site.controller)
             driver.delete_tenant(controller_site.tenant_id)
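
The same disabled-controller guard is repeated in each sync step touched by this patch (images, networks, sites, slices, users, slivers, and the privilege steps): parse the controller's backend_register JSON and bail out when it is flagged disabled. It could equally be factored into one helper, roughly (a sketch, not part of this patch):

    import json

    def assert_controller_enabled(controller):
        # backend_register is a JSON blob stored on the Controller model
        register = json.loads(controller.backend_register)
        if register.get('disabled', False):
            raise Exception('Controller %s is disabled' % controller.name)
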
diff --git a/xos/openstack_observer/steps/sync_controller_slice_privileges.py b/xos/openstack_observer/steps/sync_controller_slice_privileges.py
index f1600ca..38f23c2 100644
--- a/xos/openstack_observer/steps/sync_controller_slice_privileges.py
+++ b/xos/openstack_observer/steps/sync_controller_slice_privileges.py
@@ -9,6 +9,7 @@
 from core.models.controlleruser import ControllerUser, ControllerSlicePrivilege
 from observer.ansible import *
 from util.logger import observer_logger as logger
+import json
 
 class SyncControllerSlicePrivileges(OpenStackSyncStep):
     provides=[SlicePrivilege]
@@ -25,6 +26,10 @@
     def sync_record(self, controller_slice_privilege):
         logger.info("sync'ing controler_slice_privilege %s at controller %s" % (controller_slice_privilege, controller_slice_privilege.controller))
 
+	controller_register = json.loads(controller_slice_privilege.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_slice_privilege.controller.name)
+
         if not controller_slice_privilege.controller.admin_user:
             logger.info("controller %r has no admin_user, skipping" % controller_slice_privilege.controller)
             return
@@ -68,6 +73,10 @@
             controller_slice_privilege.save()
 
     def delete_record(self, controller_slice_privilege):
+	controller_register = json.loads(controller_slice_privilege.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_slice_privilege.controller.name)
+
         if controller_slice_privilege.role_id:
             driver = self.driver.admin_driver(controller=controller_slice_privilege.controller)
             user = ControllerUser.objects.get(
diff --git a/xos/openstack_observer/steps/sync_controller_slices.py b/xos/openstack_observer/steps/sync_controller_slices.py
index 591a1b6..8d4a5e0 100644
--- a/xos/openstack_observer/steps/sync_controller_slices.py
+++ b/xos/openstack_observer/steps/sync_controller_slices.py
@@ -5,12 +5,11 @@
 from django.db.models import F, Q
 from xos.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
-from core.models import User
-from core.models.slice import Slice, ControllerSlice
-from core.models.controlleruser import ControllerUser
+from core.models import *
 from observer.ansible import *
 from openstack.driver import OpenStackDriver
 from util.logger import observer_logger as logger
+import json
 
 class SyncControllerSlices(OpenStackSyncStep):
     provides=[Slice]
@@ -26,6 +25,10 @@
     def sync_record(self, controller_slice):
         logger.info("sync'ing slice controller %s" % controller_slice)
 
+        controller_register = json.loads(controller_slice.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+            raise Exception('Controller %s is disabled'%controller_slice.controller.name)
+
         if not controller_slice.controller.admin_user:
             logger.info("controller %r has no admin_user, skipping" % controller_slice.controller)
             return
@@ -55,31 +58,38 @@
         tenant_id = res[0]['id']
         if (not controller_slice.tenant_id):
             try:
-                    driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
-                    driver.shell.nova.quotas.update(tenant_id=controller_slice.tenant_id, instances=int(controller_slice.slice.max_slivers))
+                driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
+                driver.shell.nova.quotas.update(tenant_id=controller_slice.tenant_id, instances=int(controller_slice.slice.max_slivers))
             except:
-                    logger.log_exc('Could not update quota for %s'%controller_slice.slice.name)
-                    raise Exception('Could not update quota for %s'%controller_slice.slice.name)
-                
+                logger.log_exc('Could not update quota for %s'%controller_slice.slice.name)
+                raise Exception('Could not update quota for %s'%controller_slice.slice.name)
+
             controller_slice.tenant_id = tenant_id
             controller_slice.backend_status = '1 - OK'
             controller_slice.save()
 
 
     def delete_record(self, controller_slice):
-        user = User.objects.get(id=controller_slice.slice.creator.id)
-        driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
-        client_driver = driver.client_driver(caller=user,
-                                             tenant=controller_slice.slice.name,
-                                             controller=controller_slice.controller)
+        controller_register = json.loads(controller_slice.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+            raise Exception('Controller %s is disabled'%controller_slice.controller.name)
 
-        if controller_slice.router_id and controller_slice.subnet_id:
-            client_driver.delete_router_interface(controller_slice.router_id, controller_slice.subnet_id)
-        if controller_slice.subnet_id:
-            client_driver.delete_subnet(controller_slice.subnet_id)
-        if controller_slice.router_id:
-            client_driver.delete_router(controller_slice.router_id)
-        if controller_slice.network_id:
-            client_driver.delete_network(controller_slice.network_id)
-        if controller_slice.tenant_id:
-            driver.delete_tenant(controller_slice.tenant_id)
+        controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
+                                                              controller=controller_slice.controller)
+        if not controller_users:
+            raise Exception("slice createor %s has not accout at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
+        else:
+            controller_user = controller_users[0]
+
+        tenant_fields = {'endpoint':controller_slice.controller.auth_url,
+                          'admin_user': controller_slice.controller.admin_user,
+                          'admin_password': controller_slice.controller.admin_password,
+                          'admin_tenant': 'admin',
+                          'tenant': controller_slice.slice.name,
+                          'tenant_description': controller_slice.slice.description,
+                          'name':controller_user.user.email,
+                          'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
+                          'delete': True}
+
+        expected_num = 1
+        run_template('sync_controller_slices.yaml', tenant_fields, path='controller_slices', expected_num=expected_num)
diff --git a/xos/openstack_observer/steps/sync_controller_slices.yaml b/xos/openstack_observer/steps/sync_controller_slices.yaml
index de1caf4..380f001 100644
--- a/xos/openstack_observer/steps/sync_controller_slices.yaml
+++ b/xos/openstack_observer/steps/sync_controller_slices.yaml
@@ -2,7 +2,11 @@
 - hosts: 127.0.0.1
   connection: local
   tasks:
+  {% if delete %}
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}" state=absent
+  {% else %}	
   - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}"
   {% for role in roles %}
   - keystone_user: endpoint={{ endpoint}} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} user="{{ name }}" role={{ role }} tenant={{ tenant }}
   {% endfor %}
+  {% endif %} 
diff --git a/xos/openstack_observer/steps/sync_controller_users.py b/xos/openstack_observer/steps/sync_controller_users.py
index acb3050..47d1096 100644
--- a/xos/openstack_observer/steps/sync_controller_users.py
+++ b/xos/openstack_observer/steps/sync_controller_users.py
@@ -9,6 +9,7 @@
 from core.models.controlleruser import ControllerUser
 from observer.ansible import *
 from util.logger import observer_logger as logger
+import json
 
 class SyncControllerUsers(OpenStackSyncStep):
     provides=[User]
@@ -25,6 +26,10 @@
     def sync_record(self, controller_user):
         logger.info("sync'ing user %s at controller %s" % (controller_user.user, controller_user.controller))
 
+	controller_register = json.loads(controller_user.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_user.controller.name)
+
         if not controller_user.controller.admin_user:
             logger.info("controller %r has no admin_user, skipping" % controller_user.controller)
             return
@@ -72,6 +77,10 @@
             controller_user.save()
 
     def delete_record(self, controller_user):
+	controller_register = json.loads(controller_user.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_user.controller.name)
+
         if controller_user.kuser_id:
             driver = self.driver.admin_driver(controller=controller_user.controller)
             driver.delete_user(controller_user.kuser_id)
diff --git a/xos/openstack_observer/steps/sync_slivers.py b/xos/openstack_observer/steps/sync_slivers.py
index 9ee7cfa..9b5dd99 100644
--- a/xos/openstack_observer/steps/sync_slivers.py
+++ b/xos/openstack_observer/steps/sync_slivers.py
@@ -29,6 +29,10 @@
 
     def sync_record(self, sliver):
         logger.info("sync'ing sliver:%s slice:%s controller:%s " % (sliver, sliver.slice.name, sliver.node.site_deployment.controller))
+        controller_register = json.loads(sliver.node.site_deployment.controller.backend_register)
+
+        if (controller_register.get('disabled',False)):
+            raise Exception('Controller %s is disabled'%sliver.node.site_deployment.controller.name)
 
         metadata_update = {}
         if (sliver.numberCores):
@@ -143,6 +147,11 @@
         sliver.save()
 
     def delete_record(self, sliver):
+        controller_register = json.loads(sliver.node.site_deployment.controller.backend_register)
+
+        if (controller_register.get('disabled',False)):
+            raise Exception('Controller %s is disabled'%sliver.node.site_deployment.controller.name)
+
         sliver_name = '%s-%d'%(sliver.slice.name,sliver.id)
         controller = sliver.node.site_deployment.controller
         tenant_fields = {'endpoint':controller.auth_url,
@@ -156,12 +165,12 @@
                      'delete': True}
 
         try:
-               res = run_template('sync_slivers.yaml', tenant_fields,path='slivers', expected_num=1)
+            res = run_template('sync_slivers.yaml', tenant_fields,path='slivers', expected_num=1)
         except Exception,e:
-               print "Could not sync %s"%sliver_name
-               #import traceback
-               #traceback.print_exc()
-               raise e
+            print "Could not sync %s"%sliver_name
+            #import traceback
+            #traceback.print_exc()
+            raise e
 
         if (len(res)!=1):
             raise Exception('Could not delete sliver %s'%sliver.slice.name)
diff --git a/xos/openstack_observer/syncstep.py b/xos/openstack_observer/syncstep.py
index a4d591d..b752760 100644
--- a/xos/openstack_observer/syncstep.py
+++ b/xos/openstack_observer/syncstep.py
@@ -108,7 +108,6 @@
                     next_run = scratchpad['next_run']
                     if (not backoff_disabled and next_run>time.time()):
                         sync_failed = True
-                        print "BACKING OFF, exponent = %d"%scratchpad['exponent']
             except:
                 pass
 
@@ -176,5 +175,11 @@
 
         return failed
 
+    def sync_record(self, o):
+        return
+
+    def delete_record(self, o):
+        return
+
     def __call__(self, **args):
         return self.call(**args)
diff --git a/xos/scripts/opencloud b/xos/scripts/opencloud
index 4a82c03..94edc46 100755
--- a/xos/scripts/opencloud
+++ b/xos/scripts/opencloud
@@ -135,6 +135,7 @@
     python ./manage.py makemigrations hpc
     python ./manage.py makemigrations requestrouter
     python ./manage.py makemigrations syndicate_storage
+    python ./manage.py makemigrations cord
     #python ./manage.py makemigrations servcomp
 }
 
diff --git a/xos/tools/cleanup_unique.py b/xos/tools/cleanup_unique.py
new file mode 100644
index 0000000..0ee0c7a
--- /dev/null
+++ b/xos/tools/cleanup_unique.py
@@ -0,0 +1,45 @@
+import os
+import sys
+sys.path.append("/opt/xos")
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
+import django
+from core.models import *
+from hpc.models import *
+from cord.models import *
+django.setup()
+
+
+for obj in ControllerNetwork.objects.all():
+     conflicts = ControllerNetwork.objects.filter(network=obj.network, controller=obj.controller)
+     for conflict in conflicts:
+         if conflict.id != obj.id:
+             print "Purging", conflict
+             conflict.delete(purge=True)
+
+for obj in NetworkSlice.objects.all():
+     conflicts = NetworkSlice.objects.filter(network=obj.network, slice=obj.slice)
+     for conflict in conflicts:
+         if conflict.id != obj.id:
+             print "Purging", conflict        
+             conflict.delete(purge=True)
+
+for obj in NetworkSliver.objects.all():
+     conflicts = NetworkSliver.objects.filter(network=obj.network, sliver=obj.sliver)
+     for conflict in conflicts:
+         if conflict.id != obj.id:
+             print "Purging", conflict 
+             conflict.delete(purge=True)
+
+for obj in DeploymentPrivilege.objects.all():
+     conflicts = DeploymentPrivilege.objects.filter(user=obj.user, deployment=obj.deployment, role=obj.role)
+     for conflict in conflicts:
+         if conflict.id != obj.id:
+             print "Purging", conflict 
+             conflict.delete(purge=True)
+
+for obj in SiteDeployment.objects.all():
+     conflicts = SiteDeployment.objects.filter(site=obj.site, deployment=obj.deployment, controller=obj.controller)
+     for conflict in conflicts:
+         if conflict.id != obj.id:
+             print "Purging", conflict 
+             conflict.delete(purge=True)
diff --git a/xos/xos/exceptions.py b/xos/xos/exceptions.py
index c13de49..9ce38a3 100644
--- a/xos/xos/exceptions.py
+++ b/xos/xos/exceptions.py
@@ -20,6 +20,13 @@
                             "specific_error": why,
                             "fields": fields})
 
+class XOSNotFound(RestFrameworkPermissionDenied):
+    status_code=404
+    def __init__(self, why="object not found", fields={}):
+        APIException.__init__(self, {"error": "XOSNotFound",
+                            "specific_error": why,
+                            "fields": fields})
+
 class XOSValidationError(APIException):
     status_code=403
     def __init__(self, why="validation error", fields={}):
diff --git a/xos/xos/settings.py b/xos/xos/settings.py
index 07b4843..26ebbf2 100644
--- a/xos/xos/settings.py
+++ b/xos/xos/settings.py
@@ -145,6 +145,7 @@
     'django_evolution',
     'core',
     'hpc',
+    'cord',
     'requestrouter',
 #    'urlfilter',
 #    'servcomp',