Merge branch 'master' of github.com:open-cloud/xos
diff --git a/Dockerfile b/Dockerfile
index b722a3c..dafc5d9 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -23,6 +23,7 @@
 RUN DEBIAN_FRONTEND=noninteractive apt-get install -y wget
 RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-dev
 RUN DEBIAN_FRONTEND=noninteractive apt-get install -y libyaml-dev
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y pkg-config
 
 RUN pip install django==1.7
 RUN pip install djangorestframework==2.4.4
@@ -42,6 +43,7 @@
 RUN pip install django-ipware
 RUN pip install django-encrypted-fields
 RUN pip install python-keyczar
+RUN pip install pygraphviz
 
 RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-keystoneclient
 RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-novaclient
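Note: pip-installing pygraphviz compiles against the Graphviz C libraries, so pkg-config alone is usually not enough. If graphviz and its headers are not already installed elsewhere in this Dockerfile (not shown in this hunk), a line like the following would also be needed; a hedged sketch, assuming a Debian/Ubuntu base image:

    RUN DEBIAN_FRONTEND=noninteractive apt-get install -y graphviz libgraphviz-dev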
diff --git a/xos.spec b/xos.spec
index 435285c..c585e7e 100644
--- a/xos.spec
+++ b/xos.spec
@@ -1,7 +1,7 @@
 Summary: OpenCloud core services
 Name: xos
 Version: 1.2.0
-Release: 4
+Release: 5
 License: GPL+
 Group: Development/Tools
 Source0: %{_tmppath}/%{name}-%{version}.tar.gz
diff --git a/xos/cord/models.py b/xos/cord/models.py
index a0f712d..346fcc9 100644
--- a/xos/cord/models.py
+++ b/xos/cord/models.py
@@ -1,5 +1,5 @@
 from django.db import models
-from core.models import Service, PlCoreBase, Slice, Sliver, Tenant, Node, Image
+from core.models import Service, PlCoreBase, Slice, Sliver, Tenant, Node, Image, User
 from core.models.plcorebase import StrippedCharField
 import os
 from django.db import models
@@ -24,17 +24,27 @@
 t.caller = User.objects.all()[0]
 t.save()
 
-for v in VOLTTenant.objects.all():
+for v in VOLTTenant.get_tenant_objects().all():
     v.caller = User.objects.all()[0]
     v.delete()
 
-for v in VCPETenant.objects.all():
+for v in VCPETenant.get_tenant_objects().all():
     v.caller = User.objects.all()[0]
     v.delete()
 
-for v in VOLTTenant.objects.all():
+for v in VOLTTenant.get_tenant_objects().all():
     v.caller = User.objects.all()[0]
     v.delete()
+
+for v in VOLTTenant.get_tenant_objects().all():
+    if not v.creator:
+        v.creator= User.objects.all()[0]
+        v.save()
+
+for v in VCPETenant.get_tenant_objects().all():
+    if not v.creator:
+        v.creator= User.objects.all()[0]
+        v.save()
 """
 
 class ConfigurationError(Exception):
@@ -85,7 +95,7 @@
         if not vcpes:
             return None
         vcpe=vcpes[0]
-        vcpe.caller = getattr(self, "caller", None)
+        vcpe.caller = self.creator
         self.cached_vcpe = vcpe
         return vcpe
 
@@ -97,6 +107,28 @@
             self.cached_vcpe=None
         self.set_attribute("vcpe_id", value)
 
+    @property
+    def creator(self):
+        if getattr(self, "cached_creator", None):
+            return self.cached_creator
+        creator_id=self.get_attribute("creator_id")
+        if not creator_id:
+            return None
+        users=User.objects.filter(id=creator_id)
+        if not users:
+            return None
+        user=users[0]
+        self.cached_creator = users[0]
+        return user
+
+    @creator.setter
+    def creator(self, value):
+        if value:
+            value = value.id
+        if (value != self.get_attribute("creator_id", None)):
+            self.cached_creator=None
+        self.set_attribute("creator_id", value)
+
     def manage_vcpe(self):
         # Each VOLT object owns exactly one VCPE object
 
@@ -110,7 +142,7 @@
 
             vcpe = VCPETenant(provider_service = vcpeServices[0],
                               subscriber_tenant = self)
-            vcpe.caller = self.caller
+            vcpe.caller = self.creator
             vcpe.save()
 
             try:
@@ -128,8 +160,14 @@
     def save(self, *args, **kwargs):
         self.validate_unique_service_specific_id()
 
-        if not getattr(self, "caller", None):
-            raise XOSProgrammingError("VOLTTenant's self.caller was not set")
+        if not self.creator:
+            if not getattr(self, "caller", None):
+                # caller must be set when creating a vOLT tenant, since saving it creates a vCPE
+                raise XOSProgrammingError("VOLTTenant's self.caller was not set")
+            self.creator = self.caller
+            if not self.creator:
+                raise XOSProgrammingError("VOLTTenant's self.creator was not set")
+
         super(VOLTTenant, self).save(*args, **kwargs)
         self.manage_vcpe()
 
@@ -155,6 +193,12 @@
 
     KIND = "vCPE"
 
+    sync_attributes = ("firewall_enable",
+                       "firewall_rules",
+                       "url_filter_enable",
+                       "url_filter_rules",
+                       "cdn_enable")
+
     default_attributes = {"firewall_enable": False,
                           "firewall_rules": "accept all anywhere anywhere",
                           "url_filter_enable": False,
@@ -190,7 +234,7 @@
         if not slivers:
             return None
         sliver=slivers[0]
-        sliver.caller = getattr(self, "caller", None)
+        sliver.caller = self.creator
         self.cached_sliver = sliver
         return sliver
 
@@ -203,6 +247,28 @@
         self.set_attribute("sliver_id", value)
 
     @property
+    def creator(self):
+        if getattr(self, "cached_creator", None):
+            return self.cached_creator
+        creator_id=self.get_attribute("creator_id")
+        if not creator_id:
+            return None
+        users=User.objects.filter(id=creator_id)
+        if not users:
+            return None
+        user=users[0]
+        self.cached_creator = users[0]
+        return user
+
+    @creator.setter
+    def creator(self, value):
+        if value:
+            value = value.id
+        if (value != self.get_attribute("creator_id", None)):
+            self.cached_creator=None
+        self.set_attribute("creator_id", value)
+
+    @property
     def vbng(self):
         if getattr(self, "cached_vbng", None):
             return self.cached_vbng
@@ -213,7 +279,7 @@
         if not vbngs:
             return None
         vbng=vbngs[0]
-        vbng.caller = getattr(self, "caller", None)
+        vbng.caller = self.creator
         self.cached_vbng = vbng
         return vbng
 
@@ -281,15 +347,16 @@
         if (self.sliver is not None) and (self.sliver.image != self.image):
             self.sliver.delete()
             self.sliver = None
+
         if self.sliver is None:
             if not self.provider_service.slices.count():
-                raise XOSConfigurationError("The VCPE service has no slicers")
+                raise XOSConfigurationError("The VCPE service has no slices")
 
             node =self.pick_node()
             sliver = Sliver(slice = self.provider_service.slices.all()[0],
                             node = node,
                             image = self.image,
-                            creator = self.caller,
+                            creator = self.creator,
                             deployment = node.site_deployment.deployment)
             sliver.save()
 
@@ -318,7 +385,7 @@
 
             vbng = VBNGTenant(provider_service = vbngServices[0],
                               subscriber_tenant = self)
-            vbng.caller = self.caller
+            vbng.caller = self.creator
             vbng.save()
 
             try:
@@ -334,8 +401,14 @@
             self.vbng = None
 
     def save(self, *args, **kwargs):
-        if not getattr(self, "caller", None):
-            raise XOSProgrammingError("VCPETenant's self.caller was not set")
+        if not self.creator:
+            if not getattr(self, "caller", None):
+                # caller must be set when creating a vCPE, since saving it creates a sliver
+                raise XOSProgrammingError("VCPETenant's self.caller was not set")
+            self.creator = self.caller
+            if not self.creator:
+                raise XOSProgrammingError("VCPETenant's self.creator was not set")
+
         super(VCPETenant, self).save(*args, **kwargs)
         self.manage_sliver()
         self.manage_vbng()
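Note: the new creator property on VOLTTenant and VCPETenant follows the same pattern as the existing vcpe/sliver/vbng properties: the id is persisted through Tenant's get_attribute()/set_attribute() (i.e. inside service_specific_attribute) and the resolved object is memoized on the instance. A minimal, self-contained sketch of that pattern (a simplification for illustration only; the real Tenant serializes attributes to JSON and resolves the id against User.objects):

    class AttributeBackedTenant(object):
        # toy stand-in for Tenant.get_attribute()/set_attribute()
        def __init__(self):
            self._attrs = {}
            self.cached_creator = None

        def get_attribute(self, name, default=None):
            return self._attrs.get(name, default)

        def set_attribute(self, name, value):
            self._attrs[name] = value

        @property
        def creator_id(self):
            return self.get_attribute("creator_id")

        @creator_id.setter
        def creator_id(self, value):
            # invalidate the memoized object whenever the stored id changes
            if value != self.get_attribute("creator_id"):
                self.cached_creator = None
            self.set_attribute("creator_id", value)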
diff --git a/xos/cord/templates/vbngadmin.html b/xos/cord/templates/vbngadmin.html
new file mode 100644
index 0000000..721f76c
--- /dev/null
+++ b/xos/cord/templates/vbngadmin.html
@@ -0,0 +1,6 @@
+<div class = "left-nav">
+<ul>
+<li><a href="/admin/cord/vbngtenant/">vBNG Tenants</a></li>
+</ul>
+</div>
+
diff --git a/xos/cord/templates/vcpeadmin.html b/xos/cord/templates/vcpeadmin.html
new file mode 100644
index 0000000..7a5d43f
--- /dev/null
+++ b/xos/cord/templates/vcpeadmin.html
@@ -0,0 +1,7 @@
+<div class = "left-nav">
+<ul>
+<li><a href="/admin/cord/vcpetenant/">vCPE Tenants</a></li>
+<li><a href="/admin/dashboard/cord/">Subscriber View</a></li>
+</ul>
+</div>
+
diff --git a/xos/cord/templates/voltadmin.html b/xos/cord/templates/voltadmin.html
index 6a583d6..5bf28ff 100644
--- a/xos/cord/templates/voltadmin.html
+++ b/xos/cord/templates/voltadmin.html
@@ -1,8 +1,6 @@
 <div class = "left-nav">
 <ul>
-{% for admin in registered_admins %}
-    <li><a href="{{ admin.url }}">{{ admin.name }}</a></li>
-{% endfor %}
+<li><a href="/admin/cord/volttenant/">vOLT Tenants</a></li>
 </ul>
 </div>
 
diff --git a/xos/core/admin.py b/xos/core/admin.py
index b3cee88..1ded815 100644
--- a/xos/core/admin.py
+++ b/xos/core/admin.py
@@ -57,6 +57,31 @@
                            flatatt(final_attrs),
                            force_text(value))
 
+class SliderWidget(forms.HiddenInput):
+    def render(self, name, value,  attrs=None):
+        if value is None:
+            value = '0'
+        final_attrs = self.build_attrs(attrs, name=name)
+        attrs = dict(attrs) if attrs else {}  # copy so we don't mutate the caller's attrs
+        attrs["name"] = name
+        attrs["value"] = value
+        html = """<div style="width:640px"><span id="%(id)s_label">%(value)s</span><div id="%(id)s_slider" style="float:right;width:610px;margin-top:5px"></div></div>
+                              <script>
+                                  $(function() {
+                                      $("#%(id)s_slider").slider({
+                                         value: %(value)s,
+                                         slide: function(event, ui) { $("#%(id)s").val( ui.value ); $("#%(id)s_label").html(ui.value); },
+                                         });
+                                  });
+                              </script>
+                              <input type="hidden" id="%(id)s" name="%(name)s" value="%(value)s"></input>
+                           """ % attrs
+        html = html.replace("{","{{").replace("}","}}")
+        return format_html(html,
+                           flatatt(final_attrs),
+                           force_text(value))
+
+
 class PlainTextWidget(forms.HiddenInput):
     input_type = 'hidden'
 
@@ -721,6 +746,32 @@
 
         return tabs
 
+class ProviderTenantInline(XOSTabularInline):
+    model = CoarseTenant
+    fields = ['provider_service', 'subscriber_service', 'connect_method']
+    extra = 0
+    suit_classes = 'suit-tab suit-tab-servicetenants'
+    fk_name = 'provider_service'
+    verbose_name = 'provided tenant'
+    verbose_name_plural = 'provided tenants'
+
+    def queryset(self, request):
+        qs = super(ProviderTenantInline, self).queryset(request)
+        return qs.filter(kind="coarse")
+
+class SubscriberTenantInline(XOSTabularInline):
+    model = CoarseTenant
+    fields = ['provider_service', 'subscriber_service', 'connect_method']
+    extra = 0
+    suit_classes = 'suit-tab suit-tab-servicetenants'
+    fk_name = 'subscriber_service'
+    verbose_name = 'subscribed tenant'
+    verbose_name_plural = 'subscribed tenants'
+
+    def queryset(self, request):
+        qs = super(SubscriberTenantInline, self).queryset(request)
+        return qs.filter(kind="coarse")
+
 class ServiceAttrAsTabInline(XOSTabularInline):
     model = ServiceAttribute
     fields = ['name','value']
@@ -730,9 +781,9 @@
 class ServiceAdmin(XOSBaseAdmin):
     list_display = ("backend_status_icon","name","kind","versionNumber","enabled","published")
     list_display_links = ('backend_status_icon', 'name', )
-    fieldList = ["backend_status_text","name","kind","description","versionNumber","enabled","published","view_url","icon_url"]
+    fieldList = ["backend_status_text","name","kind","description","versionNumber","enabled","published","view_url","icon_url","public_key"]
     fieldsets = [(None, {'fields': fieldList, 'classes':['suit-tab suit-tab-general']})]
-    inlines = [ServiceAttrAsTabInline,SliceInline]
+    inlines = [ServiceAttrAsTabInline,SliceInline,ProviderTenantInline,SubscriberTenantInline]
     readonly_fields = ('backend_status_text', )
 
     user_readonly_fields = fieldList
@@ -740,6 +791,7 @@
     suit_form_tabs =(('general', 'Service Details'),
         ('slices','Slices'),
         ('serviceattrs','Additional Attributes'),
+        ('servicetenants','Tenancy'),
     )
 
 class SiteNodeInline(XOSTabularInline):
diff --git a/xos/core/models/__init__.py b/xos/core/models/__init__.py
index 928679b..81bf4cc 100644
--- a/xos/core/models/__init__.py
+++ b/xos/core/models/__init__.py
@@ -1,7 +1,7 @@
 from .plcorebase import PlCoreBase,PlCoreBaseManager,PlCoreBaseDeletionManager,PlModelMixIn
 from .project import Project
 from .singletonmodel import SingletonModel
-from .service import Service, Tenant
+from .service import Service, Tenant, CoarseTenant
 from .service import ServiceAttribute
 from .tag import Tag
 from .role import Role
diff --git a/xos/core/models/plcorebase.py b/xos/core/models/plcorebase.py
index b9a2345..5e3e287 100644
--- a/xos/core/models/plcorebase.py
+++ b/xos/core/models/plcorebase.py
@@ -185,7 +185,8 @@
             if (not self.write_protect):
                 self.deleted = True
                 self.enacted=None
-                self.save(update_fields=['enacted','deleted'], silent=silent)
+                self.policed=None
+                self.save(update_fields=['enacted','deleted','policed'], silent=silent)
 
 
     def save(self, *args, **kwargs):
diff --git a/xos/core/models/service.py b/xos/core/models/service.py
index afb4949..8a10f37 100644
--- a/xos/core/models/service.py
+++ b/xos/core/models/service.py
@@ -16,6 +16,7 @@
     published = models.BooleanField(default=True)
     view_url = StrippedCharField(blank=True, null=True, max_length=1024)
     icon_url = StrippedCharField(blank=True, null=True, max_length=1024)
+    public_key = models.TextField(null=True, blank=True, max_length=1024, help_text="Public key string")
 
     def __init__(self, *args, **kwargs):
         # for subclasses, set the default kind appropriately
@@ -28,6 +29,79 @@
 
     def __unicode__(self): return u'%s' % (self.name)
 
+    def get_scalable_nodes(self, slice, max_per_node=None, exclusive_slices=[]):
+        """
+             Get a list of nodes that can be used to scale up a slice.
+
+                slice - slice to scale up
+                max_per_node - maximum number of slivers that 'slice' can have on a single node
+                exclusive_slices - list of slices that must have no nodes in common with 'slice'.
+        """
+
+        from core.models import Node, Sliver # late import to get around order-of-imports constraint in __init__.py
+
+        nodes = list(Node.objects.all())
+
+        conflicting_slivers = Sliver.objects.filter(slice__in = exclusive_slices)
+        conflicting_nodes = Node.objects.filter(slivers__in = conflicting_slivers)
+
+        nodes = [x for x in nodes if x not in conflicting_nodes]
+
+        # If max_per_node is set, then limit the number of slivers this slice
+        # can have on a single node.
+        if max_per_node:
+            acceptable_nodes = []
+            for node in nodes:
+                existing_count = node.slivers.filter(slice=slice).count()
+                if existing_count < max_per_node:
+                    acceptable_nodes.append(node)
+            nodes = acceptable_nodes
+
+        return nodes
+
+    def pick_node(self, slice, max_per_node=None, exclusive_slices=[]):
+        # Pick the best node to scale up a slice.
+
+        nodes = self.get_scalable_nodes(slice, max_per_node, exclusive_slices)
+        nodes = sorted(nodes, key=lambda node: node.slivers.all().count())
+        if not nodes:
+            return None
+        return nodes[0]
+
+    def adjust_scale(self, slice_hint, scale, max_per_node=None, exclusive_slices=[]):
+        from core.models import Sliver # late import to get around order-of-imports constraint in __init__.py
+
+        slices = [x for x in self.slices.all() if slice_hint in x.name]
+        for slice in slices:
+            while slice.slivers.all().count() > scale:
+                s = slice.slivers.all()[0]
+                # print "drop sliver", s
+                s.delete()
+
+            while slice.slivers.all().count() < scale:
+                node = self.pick_node(slice, max_per_node, exclusive_slices)
+                if not node:
+                    # no more available nodes
+                    break
+
+                image = slice.default_image
+                if not image:
+                    raise XOSConfigurationError("No default_image for slice %s" % slice.name)
+
+                flavor = slice.default_flavor
+                if not flavor:
+                    raise XOSConfigurationError("No default_flavor for slice %s" % slice.name)
+
+                s = Sliver(slice=slice,
+                           node=node,
+                           creator=slice.creator,
+                           image=image,
+                           flavor=flavor,
+                           deployment=node.site_deployment.deployment)
+                s.save()
+
+                # print "add sliver", s
+
 class ServiceAttribute(PlCoreBase):
     name = models.SlugField(help_text="Attribute Name", max_length=128)
     value = StrippedCharField(help_text="Attribute Value", max_length=1024)
@@ -42,6 +116,8 @@
         The provider is always a Service.
     """
 
+    CONNECTIVITY_CHOICES = (('public', 'Public'), ('private', 'Private'), ('na', 'Not Applicable'))
+
     # when subclassing a service, redefine KIND to describe the new service
     KIND = "generic"
 
@@ -50,8 +126,9 @@
     subscriber_service = models.ForeignKey(Service, related_name='subscriptions', blank=True, null=True)
     subscriber_tenant = models.ForeignKey("Tenant", related_name='subscriptions', blank=True, null=True)
     subscriber_user = models.ForeignKey("User", related_name='subscriptions', blank=True, null=True)
-    service_specific_id = StrippedCharField(max_length=30)
-    service_specific_attribute = models.TextField()
+    service_specific_id = StrippedCharField(max_length=30, blank=True, null=True)
+    service_specific_attribute = models.TextField(blank=True, null=True)
+    connect_method = models.CharField(null=False, blank=False, max_length=30, choices=CONNECTIVITY_CHOICES, default="na")
 
     def __init__(self, *args, **kwargs):
         # for subclasses, set the default kind appropriately
@@ -93,6 +170,10 @@
     def get_tenant_objects(cls):
         return cls.objects.filter(kind = cls.KIND)
 
+    @classmethod
+    def get_deleted_tenant_objects(cls):
+        return cls.deleted_objects.filter(kind = cls.KIND)
+
     # helper function to be used in subclasses that want to ensure service_specific_id is unique
     def validate_unique_service_specific_id(self):
         if self.pk is None:
@@ -103,4 +184,16 @@
             if conflicts:
                 raise XOSDuplicateKey("service_specific_id %s already exists" % self.service_specific_id, fields={"service_specific_id": "duplicate key"})
 
+class CoarseTenant(Tenant):
+    class Meta:
+        proxy = True
 
+    KIND = "coarse"
+
+    def save(self, *args, **kwargs):
+        if (not self.subscriber_service):
+            raise XOSValidationError("subscriber_service cannot be null")
+        if (self.subscriber_tenant or self.subscriber_user):
+            raise XOSValidationError("subscriber_tenant and subscriber_user must be null")
+
+        super(CoarseTenant,self).save()
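Note: a hedged usage sketch for the new scaling helpers and coarse tenancy on Service (the service name, and the assumption that its "hpc" slice has default_image and default_flavor set, are illustrative only):

    from core.models import Service, CoarseTenant

    service = Service.objects.get(name="HPC Service")   # name is an assumption
    # converge the matching slice on exactly 3 slivers, at most one per node
    service.adjust_scale(slice_hint="hpc", scale=3, max_per_node=1)

    # coarse (service-level) tenancy rows are scoped by kind="coarse"
    for link in CoarseTenant.get_tenant_objects().all():
        print link.subscriber_service, "->", link.provider_service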
diff --git a/xos/core/models/slice.py b/xos/core/models/slice.py
index 4fc8489..0649d6f 100644
--- a/xos/core/models/slice.py
+++ b/xos/core/models/slice.py
@@ -99,6 +99,7 @@
             qs = Slice.objects.filter(id__in=slice_ids)
         return qs
 
+    """
     def delete(self, *args, **kwds):
         # delete networks associated with this slice
         from core.models.network import Network
@@ -112,6 +113,7 @@
         slice_privileges.delete() 
         # continue with normal delete
         super(Slice, self).delete(*args, **kwds) 
+    """
          
 
 class SliceRole(PlCoreBase):
diff --git a/xos/core/models/sliver.py b/xos/core/models/sliver.py
index e45152f..ff1e9b4 100644
--- a/xos/core/models/sliver.py
+++ b/xos/core/models/sliver.py
@@ -5,7 +5,7 @@
 from core.models import PlCoreBase,PlCoreBaseManager,PlCoreBaseDeletionManager
 from core.models.plcorebase import StrippedCharField
 from core.models import Image
-from core.models import Slice
+from core.models import Slice, SlicePrivilege
 from core.models import Node
 from core.models import Site
 from core.models import Deployment
@@ -170,3 +170,18 @@
             return None
         else:
             return 'ssh -o "ProxyCommand ssh -q %s@%s" ubuntu@%s' % (self.instance_id, self.node.name, self.instance_name)
+
+    def get_public_keys(self):
+        slice_memberships = SlicePrivilege.objects.filter(slice=self.slice)
+        pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
+
+        if self.creator.public_key:
+            pubkeys.add(self.creator.public_key)
+
+        if self.slice.creator.public_key:
+            pubkeys.add(self.slice.creator.public_key)
+
+        if self.slice.service and self.slice.service.public_key:
+            pubkeys.add(self.slice.service.public_key)
+
+        return pubkeys
diff --git a/xos/core/static/primarycons_blue/plus.png b/xos/core/static/primarycons_blue/plus.png
new file mode 100644
index 0000000..a00ab89
--- /dev/null
+++ b/xos/core/static/primarycons_blue/plus.png
Binary files differ
diff --git a/xos/core/static/primarycons_blue/service_graph.png b/xos/core/static/primarycons_blue/service_graph.png
new file mode 100644
index 0000000..e2e8c5b
--- /dev/null
+++ b/xos/core/static/primarycons_blue/service_graph.png
Binary files differ
diff --git a/xos/core/static/xos.css b/xos/core/static/xos.css
index 513dc06..282c9f4 100644
--- a/xos/core/static/xos.css
+++ b/xos/core/static/xos.css
@@ -182,7 +182,7 @@
   float: right;
   border: 2px darkGrey;
 }
-.ui-state-default, .ui-widget-content .ui-state-default, .ui-widget-header .ui-state-default{
+.ui-state-default #hometabs, .ui-widget-content .ui-state-default #hometabs, .ui-widget-header .ui-state-default {
 background: none !important;
 border-top: 0px !important;
 border-left: 0px !important;
diff --git a/xos/core/views/services.py b/xos/core/views/services.py
index 76180e7..6f24609 100644
--- a/xos/core/views/services.py
+++ b/xos/core/views/services.py
@@ -6,6 +6,7 @@
 import json
 import os
 import time
+import tempfile
 
 class ServiceGridView(TemplateView):
     head_template = r"""{% extends "admin/dashboard/dashboard_base.html" %}
@@ -21,11 +22,8 @@
 
         html = '<table><tr>'
 
-        i=0
+        icons=[]
         for service in Service.objects.all():
-            if (i%4) == 0:
-                html = html + '</tr><tr>'
-
             view_url = service.view_url
             if (not view_url):
                 view_url = "/admin/core/service/$id$/"
@@ -35,8 +33,26 @@
             if (not image_url):
                 image_url = "/static/primarycons_blue/gear_2.png"
 
+            icons.append( {"name": service.name, "view_url": view_url, "image_url": image_url} )
+
+        icons.append( {"name": "Tenancy Graph", "view_url": "/serviceGraph.png", "image_url": "/static/primarycons_blue/service_graph.png", "horiz_rule": True} )
+        icons.append( {"name": "Add Service", "view_url": "/admin/core/service/add/", "image_url": "/static/primarycons_blue/plus.png"} )
+
+        i=0
+        for icon in icons:
+            if icon.get("horiz_rule", False):
+                html = html + "</tr><tr><td colspan=4><hr></td></tr><tr>"
+                i=0
+
+            service_name = icon["name"]
+            view_url = icon["view_url"]
+            image_url = icon["image_url"]
+
+            if (i%4) == 0:
+                html = html + '</tr><tr>'
+
             html = html + '<td width=96 height=128 valign=top align=center><a href="%s"><img src="%s" height=64 width=64></img></a>' % (view_url, image_url)
-            html = html + '<p><a href="%s">%s</a></p></td>' % (view_url, service.name)
+            html = html + '<p><a href="%s">%s</a></p></td>' % (view_url, service_name)
             i=i+1
 
         html = html + '</tr></table>'
@@ -50,4 +66,76 @@
             template = t,
             **response_kwargs)
 
+class ServiceGraphViewOld(TemplateView):
+    #  this attempt used networkx
+    # yum -y install python-matplotlib python-networkx
+    # pip-python install --upgrade networkx
+    # pip-python install graphviz pygraphviz
 
+    def get(self, request, name="root", *args, **kwargs):
+        import networkx as nx
+        import matplotlib as mpl
+        mpl.use("Agg")
+        import matplotlib.pyplot as plt
+        import nxedges
+
+        plt.figure(figsize=(10,8))
+
+        g = nx.DiGraph()
+
+        labels = {}
+        for service in Service.objects.all():
+            g.add_node(service.id)
+            if len(service.name)>8:
+                labels[service.id] = service.name[:8] + "\n" + service.name[8:]
+            else:
+                labels[service.id] = service.name
+
+        for tenant in CoarseTenant.objects.all():
+            if (not tenant.provider_service) or (not tenant.subscriber_service):
+                continue
+            g.add_edge(tenant.subscriber_service.id, tenant.provider_service.id)
+
+        pos = nx.graphviz_layout(g)
+        nxedges.xos_draw_networkx_edges(g,pos,arrow_len=30)
+        nx.draw_networkx_nodes(g,pos,node_size=5000)
+        nx.draw_networkx_labels(g,pos,labels,font_size=12)
+        #plt.axis('off')
+        plt.savefig("/tmp/foo.png")
+
+        return HttpResponse(open("/tmp/foo.png","r").read(), content_type="image/png")
+
+class ServiceGraphView(TemplateView):
+    # this attempt just uses graphviz directly
+    # yum -y install graphviz
+    # pip-python install pygraphviz
+
+    def get(self, request, name="root", *args, **kwargs):
+        import pygraphviz as pgv
+
+        g = pgv.AGraph(directed=True)
+        g.graph_attr.update(size="8,4!")
+        g.graph_attr.update(dpi="100")
+        #g.graph_attr.update(nodesep="2.5")
+        g.graph_attr.update(overlap="false")
+        g.graph_attr.update(rankdir="TB")
+
+        for service in Service.objects.all():
+            provided_tenants = CoarseTenant.get_tenant_objects().filter(provider_service=service)
+            subscribed_tenants = CoarseTenant.get_tenant_objects().filter(subscriber_service=service)
+            if not (provided_tenants or subscribed_tenants):
+               # nodes with no edges aren't interesting
+               continue
+            g.add_node(service.id, label=service.name)
+
+        for tenant in CoarseTenant.get_tenant_objects().all():
+            if (not tenant.provider_service) or (not tenant.subscriber_service):
+                continue
+            g.add_edge(tenant.subscriber_service.id, tenant.provider_service.id)
+
+        tf = tempfile.TemporaryFile()
+        g.layout(prog="dot")
+        g.draw(path=tf, format="png")
+        tf.seek(0)
+
+        return HttpResponse(tf.read(), content_type="image/png")
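Note: the service grid now links to /serviceGraph.png, but the URL route is not part of this diff. A hedged sketch of the kind of URLconf entry that would serve it (placement and pattern are assumptions, written in the Django 1.7 style pinned by the Dockerfile):

    from django.conf.urls import patterns, url
    from core.views.services import ServiceGraphView

    urlpatterns = patterns('',
        url(r'^serviceGraph.png$', ServiceGraphView.as_view()),
    )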
diff --git a/xos/core/xoslib/methods/cordsubscriber.py b/xos/core/xoslib/methods/cordsubscriber.py
index 2337b21..08aa9d9 100644
--- a/xos/core/xoslib/methods/cordsubscriber.py
+++ b/xos/core/xoslib/methods/cordsubscriber.py
@@ -32,7 +32,7 @@
         cdn_enable = serializers.BooleanField()
         sliver_name = ReadOnlyField()
         image_name = ReadOnlyField()
-        routeable_subnet = serializers.CharField()
+        routeable_subnet = serializers.CharField(required=False)
 
         humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
 
diff --git a/xos/core/xoslib/methods/hpcview.py b/xos/core/xoslib/methods/hpcview.py
index 7c4baa9..41f6051 100644
--- a/xos/core/xoslib/methods/hpcview.py
+++ b/xos/core/xoslib/methods/hpcview.py
@@ -11,6 +11,7 @@
 from syndicate_storage.models import Volume
 from django.core.exceptions import PermissionDenied
 from django.contrib.contenttypes.models import ContentType
+import json
 import socket
 import time
 
@@ -45,16 +46,41 @@
     else:
         return None
 
+def compute_config_run(d):
+    if not d:
+        return "null"
+
+    d = json.loads(d)
+
+    status = d.get("status", "null")
+    if status!="success":
+        return status
+
+    config_run = d.get("config.run")
+    if not config_run:
+        return "null"
+
+    try:
+        config_run = max(0, int(time.time()) - int(float(config_run)))
+    except:
+        pass
+
+    return config_run
+
 def getHpcDict(user, pk):
     hpc = HpcService.objects.get(pk=pk)
     slices = get_service_slices(hpc)
 
     dnsdemux_slice = None
+    dnsredir_slice = None
     hpc_slice = None
     for slice in slices:
         if "dnsdemux" in slice.name:
             dnsdemux_service = hpc
             dnsdemux_slice = slice
+        if "dnsredir" in slice.name:
+            dnsredir_service = hpc
+            dnsredir_slice = slice
         if "hpc" in slice.name:
             hpc_service = hpc
             hpc_slice = slice
@@ -68,6 +94,17 @@
                 if "dnsdemux" in slice.name:
                     dnsdemux_service = rr
                     dnsdemux_slice = slice
+                if "dnsredir" in slice.name:
+                    dnsredir_service = rr
+                    dnsredir_slice = slice
+
+    if not dnsredir_slice:
+        print "no dnsredir slice"
+        return
+
+    if not dnsdemux_slice:
+        print "no dnsdemux slice"
+        return
 
     dnsdemux_has_public_network = False
     for network in dnsdemux_slice.networks.all():
@@ -97,26 +134,39 @@
                 sliver_nameservers.append(ns["name"])
                 ns["hit"]=True
 
+        # now find the dnsredir sliver that is also on this node
+        watcherd_dnsredir = "no-redir-sliver"
+        for dnsredir_sliver in dnsredir_slice.slivers.all():
+            if dnsredir_sliver.node == sliver.node:
+                watcherd_dnsredir = lookup_tag(dnsredir_service, dnsredir_sliver, "watcher.watcher.msg")
+
+        watcherd_dnsdemux = lookup_tag(dnsdemux_service, sliver, "watcher.watcher.msg")
+
         dnsdemux.append( {"name": sliver.node.name,
                        "watcher.DNS.msg": lookup_tag(dnsdemux_service, sliver, "watcher.DNS.msg"),
                        "watcher.DNS.time": lookup_time(dnsdemux_service, sliver, "watcher.DNS.time"),
                        "ip": ip,
-                       "nameservers": sliver_nameservers })
+                       "nameservers": sliver_nameservers,
+                       "dnsdemux_config_age": compute_config_run(watcherd_dnsdemux),
+                       "dnsredir_config_age": compute_config_run(watcherd_dnsredir) })
 
     hpc=[]
     for sliver in hpc_slice.slivers.all():
+        watcherd_hpc = lookup_tag(hpc_service, sliver, "watcher.watcher.msg")
+
         hpc.append( {"name": sliver.node.name,
                      "watcher.HPC-hb.msg": lookup_tag(hpc_service, sliver, "watcher.HPC-hb.msg"),
                      "watcher.HPC-hb.time": lookup_time(hpc_service, sliver, "watcher.HPC-hb.time"),
                      "watcher.HPC-fetch.msg": lookup_tag(hpc_service, sliver, "watcher.HPC-fetch.msg"),
                      "watcher.HPC-fetch.time": lookup_time(hpc_service, sliver, "watcher.HPC-fetch.time"),
+                     "config_age": compute_config_run(watcherd_hpc),
 
         })
 
     return { "id": pk,
              "dnsdemux": dnsdemux,
              "hpc": hpc,
-             "nameservers": nameservers }
+             "nameservers": nameservers,}
 
 
 class HpcList(APIView):
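Note: compute_config_run() expects the watcher tag value (watcher.watcher.msg) to be a JSON string carrying a "status" field and a "config.run" unix timestamp, and turns it into an age in seconds. An illustration of the expected input and output:

    import json, time

    msg = json.dumps({"status": "success", "config.run": time.time() - 90})
    print compute_config_run(msg)    # roughly 90 (seconds since the last config run)
    print compute_config_run(None)   # "null"
    print compute_config_run(json.dumps({"status": "exception"}))  # "exception"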
diff --git a/xos/core/xoslib/methods/sshkeys.py b/xos/core/xoslib/methods/sshkeys.py
new file mode 100644
index 0000000..a714212
--- /dev/null
+++ b/xos/core/xoslib/methods/sshkeys.py
@@ -0,0 +1,56 @@
+from rest_framework.decorators import api_view
+from rest_framework.response import Response
+from rest_framework.reverse import reverse
+from rest_framework import serializers
+from rest_framework import generics
+from rest_framework.views import APIView
+from core.models import *
+from django.forms import widgets
+from syndicate_storage.models import Volume
+from django.core.exceptions import PermissionDenied
+from xos.exceptions import XOSNotFound
+
+class SSHKeyList(APIView):
+    method_kind = "list"
+    method_name = "sshkeys"
+
+    def get(self, request, format=None):
+        instances=[]
+        for sliver in self.get_queryset().all():
+            if sliver.instance_id:
+                instances.append( {"id": sliver.instance_id,
+                                   "public_keys": sliver.get_public_keys(),
+                                   "node_name": sliver.node.name } )
+
+        return Response(instances)
+
+    def get_queryset(self):
+        queryset = Sliver.objects.all()
+
+        node_name = self.request.QUERY_PARAMS.get('node_name', None)
+        if node_name is not None:
+            queryset = queryset.filter(node__name = node_name)
+
+        return queryset
+
+class SSHKeyDetail(APIView):
+    method_kind = "detail"
+    method_name = "sshkeys"
+
+    def get(self, request, format=None, pk=0):
+        slivers = self.get_queryset().filter(instance_id=pk)
+        if not slivers:
+            raise XOSNotFound("didn't find sliver for instance %s" % pk)
+        return Response( [ {"id": slivers[0].instance_id,
+                            "public_keys": slivers[0].get_public_keys(),
+                            "node_name": slivers[0].node.name } ])
+
+    def get_queryset(self):
+        queryset = Sliver.objects.all()
+
+        node_name = self.request.QUERY_PARAMS.get('node_name', None)
+        if node_name is not None:
+            queryset = queryset.filter(node__name = node_name)
+
+        return queryset
+
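Note: a hedged sketch of consuming the new list method from a node-side agent (the /xoslib/sshkeys/ URL prefix and the credentials below are assumptions, not established by this diff):

    import requests

    r = requests.get("http://xos:8000/xoslib/sshkeys/",
                     params={"node_name": "node1.example.org"},
                     auth=("admin@domain.com", "password"))
    for entry in r.json():
        print entry["id"], entry["node_name"], entry["public_keys"]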
diff --git a/xos/core/xoslib/static/js/xosHpc.js b/xos/core/xoslib/static/js/xosHpc.js
index 220b1d5..634e918 100644
--- a/xos/core/xoslib/static/js/xosHpc.js
+++ b/xos/core/xoslib/static/js/xosHpc.js
@@ -23,7 +23,8 @@
     for (rowkey in dnsdemux) {
         row = dnsdemux[rowkey];
 
-        actualEntries.push( [row.name, row.ip, staleCheck(row, "watcher.DNS.time", "watcher.DNS.msg", SC_RR), row.nameservers.join(",")] );
+        actualEntries.push( [row.name, row.ip, staleCheck(row, "watcher.DNS.time", "watcher.DNS.msg", SC_RR), row.nameservers.join(","),
+                             row.dnsredir_config_age + "," + row.dnsdemux_config_age] );
     }
     console.log(actualEntries);
     oTable = $('#dynamic_dnsdemux').dataTable( {
@@ -37,6 +38,7 @@
             { "sTitle": "IP Address" },
             { "sTitle": "Record Checker" },
             { "sTitle": "Nameservers" },
+            { "sTitle": "Config Age" },
         ]
     } );
 }
@@ -50,7 +52,9 @@
     for (rowkey in dnsdemux) {
         row = dnsdemux[rowkey];
 
-        actualEntries.push( [row.name, staleCheck(row, "watcher.HPC-hb.time", "watcher.HPC-hb.msg", SC_HPC_PROBE), staleCheck(row, "watcher.HPC-fetch.time", "watcher.HPC-fetch.msg", SC_HPC_FETCH) ] );
+        actualEntries.push( [row.name, staleCheck(row, "watcher.HPC-hb.time", "watcher.HPC-hb.msg", SC_HPC_PROBE),
+                                       staleCheck(row, "watcher.HPC-fetch.time", "watcher.HPC-fetch.msg", SC_HPC_FETCH),
+                                       row.config_age, ] );
     }
     console.log(actualEntries);
     oTable = $('#dynamic_hpc').dataTable( {
@@ -63,6 +67,7 @@
             { "sTitle": "Node", },
             { "sTitle": "Prober" },
             { "sTitle": "Fetcher" },
+            { "sTitle": "Config Age" },
         ]
     } );
 }
diff --git a/xos/core/xoslib/templates/xosCordSubscriber.html b/xos/core/xoslib/templates/xosCordSubscriber.html
index dbd4cfe..1a189c4 100644
--- a/xos/core/xoslib/templates/xosCordSubscriber.html
+++ b/xos/core/xoslib/templates/xosCordSubscriber.html
@@ -20,10 +20,10 @@
   <tr><td class="xos-label-cell xos-cord-label">Image:</td><td><%= model.attributes.image_name %></td></tr>

   <tr><td class="xos-label-cell xos-cord-label">Sliver Id:</td><td><%= model.attributes.sliver %></td></tr>

   <tr><td class="xos-label-cell xos-cord-label">Firewall:</td><td><input type="checkbox" name="firewall_enable" <% if (model.attributes.firewall_enable) print("checked"); %>>Enable<br>

-                                                                  <textarea name="firewall_rules"><%= model.attributes.firewall_rules %></textarea></td></tr>

+                                                                  <textarea name="firewall_rules" style="width:320px; height:80px"><%= model.attributes.firewall_rules %></textarea></td></tr>

   <tr><td class="xos-label-cell xos-cord-label">URL Filter:</td><td><input type="checkbox" name="url_filter_enable" <% if (model.attributes.url_filter_enable) print("checked"); %>>Enable<br>

-                                                                  <textarea name="url_filter_rules"><%= model.attributes.url_filter_rules %></textarea></td></tr>

-  <tr><td class="xos-label-cell xos-cord-label">CDN:</td><td><input type="checkbox" name="cdn_enable" <% if (model.attributes.firewall_enable) print("checked"); %>>Enable</td></tr>

+                                                                  <textarea name="url_filter_rules" style="width:320px; height:80px"><%= model.attributes.url_filter_rules %></textarea></td></tr>

+  <tr><td class="xos-label-cell xos-cord-label">CDN:</td><td><input type="checkbox" name="cdn_enable" <% if (model.attributes.cdn_enable) print("checked"); %>>Enable</td></tr>

   </table>

   </div>

 

diff --git a/xos/dependency_walker.py b/xos/dependency_walker.py
index 0b23136..ba9de11 100644
--- a/xos/dependency_walker.py
+++ b/xos/dependency_walker.py
@@ -71,9 +71,10 @@
 			except AttributeError:
 				if not missing_links.has_key(model+'.'+link):
 					print "Model %s missing link for dependency %s"%(model, link)
-                                        logger.log_exc("Model %s missing link for dependency %s"%(model, link))
+                                        logger.log_exc("WARNING: Model %s missing link for dependency %s."%(model, link))
 					missing_links[model+'.'+link]=True
 
+
 		if (peer):
 			try:
 				peer_objects = peer.all()
@@ -83,11 +84,13 @@
 				peer_objects = []
 
 			for o in peer_objects:
-				fn(o, object)
+				#if (isinstance(o,PlCoreBase)):
+				if (hasattr(o,'updated')):
+					fn(o, object)
 				# Uncomment the following line to enable recursion
 				# walk_inv_deps(fn, o)
 
-def p(x):
+def p(x,source):
 	print x,x.__class__.__name__
 	return
 
diff --git a/xos/hpc/admin.py b/xos/hpc/admin.py
index 20364b6..08a1cdb 100644
--- a/xos/hpc/admin.py
+++ b/xos/hpc/admin.py
@@ -10,7 +10,7 @@
 from django.utils import timezone
 from django.contrib.contenttypes import generic
 from suit.widgets import LinkedSelect
-from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline
+from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline, SliderWidget
 
 from functools import update_wrapper
 from django.contrib.admin.views.main import ChangeList
@@ -106,15 +106,30 @@
        # filtered_change_view rather than the default change_view.
        return FilteredChangeList
 
+class HpcServiceForm(forms.ModelForm):
+    scale = forms.IntegerField(widget = SliderWidget, required=False)
+
+    def __init__(self, *args, **kwargs):
+        super(HpcServiceForm, self).__init__(*args, **kwargs)
+        if kwargs.get("instance"): self.fields['scale'].initial = kwargs["instance"].scale
+
+    def save(self, *args, **kwargs):
+        if self.cleaned_data['scale']:
+             self.instance.scale = self.cleaned_data['scale']
+
+        return super(HpcServiceForm, self).save(*args, **kwargs)
+
+
 class HpcServiceAdmin(ReadOnlyAwareAdmin):
     model = HpcService
     verbose_name = "HPC Service"
     verbose_name_plural = "HPC Service"
     list_display = ("backend_status_icon", "name","enabled")
     list_display_links = ('backend_status_icon', 'name', )
-    fieldsets = [(None, {'fields': ['backend_status_text', 'name','enabled','versionNumber', 'description', "cmi_hostname"], 'classes':['suit-tab suit-tab-general']})]
+    fieldsets = [(None, {'fields': ['backend_status_text', 'name','scale','enabled','versionNumber', 'description', "cmi_hostname"], 'classes':['suit-tab suit-tab-general']})]
     readonly_fields = ('backend_status_text', )
     inlines = [SliceInline,ServiceAttrAsTabInline]
+    form = HpcServiceForm
 
     extracontext_registered_admins = True
 
diff --git a/xos/hpc/models.py b/xos/hpc/models.py
index e915fbc..1cd51ce 100644
--- a/xos/hpc/models.py
+++ b/xos/hpc/models.py
@@ -17,6 +17,26 @@
 
     cmi_hostname = StrippedCharField(max_length=254, null=True, blank=True)
 
+    @property
+    def scale(self):
+        hpc_slices = [x for x in self.slices.all() if "hpc" in x.name]
+        if not hpc_slices:
+            return 0
+        return hpc_slices[0].slivers.count()
+
+    @scale.setter
+    def scale(self, value):
+        self.set_scale = value
+
+    def save(self, *args, **kwargs):
+        super(HpcService, self).save(*args, **kwargs)
+
+        # scale up/down
+        scale = getattr(self, "set_scale", None)
+        if scale is not None:
+            exclude_slices = [x for x in self.slices.all() if "cmi" in x.name]
+            self.adjust_scale(slice_hint="hpc", scale=scale, exclusive_slices = exclude_slices, max_per_node=1)
+
 class ServiceProvider(PlCoreBase):
     class Meta:
         app_label = "hpc"
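Note: a hedged usage sketch of the new scale property (the HpcServiceForm above drives it the same way): reading it counts the slivers in the "hpc" slice, and assigning it only takes effect on save(), which calls adjust_scale() while excluding "cmi" slices:

    from hpc.models import HpcService

    hpc = HpcService.objects.all()[0]   # assumes at least one HpcService exists
    print hpc.scale                     # current sliver count of the hpc slice
    hpc.scale = 5                       # remembered as set_scale until save()
    hpc.save()                          # scales the hpc slice, at most one sliver per node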
diff --git a/xos/hpc_observer/run.sh b/xos/hpc_observer/run.sh
deleted file mode 100644
index abf620a..0000000
--- a/xos/hpc_observer/run.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-if [[ ! -e ./hpc-backend.py ]]; then
-    ln -s ../xos-observer.py hpc-backend.py
-fi
-
-export XOS_DIR=/opt/xos
-python hpc-backend.py  -C $XOS_DIR/hpc_observer/hpc_observer_config
diff --git a/xos/hpc_observer/start.sh b/xos/hpc_observer/start.sh
deleted file mode 100644
index 07a1663..0000000
--- a/xos/hpc_observer/start.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-if [[ ! -e ./hpc-backend.py ]]; then
-    ln -s ../xos-observer.py hpc-backend.py
-fi
-
-export XOS_DIR=/opt/xos
-nohup python hpc-backend.py  -C $XOS_DIR/hpc_observer/hpc_observer_config > /dev/null 2>&1 &
diff --git a/xos/hpc_observer/stop.sh b/xos/hpc_observer/stop.sh
deleted file mode 100644
index 632f7a6..0000000
--- a/xos/hpc_observer/stop.sh
+++ /dev/null
@@ -1 +0,0 @@
-pkill -9 -f hpc-backend.py
diff --git a/xos/model_policies/model_policy_Slice.py b/xos/model_policies/model_policy_Slice.py
index a9936bd..b610601 100644
--- a/xos/model_policies/model_policy_Slice.py
+++ b/xos/model_policies/model_policy_Slice.py
@@ -1,16 +1,33 @@
+def handle_delete(slice):
+    from core.models import Controller, ControllerSlice, SiteDeployment, Network, NetworkSlice,NetworkTemplate, Slice
+    from collections import defaultdict
+
+    public_nets = []
+    private_net = None
+    networks = Network.objects.filter(owner=slice)
+
+    for n in networks:
+        n.delete()	
+    
+    # Note that sliceprivileges and slicecontrollers are autodeleted, through the dependency graph
 
 def handle(slice):
     from core.models import Controller, ControllerSlice, SiteDeployment, Network, NetworkSlice,NetworkTemplate, Slice
     from collections import defaultdict
 
+    print "MODEL POLICY: slice", slice
+
     # slice = Slice.get(slice_id)
 
     controller_slices = ControllerSlice.objects.filter(slice=slice)
     existing_controllers = [cs.controller for cs in controller_slices] 
         
+    print "MODEL POLICY: slice existing_controllers=", existing_controllers
+
     all_controllers = Controller.objects.all() 
     for controller in all_controllers:
         if controller not in existing_controllers:
+            print "MODEL POLICY: slice adding controller", controller
             sd = ControllerSlice(slice=slice, controller=controller)
             sd.save()
 
diff --git a/xos/model_policy.py b/xos/model_policy.py
index 3fa7218..9333adc 100644
--- a/xos/model_policy.py
+++ b/xos/model_policy.py
@@ -11,62 +11,105 @@
 from django.db.models import F, Q
 
 modelPolicyEnabled = True
+bad_instances=[]
 
 def EnableModelPolicy(x):
     global modelPolicyEnabled
     modelPolicyEnabled = x
 
+def update_wp(d, o):
+    try:
+        save_fields = []
+        if (d.write_protect != o.write_protect):
+            d.write_protect = o.write_protect
+            save_fields.append('write_protect')
+        if (save_fields):
+            d.save(update_fields=save_fields)
+    except AttributeError,e:
+        raise e
+
 def update_dep(d, o):
-	try:
-		if (d.updated < o.updated):
-			d.save(update_fields=['updated'])
-	except AttributeError,e:
-		raise e
-	
+    try:
+        print 'Trying to update %s'%d
+        save_fields = []
+        if (d.updated < o.updated):
+            save_fields = ['updated']
+
+        if (save_fields):
+            d.save(update_fields=save_fields)
+    except AttributeError,e:
+        raise e
+
 def delete_if_inactive(d, o):
-	#print "Deleting %s (%s)"%(d,d.__class__.__name__)
-	# d.delete()	
-	return
+    try:
+        d.delete()
+        print "Deleted %s (%s)"%(d,d.__class__.__name__)
+    except:
+        pass
+    return
+
 
 @atomic
 def execute_model_policy(instance, deleted):
-	# Automatic dirtying
-	walk_inv_deps(update_dep, instance)
+    # Automatic dirtying
+    if (instance in bad_instances):
+        return
 
-	sender_name = instance.__class__.__name__
-	policy_name = 'model_policy_%s'%sender_name
-	noargs = False
+    # These are the models whose children get deleted when they are deleted
+    delete_policy_models = ['Slice','Sliver','Network']
+    sender_name = instance.__class__.__name__
+    policy_name = 'model_policy_%s'%sender_name
+    noargs = False
 
-	if deleted:
-		walk_inv_deps(delete_if_inactive, instance)
-	else:
-		try:
-			policy_handler = getattr(model_policies, policy_name, None)
-			logger.error("POLICY HANDLER: %s %s" % (policy_name, policy_handler))                       
-			if policy_handler is not None:
-				policy_handler.handle(instance)
-		except:
-			logger.log_exc("Model Policy Error:") 
-			print "Policy Exceution Error"
+    if (not deleted):
+        walk_inv_deps(update_dep, instance)
+        walk_deps(update_wp, instance)
+    elif (sender_name in delete_policy_models):
+        walk_inv_deps(delete_if_inactive, instance)
 
-	instance.policed=datetime.now()
+
+
+    try:
+        policy_handler = getattr(model_policies, policy_name, None)
+        logger.error("POLICY HANDLER: %s %s" % (policy_name, policy_handler))
+        if policy_handler is not None:
+            if (deleted):
+                try:
+                    policy_handler.handle_delete(instance)
+                except AttributeError:
+                    pass
+            else:
+                policy_handler.handle(instance)
+    except:
+        logger.log_exc("Model Policy Error:")
+
+    try:
+        instance.policed=datetime.now()
         instance.save(update_fields=['policed'])
+    except:
+        logging.error('Object %r is defective'%instance)
+        bad_instances.append(instance)
 
 def run_policy():
-        from core.models import Sliver,Slice,Controller,Network,User,SlicePrivilege,Site,SitePrivilege,Image,ControllerSlice,ControllerUser,ControllerSite
-	while (True):
-		start = time.time()
-		models = [Sliver,Slice, Controller, Network, User, SlicePrivilege, Site, SitePrivilege, Image, ControllerSlice, ControllerSite, ControllerUser]
-		objects = []
-		
-		for m in models:
-        		res = m.objects.filter(Q(policed__lt=F('updated')) | Q(policed=None))
-			objects.extend(res)	
+    from core.models import Sliver,Slice,Controller,Network,User,SlicePrivilege,Site,SitePrivilege,Image,ControllerSlice,ControllerUser,ControllerSite
+    while (True):
+        start = time.time()
+        models = [Sliver,Slice, Controller, Network, User, SlicePrivilege, Site, SitePrivilege, Image, ControllerSlice, ControllerSite, ControllerUser]
+        objects = []
+        deleted_objects = []
 
-		for o in objects:
-			print "Working on %r"%o
-			execute_model_policy(o, False)
-		
-		
-		if (time.time()-start<1):
-			time.sleep(1)	
+        for m in models:
+            res = m.objects.filter(Q(policed__lt=F('updated')) | Q(policed=None))
+            objects.extend(res)
+            res = m.deleted_objects.filter(Q(policed__lt=F('updated')) | Q(policed=None))
+            deleted_objects.extend(res)
+
+        for o in objects:
+            execute_model_policy(o, o.deleted)
+
+        for o in deleted_objects:
+            execute_model_policy(o, True)
+
+
+        if (time.time()-start<1):
+            time.sleep(1)
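Note: the dispatch convention in execute_model_policy() is unchanged: a module named model_policy_<ModelName> is looked up in model_policies and its handle() is invoked; with this change, handle_delete() is also invoked for deleted objects when the module defines it. A hedged sketch of such a module (the model name is hypothetical):

    # model_policies/model_policy_Example.py
    def handle(instance):
        # reconcile a new or updated Example object
        print "reconciling", instance

    def handle_delete(instance):
        # optional: clean up after a deleted Example object
        print "cleaning up after", instance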
diff --git a/xos/hpc_observer/Makefile b/xos/observers/hpc/Makefile
similarity index 100%
rename from xos/hpc_observer/Makefile
rename to xos/observers/hpc/Makefile
diff --git a/xos/hpc_observer/fsck.py b/xos/observers/hpc/fsck.py
similarity index 100%
rename from xos/hpc_observer/fsck.py
rename to xos/observers/hpc/fsck.py
diff --git a/xos/observers/hpc/hpc-observer.py b/xos/observers/hpc/hpc-observer.py
new file mode 100755
index 0000000..d6a71ff
--- /dev/null
+++ b/xos/observers/hpc/hpc-observer.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# This imports and runs ../../xos-observer.py
+
+import importlib
+import os
+import sys
+observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../..")
+sys.path.append(observer_path)
+mod = importlib.import_module("xos-observer")
+mod.main()
diff --git a/xos/hpc_observer/hpc_observer_config b/xos/observers/hpc/hpc_observer_config
similarity index 74%
rename from xos/hpc_observer/hpc_observer_config
rename to xos/observers/hpc/hpc_observer_config
index 0e7e53a..16d9077 100644
--- a/xos/hpc_observer/hpc_observer_config
+++ b/xos/observers/hpc/hpc_observer_config
@@ -23,9 +23,9 @@
 
 [observer]
 name=hpc
-dependency_graph=/opt/xos/hpc_observer/model-deps
-steps_dir=/opt/xos/hpc_observer/steps
-deleters_dir=/opt/xos/hpc_observer/deleters
+dependency_graph=/opt/xos/observers/hpc/model-deps
+steps_dir=/opt/xos/observers/hpc/steps
+deleters_dir=/opt/xos/observers/hpc/deleters
 log_file=console
 #/var/log/hpc.log
 driver=None
diff --git a/xos/hpc_observer/hpc_watcher.py b/xos/observers/hpc/hpc_watcher.py
similarity index 95%
rename from xos/hpc_observer/hpc_watcher.py
rename to xos/observers/hpc/hpc_watcher.py
index 15adce9..9eb8afe 100644
--- a/xos/hpc_observer/hpc_watcher.py
+++ b/xos/observers/hpc/hpc_watcher.py
@@ -390,11 +390,15 @@
 
             ip = sliver.get_public_ip()
             if not ip:
-                ip = socket.gethostbyname(sliver.node.name)
+                try:
+                    ip = socket.gethostbyname(sliver.node.name)
+                except:
+                    self.set_status(sliver, service, "watcher.DNS", "dns resolution failure")
+                    continue
 
-            #if not ip:
-            #    self.set_status(sliver, service, "watcher.DNS", "no public IP")
-            #    continue
+            if not ip:
+                self.set_status(sliver, service, "watcher.DNS", "no IP address")
+                continue
 
             checks = HpcHealthCheck.objects.filter(kind="dns")
             if not checks:
@@ -518,7 +522,15 @@
         for sliver in slivers:
             ip = sliver.get_public_ip()
             if not ip:
-                ip = socket.gethostbyname(sliver.node.name)
+                try:
+                    ip = socket.gethostbyname(sliver.node.name)
+                except:
+                    self.set_status(sliver, service, "watcher.watcher", "dns resolution failure")
+                    continue
+
+            if not ip:
+                self.set_status(sliver, service, "watcher.watcher", "no IP address")
+                continue
 
             port = 8015
             if ("redir" in sliver.slice.name):
diff --git a/xos/hpc_observer/hpclib.py b/xos/observers/hpc/hpclib.py
similarity index 100%
rename from xos/hpc_observer/hpclib.py
rename to xos/observers/hpc/hpclib.py
diff --git a/xos/hpc_observer/model-deps b/xos/observers/hpc/model-deps
similarity index 100%
rename from xos/hpc_observer/model-deps
rename to xos/observers/hpc/model-deps
diff --git a/xos/observers/hpc/run.sh b/xos/observers/hpc/run.sh
new file mode 100755
index 0000000..f77d751
--- /dev/null
+++ b/xos/observers/hpc/run.sh
@@ -0,0 +1,6 @@
+#if [[ ! -e ./hpc-backend.py ]]; then
+#    ln -s ../xos-observer.py hpc-backend.py
+#fi
+
+export XOS_DIR=/opt/xos
+python hpc-observer.py  -C $XOS_DIR/observers/hpc/hpc_observer_config
diff --git a/xos/observers/hpc/start.sh b/xos/observers/hpc/start.sh
new file mode 100755
index 0000000..305c07f
--- /dev/null
+++ b/xos/observers/hpc/start.sh
@@ -0,0 +1,6 @@
+#if [[ ! -e ./hpc-backend.py ]]; then
+#    ln -s ../xos-observer.py hpc-backend.py
+#fi
+
+export XOS_DIR=/opt/xos
+nohup python hpc-observer.py  -C $XOS_DIR/observers/hpc/hpc_observer_config > /dev/null 2>&1 &
diff --git a/xos/hpc_observer/steps/garbage_collector.py b/xos/observers/hpc/steps/garbage_collector.py
similarity index 100%
rename from xos/hpc_observer/steps/garbage_collector.py
rename to xos/observers/hpc/steps/garbage_collector.py
diff --git a/xos/hpc_observer/steps/sync_cdnprefix.py b/xos/observers/hpc/steps/sync_cdnprefix.py
similarity index 100%
rename from xos/hpc_observer/steps/sync_cdnprefix.py
rename to xos/observers/hpc/steps/sync_cdnprefix.py
diff --git a/xos/hpc_observer/steps/sync_contentprovider.py b/xos/observers/hpc/steps/sync_contentprovider.py
similarity index 100%
rename from xos/hpc_observer/steps/sync_contentprovider.py
rename to xos/observers/hpc/steps/sync_contentprovider.py
diff --git a/xos/hpc_observer/steps/sync_hpcservices.py b/xos/observers/hpc/steps/sync_hpcservices.py
similarity index 100%
rename from xos/hpc_observer/steps/sync_hpcservices.py
rename to xos/observers/hpc/steps/sync_hpcservices.py
diff --git a/xos/hpc_observer/steps/sync_originserver.py b/xos/observers/hpc/steps/sync_originserver.py
similarity index 100%
rename from xos/hpc_observer/steps/sync_originserver.py
rename to xos/observers/hpc/steps/sync_originserver.py
diff --git a/xos/hpc_observer/steps/sync_serviceprovider.py b/xos/observers/hpc/steps/sync_serviceprovider.py
similarity index 100%
rename from xos/hpc_observer/steps/sync_serviceprovider.py
rename to xos/observers/hpc/steps/sync_serviceprovider.py
diff --git a/xos/hpc_observer/steps/sync_sitemap.py b/xos/observers/hpc/steps/sync_sitemap.py
similarity index 100%
rename from xos/hpc_observer/steps/sync_sitemap.py
rename to xos/observers/hpc/steps/sync_sitemap.py
diff --git a/xos/observers/hpc/stop.sh b/xos/observers/hpc/stop.sh
new file mode 100755
index 0000000..a0b4a8e
--- /dev/null
+++ b/xos/observers/hpc/stop.sh
@@ -0,0 +1 @@
+pkill -9 -f hpc-observer.py
diff --git a/xos/observers/hpc/supervisor/hpc-observer.conf b/xos/observers/hpc/supervisor/hpc-observer.conf
new file mode 100644
index 0000000..f2c79d4
--- /dev/null
+++ b/xos/observers/hpc/supervisor/hpc-observer.conf
@@ -0,0 +1,2 @@
+[program:hpc-observer]
+command=python /opt/xos/observers/hpc/hpc-observer.py -C /opt/xos/observers/hpc/hpc_observer_config
diff --git a/xos/observers/hpc/supervisor/hpc-watcher.conf b/xos/observers/hpc/supervisor/hpc-watcher.conf
new file mode 100644
index 0000000..e0f4eb1
--- /dev/null
+++ b/xos/observers/hpc/supervisor/hpc-watcher.conf
@@ -0,0 +1,2 @@
+[program:hpc-watcher]
+command=python /opt/xos/observers/hpc/hpc_watcher.py
diff --git a/xos/observers/vbng/model-deps b/xos/observers/vbng/model-deps
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/xos/observers/vbng/model-deps
@@ -0,0 +1 @@
+{}
diff --git a/xos/observers/vbng/run.sh b/xos/observers/vbng/run.sh
new file mode 100755
index 0000000..efb586f
--- /dev/null
+++ b/xos/observers/vbng/run.sh
@@ -0,0 +1,6 @@
+#if [[ ! -e ./vbng-observer.py ]]; then
+#    ln -s ../../xos-observer.py vbng-observer.py
+#fi
+
+export XOS_DIR=/opt/xos
+python vbng-observer.py  -C $XOS_DIR/observers/vbng/vbng_observer_config
diff --git a/xos/observers/vbng/start.sh b/xos/observers/vbng/start.sh
new file mode 100755
index 0000000..98008f4
--- /dev/null
+++ b/xos/observers/vbng/start.sh
@@ -0,0 +1,6 @@
+#if [[ ! -e ./vbng-observer.py ]]; then
+#    ln -s ../../xos-observer.py vbng-observer.py
+#fi
+
+export XOS_DIR=/opt/xos
+nohup python vbng-observer.py  -C $XOS_DIR/observers/vbng/vbng_observer_config > /dev/null 2>&1 &
diff --git a/xos/observers/vbng/steps/sync_vbngtenant.py b/xos/observers/vbng/steps/sync_vbngtenant.py
new file mode 100644
index 0000000..64494f8
--- /dev/null
+++ b/xos/observers/vbng/steps/sync_vbngtenant.py
@@ -0,0 +1,85 @@
+import os
+import requests
+import socket
+import sys
+import base64
+from django.db.models import F, Q
+from xos.config import Config
+from observer.syncstep import SyncStep
+from observer.ansible import run_template_ssh
+from core.models import Service
+from cord.models import VCPEService, VCPETenant, VBNGTenant, VBNGService
+from hpc.models import HpcService, CDNPrefix
+from util.logger import Logger, logging
+
+VBNG_API = "http://<vbng-addr>/onos/virtualbng/privateip/"
+
+# hpclibrary will be in steps/..
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncVBNGTenant(SyncStep):
+    provides=[VBNGTenant]
+    observes=VBNGTenant
+    requested_interval=0
+
+    def __init__(self, **args):
+        SyncStep.__init__(self, **args)
+
+    def fetch_pending(self, deleted):
+        if (not deleted):
+            objs = VBNGTenant.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+        else:
+            objs = VBNGTenant.get_deleted_tenant_objects()
+
+        return objs
+
+    def defer_sync(self, o, reason):
+        o.backend_register="{}"
+        o.backend_status = "2 - " + reason
+        o.save(update_fields=['enacted','backend_status','backend_register'])
+        logger.info("defer object %s due to %s" % (str(o), reason))
+
+    def sync_record(self, o):
+        logger.info("sync'ing VBNGTenant %s" % str(o))
+
+        vcpes = VCPETenant.get_tenant_objects().all()
+        vcpes = [x for x in vcpes if (x.vbng is not None) and (x.vbng.id == o.id)]
+        if not vcpes:
+            raise Exception("No vCPE tenant is associated with vBNG %s" % str(o.id))
+        if len(vcpes)>1:
+            raise Exception("More than one vCPE tenant is associated with vBNG %s" % str(o.id))
+
+        vcpe = vcpes[0]
+        sliver = vcpe.sliver
+
+        if not sliver:
+            raise Exception("No sliver associated with vBNG %s" % str(o.id))
+
+        external_ns = None
+        for ns in sliver.networkslivers.all():
+            if (ns.ip) and (ns.network.template.visibility=="private") and (ns.network.template.translation=="none"):
+                # need some logic here to find the right network
+                external_ns = ns
+
+        if not external_ns:
+            self.defer_sync(o, "private network is not filled in yet")
+            return
+
+        private_ip = external_ns.ip
+
+        if not o.routeable_subnet:
+            print "This is where we would call Pingping's API"
+            o.routeable_subnet = "placeholder-from-observer"
+
+            # r = requests.post(VBNG_API + "%s" % private_ip, )
+            # public_ip = r.json()
+            # o.routeable_subnet = public_ip
+
+        o.save()
+
+    def delete_record(self, m):
+        pass
+
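For reference, the commented-out block above could eventually look something like the sketch below once the ONOS virtualbng endpoint is pinned down; the address, port and JSON response shape here are placeholders, not anything this patch defines.

# Hypothetical sketch only: endpoint address and response format are assumptions.
import requests

VBNG_API = "http://10.0.0.1:8181/onos/virtualbng/privateip/"   # assumed address/port

def request_public_ip(private_ip):
    # POST the sliver's private address; the virtualbng app is expected to
    # answer with the allocated public address / routeable subnet as JSON.
    r = requests.post(VBNG_API + private_ip)
    r.raise_for_status()
    return r.json()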
diff --git a/xos/observers/vbng/stop.sh b/xos/observers/vbng/stop.sh
new file mode 100755
index 0000000..d49591e
--- /dev/null
+++ b/xos/observers/vbng/stop.sh
@@ -0,0 +1 @@
+pkill -9 -f vbng-observer.py
diff --git a/xos/observers/vbng/supervisor/vbng-observer.conf b/xos/observers/vbng/supervisor/vbng-observer.conf
new file mode 100644
index 0000000..cff77b8
--- /dev/null
+++ b/xos/observers/vbng/supervisor/vbng-observer.conf
@@ -0,0 +1,2 @@
+[program:vbng-observer]
+command=python /opt/xos/observers/vbng/vbng-observer.py -C /opt/xos/observers/vbng/vbng_observer_config
diff --git a/xos/observers/vbng/vbng-observer.py b/xos/observers/vbng/vbng-observer.py
new file mode 100755
index 0000000..d6a71ff
--- /dev/null
+++ b/xos/observers/vbng/vbng-observer.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# This imports and runs ../../xos-observer.py
+
+import importlib
+import os
+import sys
+observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../..")
+sys.path.append(observer_path)
+mod = importlib.import_module("xos-observer")
+mod.main()
diff --git a/xos/hpc_observer/hpc_observer_config b/xos/observers/vbng/vbng_observer_config
similarity index 66%
copy from xos/hpc_observer/hpc_observer_config
copy to xos/observers/vbng/vbng_observer_config
index 0e7e53a..217ce71 100644
--- a/xos/hpc_observer/hpc_observer_config
+++ b/xos/observers/vbng/vbng_observer_config
@@ -22,13 +22,15 @@
 nova_enabled=True
 
 [observer]
-name=hpc
-dependency_graph=/opt/xos/hpc_observer/model-deps
-steps_dir=/opt/xos/hpc_observer/steps
-deleters_dir=/opt/xos/hpc_observer/deleters
+name=vbng
+dependency_graph=/opt/xos/observers/vbng/model-deps
+steps_dir=/opt/xos/observers/vbng/steps
+sys_dir=/opt/xos/observers/vbng/sys
+deleters_dir=/opt/xos/observers/vbng/deleters
 log_file=console
 #/var/log/hpc.log
 driver=None
+pretend=False
 
 [feefie]
 client_id='vicci_dev_central'
diff --git a/xos/observers/vcpe/model-deps b/xos/observers/vcpe/model-deps
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/xos/observers/vcpe/model-deps
@@ -0,0 +1 @@
+{}
diff --git a/xos/observers/vcpe/observer_ansible_test.py b/xos/observers/vcpe/observer_ansible_test.py
new file mode 100644
index 0000000..1b4358d
--- /dev/null
+++ b/xos/observers/vcpe/observer_ansible_test.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+import os
+import sys
+
+sys.path.append("../..")
+import observer.ansible
+
+print sys.argv
+
+private_key="""-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAtJiuarud5S4Y2quDeWyaS0UCQGQtfuSzzNhplFwujYnJGL65
+e14REtv+UuHGymyr/SfkTrBd8vH5NI2UZ/4sZW13ieI/1d97OeVe2+ct0Y4BaFEI
+3Hja6DIpsY3Q2cBQsWUwcQzbMIF9jIq8SzwR1zk8UtZi09fNxqjCchRPlBvbiLKX
+g0/yirN237WbaKzK++8EPy3nuv83216MXHFFSjuxfICe/RhjaqMzVp7isSbv1suU
+geyvNNzU71c/K13gTggdcIXeRQBiJYio2Sn3h2nsV6AFqFH4fjERxWG55Q4e3jeE
+tWM/Dw+hqYKg/25UcmM862a6sUmuDCmM5S3VEQIDAQABAoIBACH88iZoNOki6c6N
+pOq/Q7KSxD+2GtHc3PHacNRZHPHKUqxziJjtNS1lddHHaBBEr4GvdkpQ6v2ECLCZ
+TKrdrSFRnsO2bukjbB+TSWz9byQXI7CsP4yuuhQlDK+7zuiMRyN7tcgw8TeJx0Uh
+/xnxrjHhNbcpXeQcoz+WFzI9HFT1MEGmMS4Lyp/zLB/pmfY9h7V9d+EeRZDi78jq
+Vir6MI6iCTa0T02dvHUFOg+wXLb0nb8V1xKDL+6cAJla7LzwoG8lTnvp5DSYCojI
+5JrILYafeO8RbBV2GWmaE5mkHgeBkFZ+qZQ7K0MjR30Yh6tajB7P3+F/Max8FUgW
+xLHr8AECgYEA2+o0ge3HtZcepEFBKKYnLTwoEpPCfLElWZHzUJYDz259s4JLsfak
+tROANFEdsJUjpmWG52MCL+bgKFFOedDkt4p1jgcIneaHk0jvoU11wG7W3jZZVy1q
+WjQNH5vDU+hg5tm/CREwm7lbUxR9Xuj9K63CNAAGp8KO7h2tOH8woIECgYEA0jrb
+LUg30RxO3+vrq9dUYohrDRisk5zKXuRLfxRA+E+ruvZ7CctG2OpM+658/qZM/w95
+7pOj6zz3//w7tAvH9erY+JOISnzaYKx04sYC1MfbFiFkq5j0gpuYm/MULDYNvFqr
+NU2Buj4dW+ZB+SeficsQOqm5QeNxh1kgiDCs7JECgYEAjSLGCAzeesA9vhTTCI95
+3SIaZbHGw9e8rLtqeHGOiHXU3nvksJYmJsAZK3pTn5xXgNbvuVhlcvCtM7LatntG
+DjUiNMB22z+0CuZoRBE+XP3FkF84/yX6d2Goenyw4wzkA8QDQoJxu789yRgBTgQh
+VwLw/AZ4PvoyWMdbAENApgECgYEAvFikosYP09XTyIPaKaOKY5iqqBoSC1GucSOB
+jAG+T3k5dxB6nQS0nYQUomvqak7drqnT6O33Lrr5ySrW5nCjnmvgJZwvv+Rp1bDM
+K5uRT8caPpJ+Wcp4TUdPi3BVA2MOHVDyEJg3AH/D1+DL/IgGQ/JcwOHsKt61iLhO
+EBXj5zECgYEAk+HuwksUPkSxg/AiJGbapGDK6XGymEUzo2duWlnofRqGcZ3NT3bB
+/kDI1KxQdlpODXSi4/BuTpbQiFOrzcEq5e5ytoMxlCHh3Fl3Jxl+JlgO21vAUvP6
+4SET7Q/6LxmfBlCVRg0dXDwcfJLgbnWxyvprIcz4e0FSFVZTBs/6tFk=
+-----END RSA PRIVATE KEY-----
+"""
+
+observer.ansible.run_template_ssh("test.yaml",
+                                  {"sliver_name": "onlab_test405-378",
+                                   "instance_id": "instance-0000004d",
+                                   "hostname": "node67.washington.vicci.org",
+                                   "private_key": private_key})
+
diff --git a/xos/observers/vcpe/run.sh b/xos/observers/vcpe/run.sh
new file mode 100755
index 0000000..f180e66
--- /dev/null
+++ b/xos/observers/vcpe/run.sh
@@ -0,0 +1,6 @@
+#if [[ ! -e ./vcpe-observer.py ]]; then
+#    ln -s ../../xos-observer.py vcpe-observer.py
+#fi
+
+export XOS_DIR=/opt/xos
+python vcpe-observer.py  -C $XOS_DIR/observers/vcpe/vcpe_observer_config
diff --git a/xos/observers/vcpe/start.sh b/xos/observers/vcpe/start.sh
new file mode 100755
index 0000000..b402e5d
--- /dev/null
+++ b/xos/observers/vcpe/start.sh
@@ -0,0 +1,6 @@
+#if [[ ! -e ./vcpe-observer.py ]]; then
+#    ln -s ../../xos-observer.py vcpe-observer.py
+#fi
+
+export XOS_DIR=/opt/xos
+nohup python vcpe-observer.py  -C $XOS_DIR/observers/vcpe/vcpe_observer_config > /dev/null 2>&1 &
diff --git a/xos/observers/vcpe/steps/ansible_test/inventory.txt b/xos/observers/vcpe/steps/ansible_test/inventory.txt
new file mode 100644
index 0000000..d5ff47e
--- /dev/null
+++ b/xos/observers/vcpe/steps/ansible_test/inventory.txt
@@ -0,0 +1,7 @@
+[onlab_hpc-355]
+node67.washington.vicci.org instance_id=instance-00000045 sliver_name=onlab_hpc-355
+
+[onlab_test405-372]
+node67.washington.vicci.org instance_id=instance-0000004c sliver_name=onlab_test405-372
+
+
diff --git a/xos/observers/vcpe/steps/ansible_test/test.sh b/xos/observers/vcpe/steps/ansible_test/test.sh
new file mode 100755
index 0000000..157ba9c
--- /dev/null
+++ b/xos/observers/vcpe/steps/ansible_test/test.sh
@@ -0,0 +1,2 @@
+#! /bin/bash
+ansible-playbook --private-key /home/smbaker/.ssh/id_rsa -i ./inventory.txt test.yaml
diff --git a/xos/observers/vcpe/steps/ansible_test/test.yaml b/xos/observers/vcpe/steps/ansible_test/test.yaml
new file mode 100644
index 0000000..6a29d56
--- /dev/null
+++ b/xos/observers/vcpe/steps/ansible_test/test.yaml
@@ -0,0 +1,12 @@
+---
+- hosts: onlab_test405-372
+  connection: xos
+  user: ubuntu
+  vars:
+     foo: 25
+#  instance_name: instance-00000045
+#  slice_name: onlab_hpc-355
+
+  tasks:
+    - name: foobar
+      shell: echo foo > /tmp/foobar
diff --git a/xos/observers/vcpe/steps/ansible_test/xos.py b/xos/observers/vcpe/steps/ansible_test/xos.py
new file mode 100755
index 0000000..3ef72ab
--- /dev/null
+++ b/xos/observers/vcpe/steps/ansible_test/xos.py
@@ -0,0 +1,444 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import re
+import subprocess
+import shlex
+import pipes
+import random
+import select
+import fcntl
+import hmac
+import pwd
+import gettext
+import pty
+from hashlib import sha1
+import ansible.constants as C
+from ansible.callbacks import vvv
+from ansible import errors
+from ansible import utils
+
+class Connection(object):
+    ''' ssh based connections '''
+
+    def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
+        self.runner = runner
+        self.host = host
+        self.ipv6 = ':' in self.host
+        self.port = port
+        self.user = str(user)
+        self.password = password
+        self.private_key_file = private_key_file
+        self.HASHED_KEY_MAGIC = "|1|"
+        self.has_pipelining = True
+        #self.instance_id = "instance-00000045" # C.get_config(C.p, "xos", "instance_id", "INSTANCE_ID", None)
+        #self.sliver_name = "onlab_hpc-355" # C.get_config(C.p, "xos", "sliver_name", "SLIVER_NAME", None)
+
+        inject={}
+        inject= utils.combine_vars(inject, self.runner.inventory.get_variables(self.host))
+
+        self.instance_id = inject["instance_id"]
+        self.sliver_name = inject["sliver_name"]
+
+        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
+        self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
+        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
+
+    def connect(self):
+        ''' connect to the remote host '''
+
+        vvv("ESTABLISH CONNECTION FOR USER: %s" % self.user, host=self.host)
+
+        self.common_args = []
+        extra_args = C.ANSIBLE_SSH_ARGS
+        if extra_args is not None:
+            # make sure there is no empty string added as this can produce weird errors
+            self.common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()]
+        else:
+            self.common_args += ["-o", "ControlMaster=auto",
+                                 "-o", "ControlPersist=60s",
+                                 "-o", "ControlPath=%s" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
+
+        self.common_args += ["-o", "ProxyCommand ssh -q -i %s %s@%s" % (self.private_key_file, self.instance_id, self.host)]
+
+        cp_in_use = False
+        cp_path_set = False
+        for arg in self.common_args:
+            if "ControlPersist" in arg:
+                cp_in_use = True
+            if "ControlPath" in arg:
+                cp_path_set = True
+
+        if cp_in_use and not cp_path_set:
+            self.common_args += ["-o", "ControlPath=%s" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
+
+        if not C.HOST_KEY_CHECKING:
+            self.common_args += ["-o", "StrictHostKeyChecking=no"]
+
+        if self.port is not None:
+            self.common_args += ["-o", "Port=%d" % (self.port)]
+        if self.private_key_file is not None:
+            self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.private_key_file)]
+        elif self.runner.private_key_file is not None:
+            self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.runner.private_key_file)]
+        if self.password:
+            self.common_args += ["-o", "GSSAPIAuthentication=no",
+                                 "-o", "PubkeyAuthentication=no"]
+        else:
+            self.common_args += ["-o", "KbdInteractiveAuthentication=no",
+                                 "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
+                                 "-o", "PasswordAuthentication=no"]
+        if self.user != pwd.getpwuid(os.geteuid())[0]:
+            self.common_args += ["-o", "User="+self.user]
+        self.common_args += ["-o", "ConnectTimeout=%d" % self.runner.timeout]
+
+        return self
+
+    def _run(self, cmd, indata):
+        if indata:
+            # do not use pseudo-pty
+            p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
+                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            stdin = p.stdin
+        else:
+            # try to use a pseudo-pty
+            try:
+                # Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors
+                master, slave = pty.openpty()
+                p = subprocess.Popen(cmd, stdin=slave,
+                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                stdin = os.fdopen(master, 'w', 0)
+                os.close(slave)
+            except:
+                p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
+                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                stdin = p.stdin
+
+        return (p, stdin)
+
+    def _password_cmd(self):
+        if self.password:
+            try:
+                p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE,
+                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                p.communicate()
+            except OSError:
+                raise errors.AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
+            (self.rfd, self.wfd) = os.pipe()
+            return ["sshpass", "-d%d" % self.rfd]
+        return []
+
+    def _send_password(self):
+        if self.password:
+            os.close(self.rfd)
+            os.write(self.wfd, "%s\n" % self.password)
+            os.close(self.wfd)
+
+    def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None):
+        fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+        fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+        # We can't use p.communicate here because the ControlMaster may have stdout open as well
+        stdout = ''
+        stderr = ''
+        rpipes = [p.stdout, p.stderr]
+        if indata:
+            try:
+                stdin.write(indata)
+                stdin.close()
+            except:
+                raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
+        # Read stdout/stderr from process
+        while True:
+            rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
+
+            # fail early if the sudo/su password is wrong
+            if self.runner.sudo and sudoable:
+                if self.runner.sudo_pass:
+                    incorrect_password = gettext.dgettext(
+                        "sudo", "Sorry, try again.")
+                    if stdout.endswith("%s\r\n%s" % (incorrect_password,
+                                                     prompt)):
+                        raise errors.AnsibleError('Incorrect sudo password')
+
+                if stdout.endswith(prompt):
+                    raise errors.AnsibleError('Missing sudo password')
+
+            if self.runner.su and su and self.runner.su_pass:
+                incorrect_password = gettext.dgettext(
+                    "su", "Sorry")
+                if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
+                    raise errors.AnsibleError('Incorrect su password')
+
+            if p.stdout in rfd:
+                dat = os.read(p.stdout.fileno(), 9000)
+                stdout += dat
+                if dat == '':
+                    rpipes.remove(p.stdout)
+            if p.stderr in rfd:
+                dat = os.read(p.stderr.fileno(), 9000)
+                stderr += dat
+                if dat == '':
+                    rpipes.remove(p.stderr)
+            # only break out if no pipes are left to read or
+            # the pipes are completely read and
+            # the process is terminated
+            if (not rpipes or not rfd) and p.poll() is not None:
+                break
+            # No pipes are left to read but process is not yet terminated
+            # Only then it is safe to wait for the process to be finished
+            # NOTE: Actually p.poll() is always None here if rpipes is empty
+            elif not rpipes and p.poll() == None:
+                p.wait()
+                # The process is terminated. Since no pipes to read from are
+                # left, there is no need to call select() again.
+                break
+        # close stdin after process is terminated and stdout/stderr are read
+        # completely (see also issue #848)
+        stdin.close()
+        return (p.returncode, stdout, stderr)
+
+    def not_in_host_file(self, host):
+        if 'USER' in os.environ:
+            user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
+        else:
+            user_host_file = "~/.ssh/known_hosts"
+        user_host_file = os.path.expanduser(user_host_file)
+        
+        host_file_list = []
+        host_file_list.append(user_host_file)
+        host_file_list.append("/etc/ssh/ssh_known_hosts")
+        host_file_list.append("/etc/ssh/ssh_known_hosts2")
+        
+        hfiles_not_found = 0
+        for hf in host_file_list:
+            if not os.path.exists(hf):
+                hfiles_not_found += 1
+                continue
+            try:
+                host_fh = open(hf)
+            except IOError, e:
+                hfiles_not_found += 1
+                continue
+            else:
+                data = host_fh.read()
+                host_fh.close()
+                
+            for line in data.split("\n"):
+                if line is None or " " not in line:
+                    continue
+                tokens = line.split()
+                if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
+                    # this is a hashed known host entry
+                    try:
+                        (kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
+                        hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
+                        hash.update(host)
+                        if hash.digest() == kn_host.decode('base64'):
+                            return False
+                    except:
+                        # invalid hashed host key, skip it
+                        continue
+                else:
+                    # standard host file entry
+                    if host in tokens[0]:
+                        return False
+
+        if (hfiles_not_found == len(host_file_list)):
+            vvv("EXEC previous known host file not found for %s" % host)
+        return True
+
+    def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su_user=None, su=False):
+        ''' run a command on the remote host '''
+
+        ssh_cmd = self._password_cmd()
+        ssh_cmd += ["ssh", "-C"]
+        if not in_data:
+            # we can only use tty when we are not pipelining the modules. piping data into /usr/bin/python
+            # inside a tty automatically invokes the python interactive-mode but the modules are not
+            # compatible with the interactive-mode ("unexpected indent" mainly because of empty lines)
+            ssh_cmd += ["-tt"]
+        if utils.VERBOSITY > 3:
+            ssh_cmd += ["-vvv"]
+        else:
+            ssh_cmd += ["-q"]
+        ssh_cmd += self.common_args
+
+        if self.ipv6:
+            ssh_cmd += ['-6']
+        #ssh_cmd += [self.host]
+        ssh_cmd += [self.sliver_name]
+
+        if su and su_user:
+            sudocmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd)
+            prompt_re = re.compile(prompt)
+            ssh_cmd.append(sudocmd)
+        elif not self.runner.sudo or not sudoable:
+            prompt = None
+            if executable:
+                ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
+            else:
+                ssh_cmd.append(cmd)
+        else:
+            sudocmd, prompt, success_key = utils.make_sudo_cmd(sudo_user, executable, cmd)
+            ssh_cmd.append(sudocmd)
+
+        vvv("EXEC %s" % ssh_cmd, host=self.host)
+
+        not_in_host_file = self.not_in_host_file(self.host)
+
+        if C.HOST_KEY_CHECKING and not_in_host_file:
+            # lock around the initial SSH connectivity so the user prompt about whether to add 
+            # the host to known hosts is not intermingled with multiprocess output.
+            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
+            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
+
+        # create process
+        (p, stdin) = self._run(ssh_cmd, in_data)
+
+        self._send_password()
+
+        if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \
+                (self.runner.su and su and self.runner.su_pass):
+            # several cases are handled for sudo privileges with password
+            # * NOPASSWD (tty & no-tty): detect success_key on stdout
+            # * without NOPASSWD:
+            #   * detect prompt on stdout (tty)
+            #   * detect prompt on stderr (no-tty)
+            fcntl.fcntl(p.stdout, fcntl.F_SETFL,
+                        fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+            fcntl.fcntl(p.stderr, fcntl.F_SETFL,
+                        fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+            sudo_output = ''
+            sudo_errput = ''
+
+            while True:
+                if success_key in sudo_output or \
+                    (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
+                    (self.runner.su_pass and prompt_re.match(sudo_output)):
+                    break
+
+                rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
+                                              [p.stdout], self.runner.timeout)
+                if p.stderr in rfd:
+                    chunk = p.stderr.read()
+                    if not chunk:
+                        raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt')
+                    sudo_errput += chunk
+                    incorrect_password = gettext.dgettext(
+                        "sudo", "Sorry, try again.")
+                    if sudo_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
+                        raise errors.AnsibleError('Incorrect sudo password')
+                    elif sudo_errput.endswith(prompt):
+                        stdin.write(self.runner.sudo_pass + '\n')
+
+                if p.stdout in rfd:
+                    chunk = p.stdout.read()
+                    if not chunk:
+                        raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt')
+                    sudo_output += chunk
+
+                if not rfd:
+                    # timeout. wrap up process communication
+                    stdout = p.communicate()
+                    raise errors.AnsibleError('ssh connection error waiting for sudo or su password prompt')
+
+            if success_key not in sudo_output:
+                if sudoable:
+                    stdin.write(self.runner.sudo_pass + '\n')
+                elif su:
+                    stdin.write(self.runner.su_pass + '\n')
+
+        (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt)
+
+        if C.HOST_KEY_CHECKING and not_in_host_file:
+            # lock around the initial SSH connectivity so the user prompt about whether to add 
+            # the host to known hosts is not intermingled with multiprocess output.
+            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
+            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
+        controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or \
+                              'unknown configuration option: ControlPersist' in stderr
+
+        if C.HOST_KEY_CHECKING:
+            if ssh_cmd[0] == "sshpass" and p.returncode == 6:
+                raise errors.AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this.  Please add this host\'s fingerprint to your known_hosts file to manage this host.')
+
+        if p.returncode != 0 and controlpersisterror:
+            raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
+        if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
+            raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
+
+        return (p.returncode, '', stdout, stderr)
+
+    def put_file(self, in_path, out_path):
+        ''' transfer a file from local to remote '''
+        vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+        if not os.path.exists(in_path):
+            raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
+        cmd = self._password_cmd()
+
+        host = self.host
+        if self.ipv6:
+            host = '[%s]' % host
+
+        if C.DEFAULT_SCP_IF_SSH:
+            cmd += ["scp"] + self.common_args
+            cmd += [in_path,host + ":" + pipes.quote(out_path)]
+            indata = None
+        else:
+            cmd += ["sftp"] + self.common_args + [host]
+            indata = "put %s %s\n" % (pipes.quote(in_path), pipes.quote(out_path))
+
+        (p, stdin) = self._run(cmd, indata)
+
+        self._send_password()
+
+        (returncode, stdout, stderr) = self._communicate(p, stdin, indata)
+
+        if returncode != 0:
+            raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr))
+
+    def fetch_file(self, in_path, out_path):
+        ''' fetch a file from remote to local '''
+        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+        cmd = self._password_cmd()
+
+        host = self.host
+        if self.ipv6:
+            host = '[%s]' % host
+
+        if C.DEFAULT_SCP_IF_SSH:
+            cmd += ["scp"] + self.common_args
+            cmd += [host + ":" + in_path, out_path]
+            indata = None
+        else:
+            cmd += ["sftp"] + self.common_args + [host]
+            indata = "get %s %s\n" % (in_path, out_path)
+
+        p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        self._send_password()
+        stdout, stderr = p.communicate(indata)
+
+        if p.returncode != 0:
+            raise errors.AnsibleError("failed to transfer file from %s:\n%s\n%s" % (in_path, stdout, stderr))
+
+    def close(self):
+        ''' not applicable since we're executing openssh binaries '''
+        pass
+
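To make the plugin's behaviour concrete: the command it ultimately runs is an ordinary ssh invocation that targets the sliver name and tunnels through the node via ProxyCommand as user <instance_id>. A rough illustration follows (key path and quoting simplified; host, instance id and sliver name taken from the test inventory above):

# Illustration only; the real command is assembled in connect()/exec_command().
ssh_cmd = [
    "ssh", "-C", "-tt", "-q",
    "-o", "ProxyCommand ssh -q -i /tmp/service_key instance-0000004d@node67.washington.vicci.org",
    "-o", "StrictHostKeyChecking=no",
    "-o", "User=ubuntu",
    "onlab_test405-378",               # sliver_name, used in place of the hostname
    "/bin/sh -c 'echo hello'",
]
print " ".join(ssh_cmd)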
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.py b/xos/observers/vcpe/steps/sync_vcpetenant.py
new file mode 100644
index 0000000..3fc9310
--- /dev/null
+++ b/xos/observers/vcpe/steps/sync_vcpetenant.py
@@ -0,0 +1,116 @@
+import os
+import socket
+import sys
+import base64
+from django.db.models import F, Q
+from xos.config import Config
+from observer.syncstep import SyncStep
+from observer.ansible import run_template_ssh
+from core.models import Service
+from cord.models import VCPEService, VCPETenant, VOLTTenant
+from hpc.models import HpcService, CDNPrefix
+from util.logger import Logger, logging
+
+# hpclibrary will be in steps/..
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncVCPETenant(SyncStep):
+    provides=[VCPETenant]
+    observes=VCPETenant
+    requested_interval=0
+    template_name = "sync_vcpetenant.yaml"
+    service_key_name = "/opt/xos/observers/vcpe/vcpe_private_key"
+
+    def __init__(self, **args):
+        SyncStep.__init__(self, **args)
+
+    def defer_sync(self, o, reason):
+        o.backend_register="{}"
+        o.backend_status = "2 - " + reason
+        o.save(update_fields=['enacted','backend_status','backend_register'])
+        logger.info("defer object %s due to %s" % (str(o), reason))
+
+    def fetch_pending(self, deleted):
+        if (not deleted):
+            objs = VCPETenant.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+        else:
+            objs = VCPETenant.get_deleted_tenant_objects()
+
+        return objs
+
+    def get_extra_attributes(self, o):
+        # This is a place to include extra attributes that aren't part of the
+        # object itself. In our case, it's handy to know the VLAN IDs when
+        # configuring the VCPE.
+
+        dnsdemux_ip = "none"
+        for service in HpcService.objects.all():
+            for slice in service.slices.all():
+                if "dnsdemux" in slice.name:
+                    for sliver in slice.slivers.all():
+                        if dnsdemux_ip=="none":
+                            try:
+                                dnsdemux_ip = socket.gethostbyname(sliver.node.name)
+                            except:
+                                pass
+
+        cdn_prefixes = []
+        for prefix in CDNPrefix.objects.all():
+            cdn_prefixes.append(prefix.prefix)
+
+        volts = [x for x in VOLTTenant.get_tenant_objects() if (x.vcpe is not None) and (x.vcpe.id==o.id)]
+        vlan_ids = [x.vlan_id for x in volts]
+        return {"vlan_ids": vlan_ids,
+                "dnsdemux_ip": dnsdemux_ip,
+                "cdn_prefixes": cdn_prefixes}
+
+    def get_sliver(self, o):
+        # We need to know which sliver is associated with the object.
+        # For vCPE this is easy, as VCPETenant has a sliver field.
+
+        return o.sliver
+
+    def sync_record(self, o):
+        logger.info("sync'ing VCPETenant %s" % str(o))
+
+        sliver = self.get_sliver(o)
+        if not sliver:
+            self.defer_sync(o, "waiting on sliver")
+            return
+
+        service = o.sliver.slice.service
+        if not service:
+            # Ansible uses the service's keypair in order to SSH into the
+            # instance. It would be bad if the slice had no service.
+
+            raise Exception("Slice %s is not associated with a service" % sliver.slice.name)
+
+        if not os.path.exists(self.service_key_name):
+            raise Exception("Service key %s does not exist" % self.service_key_name)
+
+        service_key = file(self.service_key_name).read()
+
+        fields = { "sliver_name": sliver.name,
+                   "hostname": sliver.node.name,
+                   "instance_id": sliver.instance_id,
+                   "private_key": service_key,
+                 }
+
+        if hasattr(o, "sync_attributes"):
+            for attribute_name in o.sync_attributes:
+                 fields[attribute_name] = getattr(o, attribute_name)
+
+        fields.update(self.get_extra_attributes(o))
+
+        print fields
+
+        run_template_ssh(self.template_name, fields)
+
+        o.save()
+
+    def delete_record(self, m):
+        pass
+
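For a sense of what gets handed to run_template_ssh(), the fields dict assembled in sync_record() plus get_extra_attributes() comes out roughly like this (all values invented for illustration):

# Illustration only; not produced by this patch.
fields = {
    "sliver_name":  "mysite_vcpe-42",
    "hostname":     "node67.washington.vicci.org",
    "instance_id":  "instance-0000004d",
    "private_key":  "-----BEGIN RSA PRIVATE KEY-----\n...",
    # merged in from get_extra_attributes():
    "vlan_ids":     [1000, 1001],
    "dnsdemux_ip":  "10.0.1.5",
    "cdn_prefixes": ["onlab.vicci.org"],
}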
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.yaml b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
new file mode 100644
index 0000000..6c2bad4
--- /dev/null
+++ b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
@@ -0,0 +1,35 @@
+---
+- hosts: {{ sliver_name }}
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+  vars:
+      cdn_enable: {{ cdn_enable }}
+      dnsdemux_ip: {{ dnsdemux_ip }}
+      firewall_enable: {{ firewall_enable }}
+      url_filter_enable: {{ url_filter_enable }}
+      vlan_ids:
+        {% for vlan_id in vlan_ids %}
+        - {{ vlan_id }}
+        {% endfor %}
+      firewall_rules:
+        {% for firewall_rule in firewall_rules.split("\n") %}
+        - {{ firewall_rule }}
+        {% endfor %}
+      cdn_prefixes:
+        {% for prefix in cdn_prefixes %}
+        - {{ prefix }}
+        {% endfor %}
+
+  tasks:
+    - name: make sure /etc/dnsmasq.d exists
+      file: path=/etc/dnsmasq.d state=directory owner=root group=root
+   
+    - name: configure dnsmasq servers
+      template: src=/opt/xos/observers/vcpe/templates/dnsmasq_servers.j2 dest=/etc/dnsmasq.d/servers.conf owner=root group=root
+
+    - name: setup networking
+      template: src=/opt/xos/observers/vcpe/templates/vlan_sample.j2 dest=/etc/vlan_sample owner=root group=root
+
+    - name: setup firewall
+      template: src=/opt/xos/observers/vcpe/templates/firewall_sample.j2 dest=/etc/firewall_sample owner=root group=root
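Note that the {{ ... }} and {% ... %} markers in this playbook are filled in by run_template()/run_template_ssh() with Jinja2 before ansible-playbook ever sees the file, so Ansible receives already-expanded YAML. A minimal sketch of that rendering step, with invented values:

# Sketch only; the observer does this via os_template_env in observer/ansible.py.
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("/opt/xos/observers/vcpe/steps"))
playbook = env.get_template("sync_vcpetenant.yaml").render({
    "sliver_name": "mysite_vcpe-42",
    "cdn_enable": True,
    "dnsdemux_ip": "10.0.1.5",
    "firewall_enable": False,
    "firewall_rules": "",
    "url_filter_enable": False,
    "vlan_ids": [1000, 1001],
    "cdn_prefixes": ["onlab.vicci.org"],
})
print playbook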
diff --git a/xos/observers/vcpe/steps/test.yaml b/xos/observers/vcpe/steps/test.yaml
new file mode 100644
index 0000000..dbda94d
--- /dev/null
+++ b/xos/observers/vcpe/steps/test.yaml
@@ -0,0 +1,7 @@
+---
+- hosts: {{ sliver_name }}
+  connection: ssh
+  user: ubuntu
+  tasks:
+    - name: foobar
+      shell: echo foo > /tmp/foobar
diff --git a/xos/observers/vcpe/stop.sh b/xos/observers/vcpe/stop.sh
new file mode 100755
index 0000000..e90e16c
--- /dev/null
+++ b/xos/observers/vcpe/stop.sh
@@ -0,0 +1 @@
+pkill -9 -f vcpe-observer.py
diff --git a/xos/observers/vcpe/supervisor/vcpe-observer.conf b/xos/observers/vcpe/supervisor/vcpe-observer.conf
new file mode 100644
index 0000000..27d2796
--- /dev/null
+++ b/xos/observers/vcpe/supervisor/vcpe-observer.conf
@@ -0,0 +1,2 @@
+[program:vcpe-observer]
+command=python /opt/xos/observers/vcpe/vcpe-observer.py -C /opt/xos/observers/vcpe/vcpe_observer_config
diff --git a/xos/observers/vcpe/templates/dnsmasq_servers.j2 b/xos/observers/vcpe/templates/dnsmasq_servers.j2
new file mode 100644
index 0000000..359070a
--- /dev/null
+++ b/xos/observers/vcpe/templates/dnsmasq_servers.j2
@@ -0,0 +1,17 @@
+# This file autogenerated by vCPE observer
+# It contains a list of DNS servers for dnsmasq to use.
+
+{% if cdn_enable %}
+# CDN 
+{% for prefix in cdn_prefixes %}
+server=/{{ prefix }}/{{ dnsdemux_ip }}
+{% endfor %}
+{% endif %}
+
+{% if url_filter_enable %}
+# placeholder; figure out what to really use...
+server=dns.xerocole.com
+{% else %}
+# use google's DNS service
+server=8.8.8.8
+{% endif %}
diff --git a/xos/observers/vcpe/templates/firewall_sample.j2 b/xos/observers/vcpe/templates/firewall_sample.j2
new file mode 100644
index 0000000..ce85e68
--- /dev/null
+++ b/xos/observers/vcpe/templates/firewall_sample.j2
@@ -0,0 +1,5 @@
+firewall_enable = {{ firewall_enable }}
+
+{% for firewall_rule in firewall_rules %}
+{{ firewall_rule }}
+{% endfor %}
diff --git a/xos/observers/vcpe/templates/vlan_sample.j2 b/xos/observers/vcpe/templates/vlan_sample.j2
new file mode 100644
index 0000000..a26c840
--- /dev/null
+++ b/xos/observers/vcpe/templates/vlan_sample.j2
@@ -0,0 +1,5 @@
+# below is a list of all vlan_ids associated with this vcpe
+
+{% for vlan_id in vlan_ids %}
+{{ vlan_id }}
+{% endfor %}
diff --git a/xos/observers/vcpe/vcpe-observer.py b/xos/observers/vcpe/vcpe-observer.py
new file mode 100755
index 0000000..d6a71ff
--- /dev/null
+++ b/xos/observers/vcpe/vcpe-observer.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# This imports and runs ../../xos-observer.py
+
+import importlib
+import os
+import sys
+observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../..")
+sys.path.append(observer_path)
+mod = importlib.import_module("xos-observer")
+mod.main()
diff --git a/xos/hpc_observer/hpc_observer_config b/xos/observers/vcpe/vcpe_observer_config
similarity index 66%
copy from xos/hpc_observer/hpc_observer_config
copy to xos/observers/vcpe/vcpe_observer_config
index 0e7e53a..c6500ae 100644
--- a/xos/hpc_observer/hpc_observer_config
+++ b/xos/observers/vcpe/vcpe_observer_config
@@ -22,13 +22,15 @@
 nova_enabled=True
 
 [observer]
-name=hpc
-dependency_graph=/opt/xos/hpc_observer/model-deps
-steps_dir=/opt/xos/hpc_observer/steps
-deleters_dir=/opt/xos/hpc_observer/deleters
+name=vcpe
+dependency_graph=/opt/xos/observers/vcpe/model-deps
+steps_dir=/opt/xos/observers/vcpe/steps
+sys_dir=/opt/xos/observers/vcpe/sys
+deleters_dir=/opt/xos/observers/vcpe/deleters
 log_file=console
 #/var/log/hpc.log
 driver=None
+pretend=False
 
 [feefie]
 client_id='vicci_dev_central'
diff --git a/xos/openstack_observer/ansible.py b/xos/openstack_observer/ansible.py
index 4466cb3..17504b3 100644
--- a/xos/openstack_observer/ansible.py
+++ b/xos/openstack_observer/ansible.py
@@ -7,6 +7,8 @@
 import string
 import random
 import re
+import traceback
+import subprocess
 from xos.config import Config, XOS_DIR
 
 try:
@@ -47,7 +49,7 @@
 def shellquote(s):
     return "'" + s.replace("'", "'\\''") + "'"
 
-def run_template(name, opts,path='', expected_num=None):
+def run_template(name, opts,path='', expected_num=None, ansible_config=None, ansible_hosts=None, run_ansible_script=None):
     template = os_template_env.get_template(name)
     buffer = template.render(opts)
 
@@ -64,25 +66,35 @@
     f.write(buffer)
     f.flush()
 
-    
+    # This is messy -- there's no way to specify ansible config file from
+    # the command line, but we can specify it using the environment.
+    env = os.environ.copy()
+    if ansible_config:
+       env["ANSIBLE_CONFIG"] = ansible_config
+    if ansible_hosts:
+       env["ANSIBLE_HOSTS"] = ansible_hosts
+
     if (not Config().observer_pretend):
-        run = os.popen(XOS_DIR + '/observer/run_ansible %s'%shellquote(fqp))
+        if not run_ansible_script:
+            run_ansible_script = os.path.join(XOS_DIR, "observer/run_ansible")
+
+        #run = os.popen(XOS_DIR + '/observer/run_ansible %s'%shellquote(fqp), env=env)
+        run = subprocess.Popen("%s %s" % (run_ansible_script, shellquote(fqp)), shell=True, stdout=subprocess.PIPE, env=env).stdout
         msg = run.read()
         status = run.close()
 
         
     else:
         msg = open(fqp+'.out').read()
-        
+
     try:
         ok_results = parse_output(msg)
-        if (len(ok_results) != expected_num):
-            raise ValueError('Unexpected num')
+        if (expected_num is not None) and (len(ok_results) != expected_num):
+            raise ValueError('Unexpected num %s!=%d' % (str(expected_num), len(ok_results)) )
     except ValueError,e:
-        all_fatal = re.findall(r'^msg: (.*)',msg,re.MULTILINE)
+        all_fatal = [e.message] + re.findall(r'^msg: (.*)',msg,re.MULTILINE)
         all_fatal2 = re.findall(r'^ERROR: (.*)',msg,re.MULTILINE)
 
-
         all_fatal.extend(all_fatal2)
         try:
             error = ' // '.join(all_fatal)
@@ -92,6 +104,44 @@
 
     return ok_results
 
+def run_template_ssh(name, opts, path='', expected_num=None):
+    instance_id = opts["instance_id"]
+    sliver_name = opts["sliver_name"]
+    hostname = opts["hostname"]
+    private_key = opts["private_key"]
+
+    (private_key_handle, private_key_pathname) = tempfile.mkstemp()
+    (config_handle, config_pathname) = tempfile.mkstemp()
+    (hosts_handle, hosts_pathname) = tempfile.mkstemp()
+
+    try:
+        proxy_command = "ProxyCommand ssh -q -i %s -o StrictHostKeyChecking=no %s@%s" % (private_key_pathname, instance_id, hostname)
+
+        os.write(private_key_handle, private_key)
+        os.close(private_key_handle)
+
+        os.write(config_handle, "[ssh_connection]\n")
+        os.write(config_handle, 'ssh_args = -o "%s" -o StrictHostKeyChecking=no\n' % proxy_command)
+        os.write(config_handle, 'scp_if_ssh = True\n')
+        os.close(config_handle)
+
+        os.write(hosts_handle, "[%s]\n" % sliver_name)
+        os.write(hosts_handle, "%s ansible_ssh_private_key_file=%s\n" % (hostname, private_key_pathname))
+        os.close(hosts_handle)
+
+        print "ANSIBLE_CONFIG=%s" % config_pathname
+        print "ANSIBLE_HOSTS=%s" % hosts_pathname
+
+        return run_template(name, opts, path, expected_num, ansible_config = config_pathname, ansible_hosts = hosts_pathname, run_ansible_script="/opt/xos/observer/run_ansible_verbose")
+
+    finally:
+        #os.remove(private_key_pathname)
+        #os.remove(config_pathname)
+        #os.remove(hosts_pathname)
+        pass
+
+
+
 def main():
     run_template('ansible/sync_user_deployments.yaml',{ "endpoint" : "http://172.31.38.128:5000/v2.0/",
              "name" : "Sapan Bhatia",
diff --git a/xos/openstack_observer/backend.py b/xos/openstack_observer/backend.py
index 48dae2e..5657491 100644
--- a/xos/openstack_observer/backend.py
+++ b/xos/openstack_observer/backend.py
@@ -1,3 +1,5 @@
+import os
+import sys
 import threading
 import time
 from observer.event_loop import XOSObserver
@@ -22,6 +24,7 @@
             model_policy_thread = threading.Thread(target=run_policy)
             model_policy_thread.start()
         else:
+            model_policy_thread = None
             print "Skipping model policies thread for service observer."
 
 
@@ -29,3 +32,15 @@
         #event_manager = EventListener(wake_up=observer.wake_up)
         #event_manager_thread = threading.Thread(target=event_manager.run)
         #event_manager_thread.start()
+
+        while True:
+            try:
+                time.sleep(1000)
+            except KeyboardInterrupt:
+                print "exiting due to keyboard interrupt"
+                # TODO: See about setting the threads as daemons
+                observer_thread._Thread__stop()
+                if model_policy_thread:
+                    model_policy_thread._Thread__stop()
+                sys.exit(1)
+
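One possible shape of the TODO above (running the worker threads as daemons so the process can exit on Ctrl-C without reaching into Thread internals); this is a sketch, not what the patch does:

import sys
import threading
import time

def observer_loop():
    # stand-in for XOSObserver().run() / run_policy()
    while True:
        time.sleep(1)

observer_thread = threading.Thread(target=observer_loop)
observer_thread.daemon = True          # daemon threads die with the main process
observer_thread.start()

try:
    while True:
        time.sleep(1000)
except KeyboardInterrupt:
    print "exiting due to keyboard interrupt"
    sys.exit(1)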
diff --git a/xos/openstack_observer/event_loop.py b/xos/openstack_observer/event_loop.py
index 04b5c97..a63ff3c 100644
--- a/xos/openstack_observer/event_loop.py
+++ b/xos/openstack_observer/event_loop.py
@@ -193,7 +193,8 @@
 		pp.pprint(step_graph)
 		self.ordered_steps = toposort(self.dependency_graph, map(lambda s:s.__name__,self.sync_steps))
 		#self.ordered_steps = ['SyncRoles', 'SyncControllerSites', 'SyncControllerSitePrivileges','SyncImages', 'SyncControllerImages','SyncControllerUsers','SyncControllerUserSitePrivileges','SyncControllerSlices', 'SyncControllerSlicePrivileges', 'SyncControllerUserSlicePrivileges', 'SyncControllerNetworks','SyncSlivers']
-		#self.ordered_steps = ['SyncControllerSites','SyncControllerUsers','SyncControllerSlices','SyncControllerNetworks']
+		#self.ordered_steps = ['SyncControllerSites','SyncRoles','SyncControllerUsers','SyncControllerSlices','SyncControllerNetworks']
+		#self.ordered_steps = ['SyncControllerNetworks']
 		#self.ordered_steps = ['SyncSlivers','SyncNetworkSlivers']
 
 		print "Order of steps=",self.ordered_steps
diff --git a/xos/openstack_observer/run_ansible_verbose b/xos/openstack_observer/run_ansible_verbose
new file mode 100755
index 0000000..d17cad7
--- /dev/null
+++ b/xos/openstack_observer/run_ansible_verbose
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+source /opt/ansible/hacking/env-setup >> /dev/null
+ansible-playbook -vvv "$@"
diff --git a/xos/openstack_observer/steps/sync_controller_images.py b/xos/openstack_observer/steps/sync_controller_images.py
index 19009dd..94b18a0 100644
--- a/xos/openstack_observer/steps/sync_controller_images.py
+++ b/xos/openstack_observer/steps/sync_controller_images.py
@@ -8,6 +8,7 @@
 from core.models import Image, ControllerImages
 from util.logger import observer_logger as logger 
 from observer.ansible import *
+import json
 
 class SyncControllerImages(OpenStackSyncStep):
     provides=[ControllerImages]
@@ -23,6 +24,11 @@
 
     def sync_record(self, controller_image):
         logger.info("Working on image %s on controller %s" % (controller_image.image.name, controller_image.controller))
+
+	controller_register = json.loads(controller_image.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_image.controller.name)
+
         image_fields = {'endpoint':controller_image.controller.auth_url,
                         'admin_user':controller_image.controller.admin_user,
                         'admin_password':controller_image.controller.admin_password,
diff --git a/xos/openstack_observer/steps/sync_controller_networks.py b/xos/openstack_observer/steps/sync_controller_networks.py
index 8866e53..d327b7b 100644
--- a/xos/openstack_observer/steps/sync_controller_networks.py
+++ b/xos/openstack_observer/steps/sync_controller_networks.py
@@ -11,6 +11,7 @@
 from util.logger import observer_logger as logger
 from observer.ansible import *
 from openstack.driver import OpenStackDriver
+import json
 
 import pdb
 
@@ -44,7 +45,8 @@
                     'name':network_name,
                     'subnet_name':subnet_name,
                     'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
-                    'cidr':cidr
+                    'cidr':cidr,
+                    'delete':False	
                     }
 
         res = run_template('sync_controller_networks.yaml', network_fields, path = 'controller_networks',expected_num=2)
@@ -61,6 +63,10 @@
     def sync_record(self, controller_network):
         logger.info("sync'ing network controller %s for network %s slice %s controller %s" % (controller_network, controller_network.network, str(controller_network.network.owner), controller_network.controller))
 
+	controller_register = json.loads(controller_network.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_network.controller.name)
+
         if not controller_network.controller.admin_user:
             logger.info("controller %r has no admin_user, skipping" % controller_network.controller)
             return
@@ -70,6 +76,32 @@
 	    logger.info("saved network controller: %s" % (controller_network))
 
     def delete_record(self, controller_network):
+	controller_register = json.loads(controller_network.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_network.controller.name)
+
+	try:
+        	slice = controller_network.network.owner # XXX: FIXME!!
+        except:
+                raise Exception('Could not get slice for Network %s'%controller_network.network.name)
+
+	network_name = controller_network.network.name
+        subnet_name = '%s-%d'%(network_name,controller_network.pk)
+	cidr = controller_network.subnet
+	network_fields = {'endpoint':controller_network.controller.auth_url,
+                    'admin_user':slice.creator.email, # XXX: FIXME
+                    'tenant_name':slice.name, # XXX: FIXME
+                    'admin_password':slice.creator.remote_password,
+                    'name':network_name,
+                    'subnet_name':subnet_name,
+                    'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
+                    'cidr':cidr,
+		    'delete':True	
+                    }
+
+        res = run_template('sync_controller_networks.yaml', network_fields, path = 'controller_networks',expected_num=1)
+
+	"""
         driver = OpenStackDriver().client_driver(caller=controller_network.network.owner.creator,
                                                  tenant=controller_network.network.owner.name,
                                                  controller=controller_network.controller.name)
@@ -81,3 +113,4 @@
             driver.delete_router(controller_network.router_id)
         if controller_network.net_id:
             driver.delete_network(controller_network.net_id)
+	"""
diff --git a/xos/openstack_observer/steps/sync_controller_networks.yaml b/xos/openstack_observer/steps/sync_controller_networks.yaml
index 8f0d4c1..6754c47 100644
--- a/xos/openstack_observer/steps/sync_controller_networks.yaml
+++ b/xos/openstack_observer/steps/sync_controller_networks.yaml
@@ -15,6 +15,7 @@
         state=present
         {% endif %}
         shared=true
+  {% if not delete %}
   - quantum_subnet:
         auth_url={{ endpoint }} 
         login_username={{ admin_user }}
@@ -30,3 +31,4 @@
         no_gateway=true 
         cidr={{ cidr }}
         {% endif %}
+  {% endif %}
diff --git a/xos/openstack_observer/steps/sync_controller_site_privileges.py b/xos/openstack_observer/steps/sync_controller_site_privileges.py
index 499a0ff..6a13736 100644
--- a/xos/openstack_observer/steps/sync_controller_site_privileges.py
+++ b/xos/openstack_observer/steps/sync_controller_site_privileges.py
@@ -9,6 +9,7 @@
 from core.models.controlleruser import ControllerUser, ControllerSitePrivilege
 from util.logger import observer_logger as logger
 from observer.ansible import *
+import json
 
 class SyncControllerSitePrivileges(OpenStackSyncStep):
     provides=[SitePrivilege]
@@ -25,6 +26,11 @@
     def sync_record(self, controller_site_privilege):
         logger.info("sync'ing controler_site_privilege %s at controller %s" % (controller_site_privilege, controller_site_privilege.controller))
 
+	controller_register = json.loads(controller_site_privilege.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_site_privilege.controller.name)
+
+
         if not controller_site_privilege.controller.admin_user:
             logger.info("controller %r has no admin_user, skipping" % controller_site_privilege.controller)
             return
@@ -68,6 +74,10 @@
             controller_site_privilege.save()
 
     def delete_record(self, controller_site_privilege):
+	controller_register = json.loads(controller_site_privilege.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_site_privilege.controller.name)
+
         if controller_site_privilege.role_id:
             driver = self.driver.admin_driver(controller=controller_site_privilege.controller)
             user = ControllerUser.objects.get(
diff --git a/xos/openstack_observer/steps/sync_controller_sites.py b/xos/openstack_observer/steps/sync_controller_sites.py
index acb6ba9..f101315 100644
--- a/xos/openstack_observer/steps/sync_controller_sites.py
+++ b/xos/openstack_observer/steps/sync_controller_sites.py
@@ -6,6 +6,7 @@
 from core.models.site import *
 from observer.ansible import *
 from util.logger import observer_logger as logger
+import json
 
 class SyncControllerSites(OpenStackSyncStep):
     requested_interval=0
@@ -17,6 +18,10 @@
         return pending.filter(controller__isnull=False)
 
     def sync_record(self, controller_site):
+	controller_register = json.loads(controller_site.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_site.controller.name)
+
 	template = os_template_env.get_template('sync_controller_sites.yaml')
 	tenant_fields = {'endpoint':controller_site.controller.auth_url,
 		         'admin_user': controller_site.controller.admin_user,
@@ -34,6 +39,10 @@
         controller_site.save()
             
     def delete_record(self, controller_site):
+	controller_register = json.loads(controller_site.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_site.controller.name)
+
 	if controller_site.tenant_id:
             driver = self.driver.admin_driver(controller=controller_site.controller)
             driver.delete_tenant(controller_site.tenant_id)
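The disabled-controller guard added to the sync steps above, and repeated in the steps that follow, could be factored into a small helper along these lines; a hypothetical refactoring, not part of this patch:

import json

def assert_controller_enabled(controller):
    # Raise if the controller's backend_register marks it disabled.
    register = json.loads(controller.backend_register or "{}")
    if register.get('disabled', False):
        raise Exception('Controller %s is disabled' % controller.name)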
diff --git a/xos/openstack_observer/steps/sync_controller_slice_privileges.py b/xos/openstack_observer/steps/sync_controller_slice_privileges.py
index f1600ca..38f23c2 100644
--- a/xos/openstack_observer/steps/sync_controller_slice_privileges.py
+++ b/xos/openstack_observer/steps/sync_controller_slice_privileges.py
@@ -9,6 +9,7 @@
 from core.models.controlleruser import ControllerUser, ControllerSlicePrivilege
 from observer.ansible import *
 from util.logger import observer_logger as logger
+import json
 
 class SyncControllerSlicePrivileges(OpenStackSyncStep):
     provides=[SlicePrivilege]
@@ -25,6 +26,10 @@
     def sync_record(self, controller_slice_privilege):
         logger.info("sync'ing controler_slice_privilege %s at controller %s" % (controller_slice_privilege, controller_slice_privilege.controller))
 
+	controller_register = json.loads(controller_slice_privilege.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_slice_privilege.controller.name)
+
         if not controller_slice_privilege.controller.admin_user:
             logger.info("controller %r has no admin_user, skipping" % controller_slice_privilege.controller)
             return
@@ -68,6 +73,10 @@
             controller_slice_privilege.save()
 
     def delete_record(self, controller_slice_privilege):
+	controller_register = json.loads(controller_slice_privilege.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_slice_privilege.controller.name)
+
         if controller_slice_privilege.role_id:
             driver = self.driver.admin_driver(controller=controller_slice_privilege.controller)
             user = ControllerUser.objects.get(
diff --git a/xos/openstack_observer/steps/sync_controller_slices.py b/xos/openstack_observer/steps/sync_controller_slices.py
index 591a1b6..8d4a5e0 100644
--- a/xos/openstack_observer/steps/sync_controller_slices.py
+++ b/xos/openstack_observer/steps/sync_controller_slices.py
@@ -5,12 +5,11 @@
 from django.db.models import F, Q
 from xos.config import Config
 from observer.openstacksyncstep import OpenStackSyncStep
-from core.models import User
-from core.models.slice import Slice, ControllerSlice
-from core.models.controlleruser import ControllerUser
+from core.models import *
 from observer.ansible import *
 from openstack.driver import OpenStackDriver
 from util.logger import observer_logger as logger
+import json
 
 class SyncControllerSlices(OpenStackSyncStep):
     provides=[Slice]
@@ -26,6 +25,10 @@
     def sync_record(self, controller_slice):
         logger.info("sync'ing slice controller %s" % controller_slice)
 
+        controller_register = json.loads(controller_slice.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+            raise Exception('Controller %s is disabled'%controller_slice.controller.name)
+
         if not controller_slice.controller.admin_user:
             logger.info("controller %r has no admin_user, skipping" % controller_slice.controller)
             return
@@ -55,31 +58,38 @@
         tenant_id = res[0]['id']
         if (not controller_slice.tenant_id):
             try:
-                    driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
-                    driver.shell.nova.quotas.update(tenant_id=controller_slice.tenant_id, instances=int(controller_slice.slice.max_slivers))
+                driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
+                driver.shell.nova.quotas.update(tenant_id=tenant_id, instances=int(controller_slice.slice.max_slivers))
             except:
-                    logger.log_exc('Could not update quota for %s'%controller_slice.slice.name)
-                    raise Exception('Could not update quota for %s'%controller_slice.slice.name)
-                
+                logger.log_exc('Could not update quota for %s'%controller_slice.slice.name)
+                raise Exception('Could not update quota for %s'%controller_slice.slice.name)
+
             controller_slice.tenant_id = tenant_id
             controller_slice.backend_status = '1 - OK'
             controller_slice.save()
 
 
     def delete_record(self, controller_slice):
-        user = User.objects.get(id=controller_slice.slice.creator.id)
-        driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
-        client_driver = driver.client_driver(caller=user,
-                                             tenant=controller_slice.slice.name,
-                                             controller=controller_slice.controller)
+        controller_register = json.loads(controller_slice.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+            raise Exception('Controller %s is disabled'%controller_slice.controller.name)
 
-        if controller_slice.router_id and controller_slice.subnet_id:
-            client_driver.delete_router_interface(controller_slice.router_id, controller_slice.subnet_id)
-        if controller_slice.subnet_id:
-            client_driver.delete_subnet(controller_slice.subnet_id)
-        if controller_slice.router_id:
-            client_driver.delete_router(controller_slice.router_id)
-        if controller_slice.network_id:
-            client_driver.delete_network(controller_slice.network_id)
-        if controller_slice.tenant_id:
-            driver.delete_tenant(controller_slice.tenant_id)
+        controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
+                                                              controller=controller_slice.controller)
+        if not controller_users:
+            raise Exception("slice creator %s has no account at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
+        else:
+            controller_user = controller_users[0]
+
+        tenant_fields = {'endpoint':controller_slice.controller.auth_url,
+                          'admin_user': controller_slice.controller.admin_user,
+                          'admin_password': controller_slice.controller.admin_password,
+                          'admin_tenant': 'admin',
+                          'tenant': controller_slice.slice.name,
+                          'tenant_description': controller_slice.slice.description,
+                          'name':controller_user.user.email,
+                          'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
+                          'delete': True}
+
+        expected_num = 1
+        run_template('sync_controller_slices.yaml', tenant_fields, path='controller_slices', expected_num=expected_num)
diff --git a/xos/openstack_observer/steps/sync_controller_slices.yaml b/xos/openstack_observer/steps/sync_controller_slices.yaml
index de1caf4..380f001 100644
--- a/xos/openstack_observer/steps/sync_controller_slices.yaml
+++ b/xos/openstack_observer/steps/sync_controller_slices.yaml
@@ -2,7 +2,11 @@
 - hosts: 127.0.0.1
   connection: local
   tasks:
+  {% if delete %}
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}" state=absent
+  {% else %}	
   - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}"
   {% for role in roles %}
   - keystone_user: endpoint={{ endpoint}} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} user="{{ name }}" role={{ role }} tenant={{ tenant }}
   {% endfor %}
+  {% endif %} 
diff --git a/xos/openstack_observer/steps/sync_controller_users.py b/xos/openstack_observer/steps/sync_controller_users.py
index acb3050..47d1096 100644
--- a/xos/openstack_observer/steps/sync_controller_users.py
+++ b/xos/openstack_observer/steps/sync_controller_users.py
@@ -9,6 +9,7 @@
 from core.models.controlleruser import ControllerUser
 from observer.ansible import *
 from util.logger import observer_logger as logger
+import json
 
 class SyncControllerUsers(OpenStackSyncStep):
     provides=[User]
@@ -25,6 +26,10 @@
     def sync_record(self, controller_user):
         logger.info("sync'ing user %s at controller %s" % (controller_user.user, controller_user.controller))
 
+	controller_register = json.loads(controller_user.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_user.controller.name)
+
         if not controller_user.controller.admin_user:
             logger.info("controller %r has no admin_user, skipping" % controller_user.controller)
             return
@@ -72,6 +77,10 @@
             controller_user.save()
 
     def delete_record(self, controller_user):
+	controller_register = json.loads(controller_user.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%controller_user.controller.name)
+
         if controller_user.kuser_id:
             driver = self.driver.admin_driver(controller=controller_user.controller)
             driver.delete_user(controller_user.kuser_id)
diff --git a/xos/openstack_observer/steps/sync_slivers.py b/xos/openstack_observer/steps/sync_slivers.py
index de17791..9b5dd99 100644
--- a/xos/openstack_observer/steps/sync_slivers.py
+++ b/xos/openstack_observer/steps/sync_slivers.py
@@ -29,6 +29,10 @@
 
     def sync_record(self, sliver):
         logger.info("sync'ing sliver:%s slice:%s controller:%s " % (sliver, sliver.slice.name, sliver.node.site_deployment.controller))
+        controller_register = json.loads(sliver.node.site_deployment.controller.backend_register)
+
+        if (controller_register.get('disabled',False)):
+            raise Exception('Controller %s is disabled'%sliver.node.site_deployment.controller.name)
 
         metadata_update = {}
         if (sliver.numberCores):
@@ -47,6 +51,9 @@
         if sliver.slice.creator.public_key:
             pubkeys.add(sliver.slice.creator.public_key)
 
+        if sliver.slice.service and sliver.slice.service.public_key:
+            pubkeys.add(sliver.slice.service.public_key)
+
         nics = []
         networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]
         controller_networks = ControllerNetwork.objects.filter(network__in=networks,
@@ -54,8 +61,10 @@
 
         for controller_network in controller_networks:
             if controller_network.network.template.visibility == 'private' and \
-               controller_network.network.template.translation == 'none' and controller_network.net_id:
-                nics.append(controller_network.net_id)
+               controller_network.network.template.translation == 'none':
+                   if not controller_network.net_id:
+                        raise Exception("Private Network %s has no id; Try again later" % controller_network.network.name)
+                   nics.append(controller_network.net_id)
 
         # now include network template
         network_templates = [network.template.shared_network_name for network in networks \
@@ -73,16 +82,20 @@
                 if net['name']=='public':
                     nics.append(net['id'])
 
-        # look up image id
-        if (not sliver.image.id):
+        image_id = None
+        controller_images = sliver.image.controllerimages.filter(controller=sliver.node.site_deployment.controller)
+        if controller_images:
+            image_id = controller_images[0].glance_image_id
+            logger.info("using image_id from ControllerImage object: " + str(image_id))
+
+        if image_id is None:
             controller_driver = self.driver.admin_driver(controller=sliver.node.site_deployment.controller)
             image_id = None
             images = controller_driver.shell.glanceclient.images.list()
             for image in images:
                 if image.name == sliver.image.name or not image_id:
                     image_id = image.id
-        else:
-            image_id = sliver.image.id
+                    logger.info("using image_id from glance: " + str(image_id))
 
         try:
             legacy = Config().observer_legacy
@@ -134,6 +147,11 @@
         sliver.save()
 
     def delete_record(self, sliver):
+        controller_register = json.loads(sliver.node.site_deployment.controller.backend_register)
+
+        if (controller_register.get('disabled',False)):
+            raise Exception('Controller %s is disabled'%sliver.node.site_deployment.controller.name)
+
         sliver_name = '%s-%d'%(sliver.slice.name,sliver.id)
         controller = sliver.node.site_deployment.controller
         tenant_fields = {'endpoint':controller.auth_url,
@@ -147,12 +165,12 @@
                      'delete': True}
 
         try:
-               res = run_template('sync_slivers.yaml', tenant_fields,path='slivers', expected_num=1)
+            res = run_template('sync_slivers.yaml', tenant_fields,path='slivers', expected_num=1)
         except Exception,e:
-               print "Could not sync %s"%sliver_name
-               #import traceback
-               #traceback.print_exc()
-               raise e
+            print "Could not sync %s"%sliver_name
+            #import traceback
+            #traceback.print_exc()
+            raise e
 
         if (len(res)!=1):
             raise Exception('Could not delete sliver %s'%sliver.slice.name)
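The image lookup in sync_record above now prefers a per-controller glance id recorded on a ControllerImage object and only falls back to scanning glance. A condensed sketch of that resolution order, using the controllerimages related name and driver attributes that appear in the hunk (the helper name is illustrative):

    def resolve_image_id(sliver, admin_driver):
        # 1) prefer the glance id already recorded for this controller
        controller_images = sliver.image.controllerimages.filter(
            controller=sliver.node.site_deployment.controller)
        if controller_images:
            return controller_images[0].glance_image_id
        # 2) otherwise scan glance, preferring an exact name match but
        #    keeping the first image seen as a last resort
        image_id = None
        for image in admin_driver.shell.glanceclient.images.list():
            if image.name == sliver.image.name or not image_id:
                image_id = image.id
        return image_id

The fallback keeps the original behaviour of settling for the first listed image when nothing matches by name.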
diff --git a/xos/openstack_observer/syncstep.py b/xos/openstack_observer/syncstep.py
index a4d591d..b752760 100644
--- a/xos/openstack_observer/syncstep.py
+++ b/xos/openstack_observer/syncstep.py
@@ -108,7 +108,6 @@
                     next_run = scratchpad['next_run']
                     if (not backoff_disabled and next_run>time.time()):
                         sync_failed = True
-                        print "BACKING OFF, exponent = %d"%scratchpad['exponent']
             except:
                 pass
 
@@ -176,5 +175,11 @@
 
         return failed
 
+    def sync_record(self, o):
+        return
+
+    def delete_record(self, o):
+        return
+
     def __call__(self, **args):
         return self.call(**args)
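With the no-op sync_record and delete_record defaults added above, a concrete step only has to override the hooks it actually uses. A minimal sketch, assuming the same class attributes the steps earlier in this patch rely on (Widget is a hypothetical model):

    class SyncWidgets(OpenStackSyncStep):
        provides = [Widget]       # hypothetical model, for illustration only
        requested_interval = 0

        def sync_record(self, widget):
            # push widget state to the backend here
            pass

        # delete_record is deliberately not overridden; the base-class
        # no-op introduced above is used instead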
diff --git a/xos/scripts/opencloud b/xos/scripts/opencloud
index 4a82c03..94edc46 100755
--- a/xos/scripts/opencloud
+++ b/xos/scripts/opencloud
@@ -135,6 +135,7 @@
     python ./manage.py makemigrations hpc
     python ./manage.py makemigrations requestrouter
     python ./manage.py makemigrations syndicate_storage
+    python ./manage.py makemigrations cord
     #python ./manage.py makemigrations servcomp
 }
 
diff --git a/xos/tools/ansible_hosts.py b/xos/tools/ansible_hosts.py
index e17edf6..0cdb809 100644
--- a/xos/tools/ansible_hosts.py
+++ b/xos/tools/ansible_hosts.py
@@ -13,6 +13,8 @@
 
 NODES_API = REST_API + "nodes/"
 SITES_API = REST_API + "sites/"
+SLICES_API = REST_API + "slices/"
+SLIVERS_API = REST_API + "slivers/"
 
 def get_nodes_by_site():
     r = requests.get(SITES_API + "?no_hyperlinks=1", auth=opencloud_auth)
@@ -31,6 +33,38 @@
 
     return sites
 
+"""
+   WIP
+
+def get_nodes_by_slice():
+    r = requests.get(SLICES_API + "?no_hyperlinks=1", auth=opencloud_auth)
+    slices_list = r.json()
+    slices = {}
+    for slice in slices_list:
+        slice["hostnames"] = []
+        slices[str(slice["id"])] = slice
+
+    r = requests.get(NODES_API + "?no_hyperlinks=1", auth=opencloud_auth)
+    nodes_list = r.json()
+    nodes = {}
+    for node in nodes_list:
+        nodes[str(node["id"])] = node
+
+    r = requests.get(SLIVERS_API + "?no_hyperlinks=1", auth=opencloud_auth)
+    slivers = r.json()
+    for sliver in slivers:
+        if str(sliver["node"]) not in nodes:
+            continue
+        if str(sliver["slice"]) not in slices:
+            continue
+
+        hostname = nodes[str(sliver["node"])]["name"]
+
+        slices[str(sliver["slice"])]["hostnames"].append(hostname)
+
+    return slices
+"""
+
 def main():
     global opencloud_auth
 
diff --git a/xos/tools/cleanup_unique.py b/xos/tools/cleanup_unique.py
new file mode 100644
index 0000000..29fb047
--- /dev/null
+++ b/xos/tools/cleanup_unique.py
@@ -0,0 +1,101 @@
+import os
+import sys
+sys.path.append("/opt/xos")
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
+import django
+from core.models import *
+from hpc.models import *
+from cord.models import *
+django.setup()
+
+for obj in ControllerNetwork.deleted_objects.all():
+    print "Purging deleted object", obj
+    obj.delete(purge=True)
+
+for obj in ControllerSite.deleted_objects.all():
+    print "Purging deleted object", obj
+    obj.delete(purge=True)
+
+for obj in ControllerSlice.deleted_objects.all():
+    print "Purging deleted object", obj
+    obj.delete(purge=True)
+
+for obj in NetworkSlice.deleted_objects.all():
+    print "Purging deleted object", obj
+    obj.delete(purge=True)
+
+for obj in NetworkSliver.deleted_objects.all():
+    print "Purging deleted object", obj
+    obj.delete(purge=True)
+
+for obj in DeploymentPrivilege.deleted_objects.all():
+    print "Purging deleted object", obj
+    obj.delete(purge=True)
+
+for obj in SiteDeployment.deleted_objects.all():
+    print "Purging deleted object", obj
+    obj.delete(purge=True)
+
+seen=[]
+for obj in ControllerNetwork.objects.all():
+     seen.append(obj.id)
+     conflicts = ControllerNetwork.objects.filter(network=obj.network, controller=obj.controller)
+     for conflict in conflicts:
+         if conflict.id not in seen:
+             print "Purging", conflict, conflict.id, "due to duplicate of", obj.id
+             conflict.delete(purge=True)
+
+seen=[]
+for obj in NetworkSlice.objects.all():
+     seen.append(obj.id)
+     conflicts = NetworkSlice.objects.filter(network=obj.network, slice=obj.slice)
+     for conflict in conflicts:
+         if conflict.id not in seen:
+             print "Purging", conflict, conflict.id, "due to duplicate of", obj.id
+             conflict.delete(purge=True)
+
+seen=[]
+for obj in NetworkSliver.objects.all():
+     seen.append(obj.id)
+     conflicts = NetworkSliver.objects.filter(network=obj.network, sliver=obj.sliver)
+     for conflict in conflicts:
+         if conflict.id not in seen:
+             print "Purging", conflict, conflict.id, "due to duplicate of", obj.id
+             conflict.delete(purge=True)
+
+seen=[]
+for obj in DeploymentPrivilege.objects.all():
+     seen.append(obj.id)
+     conflicts = DeploymentPrivilege.objects.filter(user=obj.user, deployment=obj.deployment, role=obj.role)
+     for conflict in conflicts:
+         if conflict.id not in seen:
+             print "Purging", conflict, conflict.id, "due to duplicate of", obj.id
+             conflict.delete(purge=True)
+
+seen=[]
+for obj in SiteDeployment.objects.all():
+     seen.append(obj.id)
+     conflicts = SiteDeployment.objects.filter(site=obj.site, deployment=obj.deployment, controller=obj.controller)
+     for conflict in conflicts:
+         if conflict.id not in seen:
+             print "Purging", conflict, conflict.id, "due to duplicate of", obj.id
+             conflict.delete(purge=True)
+
+seen=[]
+for obj in ControllerSite.objects.all():
+     seen.append(obj.id)
+     conflicts = ControllerSite.objects.filter(site=obj.site, controller=obj.controller)
+     for conflict in conflicts:
+         if conflict.id not in seen:
+             print "Purging", conflict, conflict.id, "due to duplicate of", obj.id
+             conflict.delete(purge=True)
+
+seen=[]
+for obj in ControllerSlice.objects.all():
+     seen.append(obj.id)
+     conflicts = ControllerSlice.objects.filter(slice=obj.slice, controller=obj.controller)
+     for conflict in conflicts:
+         if conflict.id not in seen:
+             print "Purging", conflict, conflict.id, "due to duplicate of", obj.id
+             conflict.delete(purge=True)
+
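The seven duplicate-purge loops above all follow one pattern: keep the first object seen for each natural key and purge later duplicates. The script keeps them unrolled, but the same logic reads as a small helper (illustrative only, not part of the new file):

    def purge_duplicates(model, key_fields):
        # keep the first instance of each key tuple, purge every later conflict
        seen = []
        for obj in model.objects.all():
            seen.append(obj.id)
            lookup = dict((f, getattr(obj, f)) for f in key_fields)
            for conflict in model.objects.filter(**lookup):
                if conflict.id not in seen:
                    print "Purging", conflict, conflict.id, "due to duplicate of", obj.id
                    conflict.delete(purge=True)

    purge_duplicates(ControllerNetwork, ["network", "controller"])
    purge_duplicates(ControllerSlice, ["slice", "controller"])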
diff --git a/xos/xos/exceptions.py b/xos/xos/exceptions.py
index c13de49..9ce38a3 100644
--- a/xos/xos/exceptions.py
+++ b/xos/xos/exceptions.py
@@ -20,6 +20,13 @@
                             "specific_error": why,
                             "fields": fields})
 
+class XOSNotFound(RestFrameworkPermissionDenied):
+    status_code=404
+    def __init__(self, why="object not found", fields={}):
+        APIException.__init__(self, {"error": "XOSNotFound",
+                            "specific_error": why,
+                            "fields": fields})
+
 class XOSValidationError(APIException):
     status_code=403
     def __init__(self, why="validation error", fields={}):
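A short usage sketch for the new XOSNotFound, in the same spirit as the other helpers in this file; the lookup function and import path are assumptions for illustration:

    from core.models import Slice
    from xos.exceptions import XOSNotFound   # import path inferred from the file location

    def get_slice_or_404(pk):
        slices = Slice.objects.filter(id=pk)
        if not slices:
            # reaches the REST client as an HTTP 404 with a structured error body
            raise XOSNotFound("Slice %s not found" % pk, fields={"id": pk})
        return slices[0]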
diff --git a/xos/xos/settings.py b/xos/xos/settings.py
index ba47d72..26ebbf2 100644
--- a/xos/xos/settings.py
+++ b/xos/xos/settings.py
@@ -145,6 +145,7 @@
     'django_evolution',
     'core',
     'hpc',
+    'cord',
     'requestrouter',
 #    'urlfilter',
 #    'servcomp',
@@ -257,7 +258,7 @@
     }
 }
 
-RESTAPI_HOSTNAME = getattr(config, "server_hostname", socket.gethostname())
+RESTAPI_HOSTNAME = getattr(config, "server_restapihostname", getattr(config, "server_hostname", socket.gethostname()))
 RESTAPI_PORT = int(getattr(config, "server_port", "8000"))
 
 BIGQUERY_TABLE = getattr(config, "bigquery_table", "demoevents")
diff --git a/xos/xos/urls.py b/xos/xos/urls.py
index 7f7f5bc..0adf32d 100644
--- a/xos/xos/urls.py
+++ b/xos/xos/urls.py
@@ -7,7 +7,7 @@
 from xosapi import *
 
 from core.views.legacyapi import LegacyXMLRPC
-from core.views.services import ServiceGridView
+from core.views.services import ServiceGridView, ServiceGraphView
 #from core.views.analytics import AnalyticsAjaxView
 from core.models import *
 from rest_framework import generics
@@ -28,6 +28,7 @@
     url(r'^stats', 'core.views.stats.Stats', name='stats'),
     url(r'^observer', 'core.views.observer.Observer', name='observer'),
     url(r'^serviceGrid', ServiceGridView.as_view(), name='serviceGrid'),
+    url(r'^serviceGraph.png', ServiceGraphView.as_view(), name='serviceGraph'),
     url(r'^hpcConfig', 'core.views.hpc_config.HpcConfig', name='hpcConfig'),
 
     url(r'^docs/', include('rest_framework_swagger.urls')),