Merge branch 'master' of github.com:open-cloud/xos
diff --git a/xos/observers/__init__.py b/xos/observers/__init__.py
deleted file mode 100644
index 8b13789..0000000
--- a/xos/observers/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/xos/observers/base/SyncInstanceUsingAnsible.py b/xos/observers/base/SyncInstanceUsingAnsible.py
deleted file mode 100644
index aafbd85..0000000
--- a/xos/observers/base/SyncInstanceUsingAnsible.py
+++ /dev/null
@@ -1,221 +0,0 @@
-import hashlib
-import os
-import socket
-import sys
-import base64
-import json
-import time
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from synchronizers.base.ansible import run_template_ssh
-from core.models import Service, Slice, ControllerSlice, ControllerUser
-from util.logger import Logger, logging
-
-logger = Logger(level=logging.INFO)
-
-class SyncInstanceUsingAnsible(SyncStep):
-    # All of the following should be defined for classes derived from this
-    # base class. Examples below use VCPETenant.
-
-    # provides=[VCPETenant]
-    # observes=VCPETenant
-    # requested_interval=0
-    # template_name = "sync_vcpetenant.yaml"
-    # service_key_name = "/opt/xos/observers/vcpe/vcpe_private_key"
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-
-    def defer_sync(self, o, reason):
-        logger.info("defer object %s due to %s" % (str(o), reason))
-        raise Exception("defer object %s due to %s" % (str(o), reason))
-
-    def get_extra_attributes(self, o):
-        # This is a place to include extra attributes that aren't part of the
-        # object itself.
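-        # (a subclass might return, for example,
-        # {"display_message": o.display_message}, as the helloworld
-        # synchronizer's sync step does)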
-
-        return {}
-
-    def get_instance(self, o):
-        # We need to know what instance is associated with the object. Let's
-        # assume 'o' has a field called 'instance'. If the field is called
-        # something else, or if custom logic is needed, then override this
-        # method.
-
-        return o.instance
-
-    def run_playbook(self, o, fields, template_name=None):
-        if not template_name:
-            template_name = self.template_name
-        tStart = time.time()
-        run_template_ssh(template_name, fields)
-        logger.info("playbook execution time %d" % int(time.time()-tStart))
-
-    def pre_sync_hook(self, o, fields):
-        pass
-
-    def post_sync_hook(self, o, fields):
-        pass
-
-    def sync_fields(self, o, fields):
-        self.run_playbook(o, fields)
-
-    def prepare_record(self, o):
-        pass
-
-    def get_node(self,o):
-        return o.node
-
-    def get_node_key(self, node):
-        return "/root/setup/node_key"
-
-    def get_ansible_fields(self, instance):
-        # return all of the fields that tell Ansible how to talk to the context
-        # that's setting up the container.
-
-        if (instance.isolation == "vm"):
-            # legacy where container was configured by sync_vcpetenant.py
-
-            fields = { "instance_name": instance.name,
-                       "hostname": instance.node.name,
-                       "instance_id": instance.instance_id,
-                       "username": "ubuntu",
-                     }
-            key_name = self.service_key_name
-        elif (instance.isolation == "container"):
-            # container on bare metal
-            node = self.get_node(instance)
-            hostname = node.name
-            fields = { "hostname": hostname,
-                       "baremetal_ssh": True,
-                       "instance_name": "rootcontext",
-                       "username": "root",
-                       "container_name": "%s-%s" % (instance.slice.name, str(instance.id))
-                     }
-            key_name = self.get_node_key(node)
-        else:
-            # container in a VM
-            if not instance.parent:
-                raise Exception("Container-in-VM has no parent")
-            if not instance.parent.instance_id:
-                raise Exception("Container-in-VM parent is not yet instantiated")
-            if not instance.parent.slice.service:
-                raise Exception("Container-in-VM parent has no service")
-            if not instance.parent.slice.service.private_key_fn:
-                raise Exception("Container-in-VM parent service has no private_key_fn")
-            fields = { "hostname": instance.parent.node.name,
-                       "instance_name": instance.parent.name,
-                       "instance_id": instance.parent.instance_id,
-                       "username": "ubuntu",
-                       "nat_ip": instance.parent.get_ssh_ip(),
-                       "container_name": "%s-%s" % (instance.slice.name, str(instance.id))
-                         }
-            key_name = instance.parent.slice.service.private_key_fn
-
-        if not os.path.exists(key_name):
-            raise Exception("Node key %s does not exist" % key_name)
-
-        key = file(key_name).read()
-
-        fields["private_key"] = key
-
-        # now the ceilometer stuff
-
-        cslice = ControllerSlice.objects.get(slice=instance.slice)
-        if not cslice:
-            raise Exception("Controller slice object for %s does not exist" % instance.slice.name)
-
-        cuser = ControllerUser.objects.get(user=instance.creator)
-        if not cuser:
-            raise Exception("Controller user object for %s does not exist" % instance.creator)
-
-        fields.update({"keystone_tenant_id": cslice.tenant_id,
-                       "keystone_user_id": cuser.kuser_id,
-                       "rabbit_user": instance.controller.rabbit_user,
-                       "rabbit_password": instance.controller.rabbit_password,
-                       "rabbit_host": instance.controller.rabbit_host})
-
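-        # For the plain-VM case, the dict returned below looks roughly like
-        # this (values are illustrative, not from the original code):
-        #
-        #   { "instance_name": "mysite_vcpe-3",
-        #     "hostname": "node1.example.org",
-        #     "instance_id": "instance-00000042",
-        #     "username": "ubuntu",
-        #     "private_key": "-----BEGIN RSA PRIVATE KEY-----...",
-        #     "keystone_tenant_id": "...", "keystone_user_id": "...",
-        #     "rabbit_user": "...", "rabbit_password": "...", "rabbit_host": "..." }
-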
-        return fields
-
-    def sync_record(self, o):
-        logger.info("sync'ing object %s" % str(o))
-
-        self.prepare_record(o)
-
-        instance = self.get_instance(o)
-
-        if isinstance(instance, basestring):
-            # sync to some external host
-
-            # XXX - this probably needs more work... 'service' is not defined
-            # in this scope (this branch would raise a NameError), and
-            # "instance_id" is being used here to carry the login username
-
-            fields = { "hostname": instance,
-                       "instance_id": "ubuntu",     # this is the username to log into
-                       "private_key": service.key,  # XXX: undefined name
-                     }
-        else:
-            # sync to an XOS instance
-            if not instance:
-                self.defer_sync(o, "waiting on instance")
-                return
-
-            if not instance.instance_name:
-                self.defer_sync(o, "waiting on instance.instance_name")
-                return
-
-            fields = self.get_ansible_fields(instance)
-
-            fields["ansible_tag"] =  o.__class__.__name__ + "_" + str(o.id)
-
-        # If 'o' defines a 'sync_attributes' list, then we'll copy those
-        # attributes into the Ansible recipe's field list automatically.
-        if hasattr(o, "sync_attributes"):
-            for attribute_name in o.sync_attributes:
-                fields[attribute_name] = getattr(o, attribute_name)
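-        # For example (illustrative, not from the original code): a model with
-        # sync_attributes = ("wan_ip", "wan_mac") would have those two values
-        # copied into 'fields' and made available to the playbook.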
-
-        fields.update(self.get_extra_attributes(o))
-
-        self.sync_fields(o, fields)
-
-        o.save()
-
-    def delete_record(self, o):
-        try:
-            controller = o.get_controller()
-            controller_register = json.loads(o.node.site_deployment.controller.backend_register)
-
-            if (controller_register.get('disabled',False)):
-                # InnocuousException is provided by the synchronizer framework
-                # and is not imported at the top of this file
-                raise InnocuousException('Controller %s is disabled'%o.node.site_deployment.controller.name)
-        except AttributeError:
-            pass
-
-        instance = self.get_instance(o)
-        if isinstance(instance, basestring):
-            # sync to some external host
-
-            # XXX - this probably needs more work... as in sync_record() above,
-            # 'service' is not defined in this scope
-
-            fields = { "hostname": instance,
-                       "instance_id": "ubuntu",     # this is the username to log into
-                       "private_key": service.key,  # XXX: undefined name
-                     }
-        else:
-            # sync to an XOS instance
-            fields = self.get_ansible_fields(instance)
-
-            fields["ansible_tag"] =  o.__class__.__name__ + "_" + str(o.id)
-
-        # If 'o' defines a 'sync_attributes' list, then we'll copy those
-        # attributes into the Ansible recipe's field list automatically.
-        if hasattr(o, "sync_attributes"):
-            for attribute_name in o.sync_attributes:
-                fields[attribute_name] = getattr(o, attribute_name)
-
-        fields.update(self.map_delete_inputs(o))
-
-        fields['delete']=True
-        res = self.run_playbook(o,fields)
-        try:
-            self.map_delete_outputs(o,res)
-        except AttributeError:
-            pass
diff --git a/xos/observers/base/SyncSliverUsingAnsible.py b/xos/observers/base/SyncSliverUsingAnsible.py
deleted file mode 100644
index a76b300..0000000
--- a/xos/observers/base/SyncSliverUsingAnsible.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import hashlib
-import os
-import socket
-import sys
-import base64
-import time
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from synchronizers.base.ansible import run_template_ssh
-from core.models import Service, Slice
-from util.logger import Logger, logging
-
-logger = Logger(level=logging.INFO)
-
-class SyncInstanceUsingAnsible(SyncStep):
-    # All of the following should be defined for classes derived from this
-    # base class. Examples below use VCPETenant.
-
-    # provides=[VCPETenant]
-    # observes=VCPETenant
-    # requested_interval=0
-    # template_name = "sync_vcpetenant.yaml"
-    # service_key_name = "/opt/xos/observers/vcpe/vcpe_private_key"
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-
-    def defer_sync(self, o, reason):
-        logger.info("defer object %s due to %s" % (str(o), reason))
-        raise Exception("defer object %s due to %s" % (str(o), reason))
-
-    def get_extra_attributes(self, o):
-        # This is a place to include extra attributes that aren't part of the
-        # object itself.
-
-        return {}
-
-    def get_instance(self, o):
-        # We need to know what instance is associated with the object. Let's
-        # assume 'o' has a field called 'instance'. If the field is called
-        # something else, or if custom logic is needed, then override this
-        # method.
-
-        return o.instance
-
-    def run_playbook(self, o, fields):
-        tStart = time.time()
-        run_template_ssh(self.template_name, fields)
-        logger.info("playbook execution time %d" % int(time.time()-tStart))
-
-    def pre_sync_hook(self, o, fields):
-        pass
-
-    def post_sync_hook(self, o, fields):
-        pass
-
-    def sync_fields(self, o, fields):
-        self.run_playbook(o, fields)
-
-    def sync_record(self, o):
-        logger.info("sync'ing object %s" % str(o))
-
-        instance = self.get_instance(o)
-        if not instance:
-            self.defer_sync(o, "waiting on instance")
-            return
-
-        if not os.path.exists(self.service_key_name):
-            raise Exception("Service key %s does not exist" % self.service_key_name)
-
-        service_key = file(self.service_key_name).read()
-
-        fields = { "instance_name": instance.name,
-                   "hostname": instance.node.name,
-                   "instance_id": instance.instance_id,
-                   "private_key": service_key,
-                   "ansible_tag": "vcpe_tenant_" + str(o.id)
-                 }
-
-        # If 'o' defines a 'sync_attributes' list, then we'll copy those
-        # attributes into the Ansible recipe's field list automatically.
-        if hasattr(o, "sync_attributes"):
-            for attribute_name in o.sync_attributes:
-                fields[attribute_name] = getattr(o, attribute_name)
-
-        fields.update(self.get_extra_attributes(o))
-
-        self.sync_fields(o, fields)
-
-        o.save()
-
-    def delete_record(self, m):
-        pass
-
diff --git a/xos/observers/base/__init__.py b/xos/observers/base/__init__.py
deleted file mode 100644
index 8b13789..0000000
--- a/xos/observers/base/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/xos/observers/helloworld/helloworld-observer.py b/xos/observers/helloworld/helloworld-observer.py
deleted file mode 100755
index d6a71ff..0000000
--- a/xos/observers/helloworld/helloworld-observer.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-
-# This imports and runs ../../xos-observer.py
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../..")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-observer")
-mod.main()
diff --git a/xos/observers/helloworld/helloworld_config b/xos/observers/helloworld/helloworld_config
deleted file mode 100644
index e32ee0c..0000000
--- a/xos/observers/helloworld/helloworld_config
+++ /dev/null
@@ -1,47 +0,0 @@
-[plc]
-name=plc
-deployment=plc
-
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-[api]
-host=localhost
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-logfile=/var/log/xos.log
-
-[nova]
-admin_user=admin@domain.com
-admin_password=admin
-admin_tenant=admin
-url=http://localhost:5000/v2.0/
-default_image=None
-default_flavor=m1.small
-default_security_group=default
-ca_ssl_cert=/etc/ssl/certs/ca-certificates.crt
-
-[observer]
-pretend=False
-backoff_disabled=False
-images_directory=/opt/xos/images
-dependency_graph=/opt/xos/model-deps
-logfile=/var/log/xos_backend.log
-steps_dir=/opt/xos/observers/helloworld/steps
-applist=helloworld
-
-[gui]
-disable_minidashboard=True
-#branding_name=CORD
-#branding_css=/static/cord.css
-#branding_icon=/static/onos-logo.png
diff --git a/xos/observers/helloworld/model-deps b/xos/observers/helloworld/model-deps
deleted file mode 100644
index 63188f0..0000000
--- a/xos/observers/helloworld/model-deps
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-    "OriginServer": [
-        "ContentProvider"
-    ], 
-    "ContentProvider": [
-        "ServiceProvider"
-    ], 
-    "CDNPrefix": [
-        "ContentProvider"
-    ], 
-    "AccessMap": [
-        "ContentProvider"
-    ], 
-    "SiteMap": [
-        "ContentProvider", 
-        "ServiceProvider", 
-        "CDNPrefix"
-    ]
-}
diff --git a/xos/observers/helloworld/nohup.out b/xos/observers/helloworld/nohup.out
deleted file mode 100644
index 74072c6..0000000
--- a/xos/observers/helloworld/nohup.out
+++ /dev/null
@@ -1 +0,0 @@
-python: can't open file 'helloworld-observer.py': [Errno 2] No such file or directory
diff --git a/xos/observers/helloworld/run.sh b/xos/observers/helloworld/run.sh
deleted file mode 100755
index f56ffe3..0000000
--- a/xos/observers/helloworld/run.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#if [[ ! -e ./hpc-backend.py ]]; then
-#    ln -s ../xos-observer.py hpc-backend.py
-#fi
-
-export XOS_DIR=/opt/xos
-python helloworld-observer.py  -C $XOS_DIR/observers/helloworld/helloworld_config
diff --git a/xos/observers/helloworld/start.sh b/xos/observers/helloworld/start.sh
deleted file mode 100755
index 89240cd..0000000
--- a/xos/observers/helloworld/start.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-export XOS_DIR=/opt/xos
-
-echo $XOS_DIR/observers/helloworld/helloworld_config
-python helloworld-observer.py -C $XOS_DIR/observers/helloworld/helloworld_config
diff --git a/xos/observers/helloworld/steps/sync_hello.py b/xos/observers/helloworld/steps/sync_hello.py
deleted file mode 100644
index 82600e7..0000000
--- a/xos/observers/helloworld/steps/sync_hello.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import os
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from services.helloworld.models import Hello,World
-from util.logger import Logger, logging
-
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-class SyncHello(SyncStep):
-    provides=[Hello]
-    observes=Hello
-    requested_interval=0
-    
-    def sync_record(self, record):
-        instance = record.instance_backref        
-        instance.userData="packages:\n  - apache2\nruncmd:\n  - update-rc.d apache2 enable\n  - service apache2 start\nwrite_files:\n-   content: Hello %s\n    path: /var/www/html/hello.txt"%record.name
-        instance.save()
-        
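-    # For reference (illustrative), the userData written above renders, for
-    # record.name == "world", as:
-    #
-    #   packages:
-    #     - apache2
-    #   runcmd:
-    #     - update-rc.d apache2 enable
-    #     - service apache2 start
-    #   write_files:
-    #   -   content: Hello world
-    #       path: /var/www/html/hello.txt
-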
-    def delete_record(self, m):
-        return
diff --git a/xos/observers/helloworld/steps/sync_world.py b/xos/observers/helloworld/steps/sync_world.py
deleted file mode 100644
index 0a121df..0000000
--- a/xos/observers/helloworld/steps/sync_world.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import os
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from services.helloworld.models import Hello,World
-from util.logger import Logger, logging
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-class SyncWorld(SyncStep):
-    provides=[World]
-    observes=World
-    requested_interval=0
-    
-    def sync_record(self, record):
-        open('/tmp/hello-synchronizer','w').write(record.name)
-        
-    def delete_record(self, m):
-        return
diff --git a/xos/observers/helloworld/stop.sh b/xos/observers/helloworld/stop.sh
deleted file mode 100755
index a0b4a8e..0000000
--- a/xos/observers/helloworld/stop.sh
+++ /dev/null
@@ -1 +0,0 @@
-pkill -9 -f helloworld-observer.py
diff --git a/xos/observers/helloworldservice_complete/helloworldservice-observer.py b/xos/observers/helloworldservice_complete/helloworldservice-observer.py
deleted file mode 100755
index 75dcc46..0000000
--- a/xos/observers/helloworldservice_complete/helloworldservice-observer.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-
-# This imports and runs ../../xos-observer.py
-# Runs the standard XOS observer
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(
-    os.path.realpath(__file__)), "../..")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-observer")
-mod.main()
diff --git a/xos/observers/helloworldservice_complete/helloworldservice_config b/xos/observers/helloworldservice_complete/helloworldservice_config
deleted file mode 100644
index 716e3a0..0000000
--- a/xos/observers/helloworldservice_complete/helloworldservice_config
+++ /dev/null
@@ -1,36 +0,0 @@
-# Required by XOS
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-# Required by XOS
-[api]
-nova_enabled=True
-
-# Sets options for the observer
-[observer]
-# Optional name
-name=helloworldservice
-# This is the location of the dependency graph you generate
-dependency_graph=/opt/xos/observers/helloworldservice_complete/model-deps
-# The location of your SyncSteps
-steps_dir=/opt/xos/observers/helloworldservice_complete/steps
-# A temporary directory that will be used by ansible
-sys_dir=/opt/xos/observers/helloworldservice_complete/sys
-# Location of the file to save logging messages to; the backend log is often used
-logfile=/var/log/xos_backend.log
-# If this option is true, then nothing will change, we simply pretend to run
-pretend=False
-# If this is False, then XOS will use an exponential backoff when the observer
-# fails. Since we will be waiting for an instance, we don't want that here.
-backoff_disabled=True
-# We want the output from ansible to be logged
-save_ansible_output=True
-# This determines how we SSH to a client. If this is set to True, then we try
-# to ssh using the instance name as a proxy; if this is disabled, we ssh using
-# the NAT IP of the instance. On CloudLab the first option will fail, so we
-# must set this to False.
-proxy_ssh=False
diff --git a/xos/observers/helloworldservice_complete/model-deps b/xos/observers/helloworldservice_complete/model-deps
deleted file mode 100644
index 0967ef4..0000000
--- a/xos/observers/helloworldservice_complete/model-deps
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/xos/observers/helloworldservice_complete/run.sh b/xos/observers/helloworldservice_complete/run.sh
deleted file mode 100755
index 6bce079..0000000
--- a/xos/observers/helloworldservice_complete/run.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-# Runs the XOS observer using helloworldservice_config
-export XOS_DIR=/opt/xos
-python helloworldservice-observer.py  -C $XOS_DIR/observers/helloworldservice_complete/helloworldservice_config
diff --git a/xos/observers/helloworldservice_complete/steps/sync_helloworldtenant.py b/xos/observers/helloworldservice_complete/steps/sync_helloworldtenant.py
deleted file mode 100644
index 5604296..0000000
--- a/xos/observers/helloworldservice_complete/steps/sync_helloworldtenant.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import os
-import sys
-from django.db.models import Q, F
-from services.helloworldservice_complete.models import HelloWorldServiceComplete, HelloWorldTenantComplete
-from observers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-
-parentdir = os.path.join(os.path.dirname(__file__), "..")
-sys.path.insert(0, parentdir)
-
-# Class to define how we sync a tenant. Using SyncInstanceUsingAnsible we
-# indicate where to find the YAML for ansible, where to find the SSH key,
-# and the logic for determining which tenants need updating, what additional
-# attributes are needed, and how to delete an instance.
-class SyncHelloWorldTenantComplete(SyncInstanceUsingAnsible):
-    # Indicates the position in the data model; this will run when XOS needs to
-    # enact a HelloWorldTenantComplete.
-    provides = [HelloWorldTenantComplete]
-    # The actual model being enacted, usually the same as provides.
-    observes = HelloWorldTenantComplete
-    # Number of milliseconds between interruptions of the observer
-    requested_interval = 0
-    # The ansible template to run
-    template_name = "sync_helloworldtenant.yaml"
-    # The location of the SSH private key to use when ansible connects to
-    # instances.
-    service_key_name = "/opt/xos/observers/helloworldservice_complete/helloworldservice_private_key"
-
-    def __init__(self, *args, **kwargs):
-        super(SyncHelloWorldTenantComplete, self).__init__(*args, **kwargs)
-
-    # Defines the logic for determining what HelloWorldTenantCompletes need to be
-    # enacted.
-    def fetch_pending(self, deleted):
-        # If the update is not a deletion, then we get all of the tenants that
-        # have been updated since they were last enacted, or that have never
-        # been enacted at all.
-        if (not deleted):
-            objs = HelloWorldTenantComplete.get_tenant_objects().filter(
-                Q(enacted__lt=F('updated')) | Q(enacted=None), Q(lazy_blocked=False))
-        else:
-            # If this is a deletion, we get all of the deleted tenants.
-            objs = HelloWorldTenantComplete.get_deleted_tenant_objects()
-
-        return objs
-
-    # Gets the attributes that are used by the Ansible template but are not
-    # part of the set of default attributes.
-    def get_extra_attributes(self, o):
-        return {"display_message": o.display_message}
diff --git a/xos/observers/helloworldservice_complete/steps/sync_helloworldtenant.yaml b/xos/observers/helloworldservice_complete/steps/sync_helloworldtenant.yaml
deleted file mode 100644
index 719c75f..0000000
--- a/xos/observers/helloworldservice_complete/steps/sync_helloworldtenant.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- hosts: {{ instance_name }}
-  gather_facts: False
-  connection: ssh
-  user: ubuntu
-  sudo: yes
-  tasks:
-  - name: install apache
-    apt: name=apache2 state=present update_cache=yes
-
-  - name: write message
-    shell: echo "{{ display_message }}" > /var/www/html/index.html
-
-  - name: stop apache
-    service: name=apache2 state=stopped
-
-  - name: start apache
-    service: name=apache2 state=started
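-
-# Illustrative note (not in the original playbook): {{ instance_name }} and
-# {{ display_message }} are filled in from the 'fields' dict that
-# SyncInstanceUsingAnsible passes to run_template_ssh; display_message is
-# supplied by get_extra_attributes() in sync_helloworldtenant.py.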
diff --git a/xos/observers/helloworldservice_complete/stop.sh b/xos/observers/helloworldservice_complete/stop.sh
deleted file mode 100755
index 76e68d9..0000000
--- a/xos/observers/helloworldservice_complete/stop.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-# Kill the observer
-pkill -9 -f helloworldservice-observer.py
diff --git a/xos/observers/hpc/Makefile b/xos/observers/hpc/Makefile
deleted file mode 100644
index 4a03bd4..0000000
--- a/xos/observers/hpc/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-hpcobserver.tar.gz:
-	rm -rf BUILD/hpc_observer
-	mkdir -p BUILD/hpc_observer
-	mkdir -p BUILD/hpc_observer/steps
-	mkdir -p BUILD/hpc_observer/deleters
-	cp hpclib.py hpc_observer_config run.sh start.sh  stop.sh BUILD/hpc_observer/
-	cp steps/*.py BUILD/hpc_observer/steps/
-	cp deleters/*.py BUILD/hpc_observer/deleters/
-	mkdir -p TARS
-	cd BUILD; tar -czf ../TARS/hpc_observer.tar.gz hpc_observer
diff --git a/xos/observers/hpc/fsck.py b/xos/observers/hpc/fsck.py
deleted file mode 100644
index a6f641b..0000000
--- a/xos/observers/hpc/fsck.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env python
-import argparse
-import imp
-import inspect
-import os
-import sys
-os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
-sys.path.append("/opt/xos")
-from xos.config import Config, DEFAULT_CONFIG_FN, XOS_DIR
-from util.logger import Logger, logging
-from synchronizers.base.syncstep import SyncStep
-
-try:
-    from django import setup as django_setup # django 1.7
-except:
-    django_setup = False
-
-logger = Logger(level=logging.INFO)
-
-class XOSConsistencyCheck:
-    def __init__(self):
-        self.sync_steps = []
-        self.load_sync_step_modules()
-
-    def load_sync_step_modules(self, step_dir=None):
-        if step_dir is None:
-            if hasattr(Config(), "observer_steps_dir"):
-                step_dir = Config().observer_steps_dir
-            else:
-                step_dir = XOS_DIR+"/observer/steps"
-
-        for fn in os.listdir(step_dir):
-            pathname = os.path.join(step_dir,fn)
-            if os.path.isfile(pathname) and fn.endswith(".py") and (fn!="__init__.py"):
-                module = imp.load_source(fn[:-3],pathname)
-                for classname in dir(module):
-                    c = getattr(module, classname, None)
-
-                    # make sure 'c' is a descendant of SyncStep and has a
-                    # provides field (this eliminates the abstract base classes,
-                    # since they don't have a provides)
-
-                    if inspect.isclass(c) and issubclass(c, SyncStep) and hasattr(c,"provides") and (c not in self.sync_steps):
-                        self.sync_steps.append(c)
-        logger.info('loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps]))
-
-    def run(self):
-        updated = True
-        while updated:
-            updated = False
-
-            for step in self.sync_steps:
-                if hasattr(step, "consistency_check"):
-                    updated = updated or step(driver=None).consistency_check()
-
-            if updated:
-                logger.info('re-running consistency checks because something changed')
-
-def main():
-    if not "-C" in sys.argv:
-        print >> sys.stderr, "You probably wanted to use -C " + XOS_DIR + "/hpc_observer/hpc_observer_config"
-
-    # Generate command line parser
-    parser = argparse.ArgumentParser(usage='%(prog)s [options]')
-    # smbaker: util/config.py parses sys.argv[] directly to get config file name; include the option here to avoid
-    #   throwing unrecognized argument exceptions
-    parser.add_argument('-C', '--config', dest='config_file', action='store', default=DEFAULT_CONFIG_FN,
-                        help='Name of config file.')
-    args = parser.parse_args()
-
-    if django_setup: # 1.7
-        django_setup()
-
-    cc = XOSConsistencyCheck()
-    cc.run()
-
-if __name__ == '__main__':
-    main()
-
diff --git a/xos/observers/hpc/hpc-observer.py b/xos/observers/hpc/hpc-observer.py
deleted file mode 100755
index d6a71ff..0000000
--- a/xos/observers/hpc/hpc-observer.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-
-# This imports and runs ../../xos-observer.py
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../..")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-observer")
-mod.main()
diff --git a/xos/observers/hpc/hpc_observer_config b/xos/observers/hpc/hpc_observer_config
deleted file mode 100644
index 326e731..0000000
--- a/xos/observers/hpc/hpc_observer_config
+++ /dev/null
@@ -1,36 +0,0 @@
-
-[plc]
-name=plc
-deployment=VICCI
-
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-[api]
-host=128.112.171.237
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-name=hpc
-dependency_graph=/opt/xos/observers/hpc/model-deps
-steps_dir=/opt/xos/observers/hpc/steps
-deleters_dir=/opt/xos/observers/hpc/deleters
-log_file=console
-#/var/log/hpc.log
-driver=None
-#cmi_hostname=openclouddev0.internet2.edu
-
-[feefie]
-client_id='vicci_dev_central'
-user_id='pl'
diff --git a/xos/observers/hpc/hpc_watcher.py b/xos/observers/hpc/hpc_watcher.py
deleted file mode 100644
index d2efdcc..0000000
--- a/xos/observers/hpc/hpc_watcher.py
+++ /dev/null
@@ -1,629 +0,0 @@
-"""
-    hpc_watcher.py
-
-    Daemon to watch the health of HPC and RR instances.
-
-    This daemon uses HpcHealthCheck objects in the Data Model to conduct
-    periodic tests of HPC and RR nodes. Two types of Health Checks are
-    supported:
-
-       kind="dns": checks the request routers to make sure that a DNS
-         name is resolvable and returns the right kind of records.
-
-         resource_name should be set to the domain name to lookup.
-
-         result_contains is option and can be used to hold "A", "CNAME", or
-            a particular address or hostname that should be contained in the
-            query's answer.
-
-       kind="http": checks the hpc nodes to make sure that a URL can be
-         retrieved from the node.
-
-         resource_name should be set to the HostName:Url to fetch. For
-         example, cdn-stream.htm.fiu.edu:/hft2441/intro.mp4
-
-     In addition to the above, HPC heartbeat probes are conducted, similar to
-     the ones that dnsredir conducts.
-
-     The results of health checks are stored in a tag attached to the Instance
-     the healthcheck was conducted against. If all healthchecks of a particular
-     variety were successful for a instance, then "success" will be stored in
-     the tag. Otherwise, the first healthcheck to fail will be stored in the
-     tag.
-
-     Ubuntu prereqs:
-         apt-get install python-pycurl
-         pip install dnslib
-"""
-
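-# A hedged sketch (not part of the original file) of how the two kinds of
-# health check described in the docstring might be created from a Django
-# shell. The field values come from the examples above; anything else is an
-# assumption about the HpcHealthCheck model:
-#
-#   from services.hpc.models import HpcHealthCheck
-#   HpcHealthCheck(kind="dns", resource_name="onlab.vicci.org",
-#                  result_contains="A").save()
-#   HpcHealthCheck(kind="http",
-#                  resource_name="cdn-stream.htm.fiu.edu:/hft2441/intro.mp4").save()
-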
-import os
-import socket
-import sys
-sys.path.append("/opt/xos")
-os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
-import django
-from django.contrib.contenttypes.models import ContentType
-from core.models import *
-from services.hpc.models import *
-from services.requestrouter.models import *
-django.setup()
-import time
-import pycurl
-import traceback
-import json
-from StringIO import StringIO
-
-from dnslib.dns import DNSRecord,DNSHeader,DNSQuestion,QTYPE
-from dnslib.digparser import DigParser
-
-from threading import Thread, Condition
-
-"""
-from dnslib import *
-q = DNSRecord(q=DNSQuestion("cdn-stream.htm.fiu.edu"))
-a_pkt = q.send("150.135.65.10", tcp=False, timeout=10)
-a = DNSRecord.parse(a_pkt)
-
-from dnslib import *
-q = DNSRecord(q=DNSQuestion("onlab.vicci.org"))
-a_pkt = q.send("150.135.65.10", tcp=False, timeout=10)
-a = DNSRecord.parse(a_pkt)
-"""
-
-class WorkQueue:
-    def __init__(self):
-        self.job_cv = Condition()
-        self.jobs = []
-        self.result_cv = Condition()
-        self.results = []
-        self.outstanding = 0
-
-    def get_job(self):
-        self.job_cv.acquire()
-        while not self.jobs:
-            self.job_cv.wait()
-        result = self.jobs.pop()
-        self.job_cv.release()
-        return result
-
-    def submit_job(self, job):
-        self.job_cv.acquire()
-        self.jobs.append(job)
-        self.job_cv.notify()
-        self.job_cv.release()
-        self.outstanding = self.outstanding + 1
-
-    def get_result(self):
-        self.result_cv.acquire()
-        while not self.results:
-            self.result_cv.wait()
-        result = self.results.pop()
-        self.result_cv.release()
-        self.outstanding = self.outstanding - 1
-        return result
-
-    def submit_result(self, result):
-        self.result_cv.acquire()
-        self.results.append(result)
-        self.result_cv.notify()
-        self.result_cv.release()
-
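-# Usage sketch (illustrative, not part of the original file): start a few
-# worker threads on a shared queue, submit jobs, then drain the results;
-# each worker fills in job["status"]. The server address is hypothetical.
-#
-#   queue = WorkQueue()
-#   for i in range(0, 10):
-#       DnsResolver(queue=queue)
-#   queue.submit_job({"domain": "onlab.vicci.org", "server": "8.8.8.8", "port": 53})
-#   while queue.outstanding > 0:
-#       print queue.get_result()["status"]
-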
-class DnsResolver(Thread):
-    def __init__(self, queue):
-        Thread.__init__(self)
-        self.queue = queue
-        self.daemon = True
-        self.start()
-
-    def run(self):
-        while True:
-            job = self.queue.get_job()
-            self.handle_job(job)
-            self.queue.submit_result(job)
-
-    def handle_job(self, job):
-        domain = job["domain"]
-        server = job["server"]
-        port = job["port"]
-        result_contains = job.get("result_contains", None)
-
-        try:
-            q = DNSRecord(q=DNSQuestion(domain)) #, getattr(QTYPE,"A")))
-
-            a_pkt = q.send(server, port, tcp=False, timeout=10)
-            a = DNSRecord.parse(a_pkt)
-
-            found_record = False
-            for record in a.rr:
-                if (not result_contains):
-                    QTYPE_A = getattr(QTYPE,"A")
-                    QTYPE_CNAME = getattr(QTYPE, "CNAME")
-                    if ((record.rtype==QTYPE_A) or (record.rtype==QTYPE_CNAME)):
-                        found_record = True
-                else:
-                    tmp = QTYPE.get(record.rtype) + str(record.rdata)
-                    if (result_contains in tmp):
-                        found_record = True
-
-            if not found_record:
-                if result_contains:
-                    job["status"] =  "%s,No %s records" % (domain, result_contains)
-                else:
-                    job["status"] =  "%s,No A or CNAME records" % domain
-
-                return
-
-        except Exception, e:
-            job["status"] = "%s,Exception: %s" % (domain, str(e))
-            return
-
-        job["status"] = "success"
-
-class HpcHeartbeat(Thread):
-    def __init__(self, queue):
-        Thread.__init__(self)
-        self.queue = queue
-        self.daemon = True
-        self.start()
-
-    def run(self):
-        while True:
-            job = self.queue.get_job()
-            self.handle_job(job)
-            self.queue.submit_result(job)
-
-    def curl_error_message(self, e):
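-        # (codes 6 and 7 below are libcurl's CURLE_COULDNT_RESOLVE_HOST and
-        # CURLE_COULDNT_CONNECT, respectively)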
-        if e.args[0] == 6:
-            return "couldn't resolve host"
-        if e.args[0] == 7:
-            return "failed to connect"
-        return "curl error %d" % e.args[0]
-
-    def handle_job(self, job):
-        server = job["server"]
-        port = job["port"]
-
-        try:
-            buffer = StringIO()
-            c = pycurl.Curl()
-
-            c.setopt(c.URL, "http://%s:%s/heartbeat" % (server, port))
-            c.setopt(c.WRITEDATA, buffer)
-            c.setopt(c.HTTPHEADER, ['host: hpc-heartbeat', 'X-heartbeat: 1'])
-            c.setopt(c.TIMEOUT, 10)
-            c.setopt(c.CONNECTTIMEOUT, 10)
-            c.setopt(c.NOSIGNAL, 1)
-
-            try:
-                c.perform()
-                response_code = c.getinfo(c.RESPONSE_CODE)
-            except Exception, e:
-                #traceback.print_exc()
-                job["status"] = self.curl_error_message(e)
-                return
-            finally:
-                c.close()
-
-            if response_code != 200:
-                job["status"] = "error response %d" % response_code
-                return
-
-        except Exception, e:
-            job["status"] = "Exception: %s" % str(e)
-            return
-
-        job["status"] = "success"
-
-class HpcFetchUrl(Thread):
-    def __init__(self, queue):
-        Thread.__init__(self)
-        self.queue = queue
-        self.daemon = True
-        self.start()
-
-    def run(self):
-        while True:
-            job = self.queue.get_job()
-            self.handle_job(job)
-            self.queue.submit_result(job)
-
-    def curl_error_message(self, e):
-        if e.args[0] == 6:
-            return "couldn't resolve host"
-        if e.args[0] == 7:
-            return "failed to connect"
-        return "curl error %d" % e.args[0]
-
-    def handle_job(self, job):
-        server = job["server"]
-        port = job["port"]
-        url = job["url"]
-        domain = job["domain"]
-
-        def progress(download_t, download_d, upload_t, upload_d):
-            # limit download size to a megabyte
-            if (download_d > 1024*1024):
-                return 1
-            else:
-                return 0
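-        # (libcurl aborts the transfer when a progress callback returns a
-        # non-zero value; the E_ABORTED_BY_CALLBACK check below relies on this)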
-
-        try:
-            buffer = StringIO()
-            c = pycurl.Curl()
-
-            c.setopt(c.URL, "http://%s:%s/%s" % (server, port, url))
-            c.setopt(c.WRITEDATA, buffer)
-            c.setopt(c.HTTPHEADER, ['host: ' + domain])
-            c.setopt(c.TIMEOUT, 10)
-            c.setopt(c.CONNECTTIMEOUT, 10)
-            c.setopt(c.NOSIGNAL, 1)
-            c.setopt(c.NOPROGRESS, 0)
-            c.setopt(c.PROGRESSFUNCTION, progress)
-
-            try:
-                try:
-                    c.perform()
-                except Exception, e:
-                    # prevent callback abort from raising exception
-                    if (e.args[0] != pycurl.E_ABORTED_BY_CALLBACK):
-                        raise
-                response_code = c.getinfo(c.RESPONSE_CODE)
-                bytes_downloaded = int(c.getinfo(c.SIZE_DOWNLOAD))
-                total_time = float(c.getinfo(c.TOTAL_TIME))
-            except Exception, e:
-                #traceback.print_exc()
-                job["status"] = self.curl_error_message(e)
-                return
-            finally:
-                c.close()
-
-            if response_code != 200:
-                job["status"] = "error response %s" %  str(response_code)
-                return
-
-        except Exception, e:
-            #traceback.print_exc()
-            job["status"] = "Exception: %s" % str(e)
-            return
-
-        job["status"] = "success"
-        job["bytes_downloaded"] = bytes_downloaded
-        job["total_time"] = total_time
-
-class WatcherWorker(Thread):
-    def __init__(self, queue):
-        Thread.__init__(self)
-        self.queue = queue
-        self.daemon = True
-        self.start()
-
-    def run(self):
-        while True:
-            job = self.queue.get_job()
-            self.handle_job(job)
-            self.queue.submit_result(job)
-
-    def curl_error_message(self, e):
-        if e.args[0] == 6:
-            return "couldn't resolve host"
-        if e.args[0] == 7:
-            return "failed to connect"
-        return "curl error %d" % e.args[0]
-
-    def handle_job(self, job):
-        server = job["server"]
-        port = job["port"]
-
-        try:
-            buffer = StringIO()
-            c = pycurl.Curl()
-
-            c.setopt(c.URL, "http://%s:%s/" % (server, port))
-            c.setopt(c.WRITEDATA, buffer)
-            c.setopt(c.TIMEOUT, 10)
-            c.setopt(c.CONNECTTIMEOUT, 10)
-            c.setopt(c.NOSIGNAL, 1)
-
-            try:
-                c.perform()
-                response_code = c.getinfo(c.RESPONSE_CODE)
-            except Exception, e:
-                #traceback.print_exc()
-                job["status"] = json.dumps( {"status": self.curl_error_message(e)} )
-                return
-            finally:
-                c.close()
-
-            if response_code != 200:
-                job["status"] = json.dumps( {"status": "error response %d" % response_code} )
-                return
-
-            d = json.loads(buffer.getvalue())
-            d["status"] = "success";
-            job["status"] = json.dumps(d)
-
-        except Exception, e:
-            job["status"] = json.dumps( {"status": "Exception: %s" % str(e)} )
-            return
-
-class BaseWatcher(Thread):
-    def __init__(self):
-        Thread.__init__(self)
-        self.daemon = True
-
-    def get_public_ip(self, service, instance):
-        network_name = None
-        if "hpc" in instance.slice.name:
-            network_name = getattr(service, "watcher_hpc_network", None)
-        elif "demux" in instance.slice.name:
-            network_name = getattr(service, "watcher_dnsdemux_network", None)
-        elif "redir" in instance.slice.name:
-            network_name = getattr(service, "watcher_dnsredir_network", None)
-
-        if network_name and network_name.lower()=="nat":
-            return None
-
-        if (network_name is None) or (network_name=="") or (network_name.lower()=="public"):
-            return instance.get_public_ip()
-
-        for ns in instance.ports.all():
-            if (ns.ip) and (ns.network.name==network_name):
-                return ns.ip
-
-        raise ValueError("Couldn't find network %s" % str(network_name))
-
-    def set_status(self, instance, service, kind, msg, check_error=True):
-        #print instance.node.name, kind, msg
-        if check_error:
-            instance.has_error = (msg!="success")
-
-        instance_type = ContentType.objects.get_for_model(instance)
-
-        t = Tag.objects.filter(service=service, name=kind+".msg", content_type__pk=instance_type.id, object_id=instance.id)
-        if t:
-            t=t[0]
-            if (t.value != msg):
-                t.value = msg
-                t.save()
-        else:
-            Tag(service=service, name=kind+".msg", content_object = instance, value=msg).save()
-
-        t = Tag.objects.filter(service=service, name=kind+".time", content_type__pk=instance_type.id, object_id=instance.id)
-        if t:
-            t=t[0]
-            t.value = str(time.time())
-            t.save()
-        else:
-            Tag(service=service, name=kind+".time", content_object = instance, value=str(time.time())).save()
-
-    def get_service_slices(self, service, kind=None):
-        try:
-            slices = service.slices.all()
-        except:
-            # buggy data model
-            slices = service.service.all()
-
-        if kind:
-            return [x for x in slices if (kind in x.name)]
-        else:
-            return list(slices)
-
-class RRWatcher(BaseWatcher):
-    def __init__(self):
-        BaseWatcher.__init__(self)
-
-        self.resolver_queue = WorkQueue()
-        for i in range(0,10):
-            DnsResolver(queue = self.resolver_queue)
-
-    def check_request_routers(self, service, instances):
-        for instance in instances:
-            instance.has_error = False
-
-            try:
-                ip = self.get_public_ip(service, instance)
-            except Exception, e:
-                self.set_status(instance, service, "watcher.DNS", "exception: %s" % str(e))
-                continue
-            if not ip:
-                try:
-                    ip = socket.gethostbyname(instance.node.name)
-                except:
-                    self.set_status(instance, service, "watcher.DNS", "dns resolution failure")
-                    continue
-
-            if not ip:
-                self.set_status(instance, service, "watcher.DNS", "no IP address")
-                continue
-
-            checks = HpcHealthCheck.objects.filter(kind="dns")
-            if not checks:
-                self.set_status(instance, service, "watcher.DNS", "no DNS HealthCheck tests configured")
-
-            for check in checks:
-                self.resolver_queue.submit_job({"domain": check.resource_name, "server": ip, "port": 53, "instance": instance, "result_contains": check.result_contains})
-
-        while self.resolver_queue.outstanding > 0:
-            result = self.resolver_queue.get_result()
-            instance = result["instance"]
-            if (result["status"]!="success") and (not instance.has_error):
-                self.set_status(instance, service, "watcher.DNS", result["status"])
-
-        for instance in instances:
-            if not instance.has_error:
-                self.set_status(instance, service, "watcher.DNS", "success")
-
-    def run_once(self):
-        for hpcService in HpcService.objects.all():
-            for slice in self.get_service_slices(hpcService, "dnsdemux"):
-                self.check_request_routers(hpcService, slice.instances.all())
-
-        for rrService in RequestRouterService.objects.all():
-            for slice in self.get_service_slices(rrService, "dnsdemux"):
-                self.check_request_routers(rrService, slice.instances.all())
-
-    def run(self):
-        while True:
-            self.run_once()
-            time.sleep(10)
-
-            django.db.reset_queries()
-
-class HpcProber(BaseWatcher):
-    def __init__(self):
-        BaseWatcher.__init__(self)
-
-        self.heartbeat_queue = WorkQueue()
-        for i in range(0, 10):
-            HpcHeartbeat(queue = self.heartbeat_queue)
-
-    def probe_hpc(self, service, instances):
-        for instance in instances:
-            instance.has_error = False
-
-            self.heartbeat_queue.submit_job({"server": instance.node.name, "port": 8009, "instance": instance})
-
-        while self.heartbeat_queue.outstanding > 0:
-            result = self.heartbeat_queue.get_result()
-            instance = result["instance"]
-            if (result["status"]!="success") and (not instance.has_error):
-                self.set_status(instance, service, "watcher.HPC-hb", result["status"])
-
-        for instance in instances:
-            if not instance.has_error:
-                self.set_status(instance, service, "watcher.HPC-hb", "success")
-
-    def run_once(self):
-        for hpcService in HpcService.objects.all():
-            for slice in self.get_service_slices(hpcService, "hpc"):
-                self.probe_hpc(hpcService, slice.instances.all())
-
-    def run(self):
-        while True:
-            self.run_once()
-            time.sleep(10)
-
-            django.db.reset_queries()
-
-class HpcFetcher(BaseWatcher):
-    def __init__(self):
-        BaseWatcher.__init__(self)
-
-        self.fetch_queue = WorkQueue()
-        for i in range(0, 10):
-            HpcFetchUrl(queue = self.fetch_queue)
-
-    def fetch_hpc(self, service, instances):
-        for instance in instances:
-            instance.has_error = False
-            instance.url_status = []
-
-            checks = HpcHealthCheck.objects.filter(kind="http")
-            if not checks:
-                self.set_status(instance, service, "watcher.HPC-fetch", "no HTTP HealthCheck tests configured")
-
-            for check in checks:
-                if (not check.resource_name) or (":" not in check.resource_name):
-                    self.set_status(instance, service, "watcher.HPC-fetch", "malformed resource_name: " + str(check.resource_name))
-                    break
-
-                (domain, url) = check.resource_name.split(":",1)
-
-                self.fetch_queue.submit_job({"server": instance.node.name, "port": 80, "instance": instance, "domain": domain, "url": url})
-
-        while self.fetch_queue.outstanding > 0:
-            result = self.fetch_queue.get_result()
-            instance = result["instance"]
-            if (result["status"] == "success"):
-                instance.url_status.append( (result["domain"] + result["url"], "success", result["bytes_downloaded"], result["total_time"]) )
-            if (result["status"]!="success") and (not instance.has_error):
-                self.set_status(instance, service, "watcher.HPC-fetch", result["status"])
-
-        for instance in instances:
-            self.set_status(instance, service, "watcher.HPC-fetch-urls", json.dumps(instance.url_status), check_error=False)
-            if not instance.has_error:
-                self.set_status(instance, service, "watcher.HPC-fetch", "success")
-
-    def run_once(self):
-        for hpcService in HpcService.objects.all():
-            for slice in self.get_service_slices(hpcService, "hpc"):
-                try:
-                    self.fetch_hpc(hpcService, slice.instances.all())
-                except:
-                    traceback.print_exc()
-
-    def run(self):
-        while True:
-            self.run_once()
-            time.sleep(10)
-
-            django.db.reset_queries()
-
-class WatcherFetcher(BaseWatcher):
-    def __init__(self):
-        BaseWatcher.__init__(self)
-
-        self.fetch_queue = WorkQueue()
-        for i in range(0, 10):
-             WatcherWorker(queue = self.fetch_queue)
-
-    def fetch_watcher(self, service, instances):
-        for instance in instances:
-            try:
-                ip = self.get_public_ip(service, instance)
-            except Exception, e:
-                self.set_status(instance, service, "watcher.watcher", json.dumps({"status": "exception: %s" % str(e)}) )
-                continue
-            if not ip:
-                try:
-                    ip = socket.gethostbyname(instance.node.name)
-                except:
-                    self.set_status(instance, service, "watcher.watcher", json.dumps({"status": "dns resolution failure"}) )
-                    continue
-
-            if not ip:
-                self.set_status(instance, service, "watcher.watcher", json.dumps({"status": "no IP address"}) )
-                continue
-
-            port = 8015
-            if ("redir" in instance.slice.name):
-                port = 8016
-            elif ("demux" in instance.slice.name):
-                port = 8017
-
-            self.fetch_queue.submit_job({"server": ip, "port": port, "instance": instance})
-
-        while self.fetch_queue.outstanding > 0:
-            result = self.fetch_queue.get_result()
-            instance = result["instance"]
-            self.set_status(instance, service, "watcher.watcher", result["status"])
-
-    def run_once(self):
-        for hpcService in HpcService.objects.all():
-            for slice in self.get_service_slices(hpcService):
-                self.fetch_watcher(hpcService, slice.instances.all())
-
-    def run(self):
-        while True:
-            self.run_once()
-            time.sleep(10)
-
-            django.db.reset_queries()
-
-
-if __name__ == "__main__":
-    if "--once" in sys.argv:
-        RRWatcher().run_once()
-        HpcProber().run_once()
-        HpcFetcher().run_once()
-        WatcherFetcher().run_once()
-    else:
-        RRWatcher().start()
-        HpcProber().start()
-        HpcFetcher().start()
-        WatcherFetcher().start()
-
-        print "Running forever..."
-        while True:
-            time.sleep(60)
-
diff --git a/xos/observers/hpc/hpclib.py b/xos/observers/hpc/hpclib.py
deleted file mode 100644
index 88abf23..0000000
--- a/xos/observers/hpc/hpclib.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import os
-import base64
-import string
-import sys
-import xmlrpclib
-
-if __name__ == '__main__':
-    sys.path.append("/opt/xos")
-    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
-
-from xos.config import Config
-from core.models import Service
-from services.hpc.models import HpcService
-from services.requestrouter.models import RequestRouterService
-from util.logger import Logger, logging
-
-logger = Logger(level=logging.INFO)
-
-class APIHelper:
-    def __init__(self, proxy, auth, method=None):
-        self.proxy = proxy
-        self.auth = auth
-        self.method = method
-
-    def __getattr__(self, name):
-        if name.startswith("_"):
-            return getattr(self, name)
-        else:
-            return APIHelper(self.proxy, self.auth, name)
-
-    def __call__(self, *args):
-        method = getattr(self.proxy, self.method)
-        return method(self.auth, *args)
-
-class CmiClient:
-    def __init__(self, hostname, port=8003, username="apiuser", password="apiuser"):
-        self.connect_api(hostname, port, username, password)
-
-    def connect_api(self, hostname, port=8003, username="apiuser", password="apiuser"):
-        #print "https://%s:%d/COAPI/" % (hostname, port)
-        cob = xmlrpclib.ServerProxy("https://%s:%d/COAPI/" % (hostname, port), allow_none=True)
-        cob_auth = {}
-        cob_auth["Username"] = username
-        cob_auth["AuthString"] = password
-        cob_auth["AuthMethod"] = "password"
-
-        onev = xmlrpclib.ServerProxy("https://%s:%d/ONEV_API/" % (hostname, port), allow_none=True)
-        onev_auth = {}
-        onev_auth["Username"] = username
-        onev_auth["AuthString"] = password
-        onev_auth["AuthMethod"] = "password"
-
-        self.cob = APIHelper(cob, cob_auth)
-        self.onev = APIHelper(onev, onev_auth)
-
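-# Example (illustrative): CmiClient("cmi.example.org").onev.ListAll("CDN")
-# resolves through APIHelper.__getattr__ and __call__ to
-# onev_proxy.ListAll(onev_auth, "CDN"); the hostname here is hypothetical.
-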
-class HpcLibrary:
-    def __init__(self):
-        self._client = None
-
-    def make_account_name(self, x):
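-        # e.g. (illustrative): make_account_name("My-CDN_Provider") returns
-        # "mycdnprovider" -- lowercased, stripped to [a-z0-9], max 20 chars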
-        x=x.lower()
-        y = ""
-        for c in x:
-            if (c in (string.lowercase + string.digits)):
-                y = y + c
-        return y[:20]
-
-    def get_hpc_service(self):
-        hpc_service_name = getattr(Config(), "observer_hpc_service", None)
-        if hpc_service_name:
-            hpc_service = HpcService.objects.filter(name = hpc_service_name)
-        else:
-            hpc_service = HpcService.objects.all()
-
-        if not hpc_service:
-            if hpc_service_name:
-                raise Exception("No HPC Service with name %s" % hpc_service_name)
-            else:
-                raise Exception("No HPC Services")
-        hpc_service = hpc_service[0]
-
-        return hpc_service
-
-    def get_cmi_hostname(self, hpc_service=None):
-        if getattr(Config(),"observer_cmi_hostname",None):
-            return getattr(Config(),"observer_cmi_hostname")
-
-        if (hpc_service is None):
-            hpc_service = self.get_hpc_service()
-
-        if hpc_service.cmi_hostname:
-            return hpc_service.cmi_hostname
-
-        try:
-            slices = hpc_service.slices.all()
-        except:
-            # deal with buggy data model
-            slices = hpc_service.service.all()
-
-        for slice in slices:
-            if slice.name.endswith("cmi"):
-                for instance in slice.instances.all():
-                    if instance.node:
-                         return instance.node.name
-
-        raise Exception("Failed to find a CMI instance")
-
-    @property
-    def client(self):
-        if self._client is None:
-            self._client = CmiClient(self.get_cmi_hostname())
-        return self._client
-
-if __name__ == '__main__':
-    import django
-    django.setup()
-
-    lib = HpcLibrary()
-
-    print "testing API connection to", lib.get_cmi_hostname()
-    lib.client.cob.GetNewObjects()
-    lib.client.onev.ListAll("CDN")
-
-
-
-
diff --git a/xos/observers/hpc/model-deps b/xos/observers/hpc/model-deps
deleted file mode 100644
index 63188f0..0000000
--- a/xos/observers/hpc/model-deps
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-    "OriginServer": [
-        "ContentProvider"
-    ], 
-    "ContentProvider": [
-        "ServiceProvider"
-    ], 
-    "CDNPrefix": [
-        "ContentProvider"
-    ], 
-    "AccessMap": [
-        "ContentProvider"
-    ], 
-    "SiteMap": [
-        "ContentProvider", 
-        "ServiceProvider", 
-        "CDNPrefix"
-    ]
-}
diff --git a/xos/observers/hpc/run.sh b/xos/observers/hpc/run.sh
deleted file mode 100755
index f77d751..0000000
--- a/xos/observers/hpc/run.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#if [[ ! -e ./hpc-backend.py ]]; then
-#    ln -s ../xos-observer.py hpc-backend.py
-#fi
-
-export XOS_DIR=/opt/xos
-python hpc-observer.py  -C $XOS_DIR/observers/hpc/hpc_observer_config
diff --git a/xos/observers/hpc/start.sh b/xos/observers/hpc/start.sh
deleted file mode 100755
index 305c07f..0000000
--- a/xos/observers/hpc/start.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#if [[ ! -e ./hpc-backend.py ]]; then
-#    ln -s ../xos-observer.py hpc-backend.py
-#fi
-
-export XOS_DIR=/opt/xos
-nohup python hpc-observer.py  -C $XOS_DIR/observers/hpc/hpc_observer_config > /dev/null 2>&1 &
diff --git a/xos/observers/hpc/steps/garbage_collector.py b/xos/observers/hpc/steps/garbage_collector.py
deleted file mode 100644
index 25172a3..0000000
--- a/xos/observers/hpc/steps/garbage_collector.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import os
-import sys
-import base64
-import traceback
-from collections import defaultdict
-from django.db.models import F, Q
-from xos.config import Config
-from util.logger import Logger, logging
-from synchronizers.base.syncstep import SyncStep
-from services.hpc.models import ServiceProvider, ContentProvider, CDNPrefix, OriginServer
-from core.models import *
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from hpclib import HpcLibrary
-
-logger = Logger(level=logging.INFO)
-
-class GarbageCollector(SyncStep, HpcLibrary):
-#    requested_interval = 86400
-    requested_interval = 0
-    provides=[]
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-        HpcLibrary.__init__(self)
-
-    def call(self, **args):
-        logger.info("running garbage collector")
-        try:
-            self.gc_originservers()
-            self.gc_cdnprefixes()
-            self.gc_contentproviders()
-            self.gc_serviceproviders()
-        except:
-            traceback.print_exc()
-
-    def gc_onev(self, ps_class, ps_idField, onev_className, onev_idField):
-        # get the CMI's objects
-        onev_objs = self.client.onev.ListAll(onev_className)
-
-        # get the data model's objects,
-        ps_objs = ps_class.objects.filter(enacted__isnull=False)
-        ps_ids = [str(getattr(x,ps_idField,None)) for x in ps_objs]
-
-        # for each onev object, if its id does not exist in a data model
-        # object, then delete it.
-        for onev_obj in onev_objs:
-            onev_id = onev_obj[onev_idField]
-            if str(onev_id) not in ps_ids:
-                logger.info("garbage collecting %s %s" % (onev_className, str(onev_id)))
-                self.client.onev.Delete(onev_className, onev_id)
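-        # For example (hypothetical ids): if the CMI reports OriginServer ids
-        # [11, 12, 13] but only 11 and 12 are enacted in the data model, the
-        # loop above deletes OriginServer 13 from the CMI.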
-
-    def gc_originservers(self):
-        self.gc_onev(OriginServer, "origin_server_id", "OriginServer", "origin_server_id")
-
-    def gc_cdnprefixes(self):
-        self.gc_onev(CDNPrefix, "cdn_prefix_id", "CDNPrefix", "cdn_prefix_id")
-
-    def gc_contentproviders(self):
-        self.gc_onev(ContentProvider, "content_provider_id", "ContentProvider", "content_provider_id")
-
-    def gc_serviceproviders(self):
-        self.gc_onev(ServiceProvider, "service_provider_id", "ServiceProvider", "service_provider_id")
-
diff --git a/xos/observers/hpc/steps/sync_cdnprefix.py b/xos/observers/hpc/steps/sync_cdnprefix.py
deleted file mode 100644
index d2b0276..0000000
--- a/xos/observers/hpc/steps/sync_cdnprefix.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import os
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from core.models import Service
-from services.hpc.models import ServiceProvider, ContentProvider, CDNPrefix
-from util.logger import Logger, logging
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from hpclib import HpcLibrary
-
-logger = Logger(level=logging.INFO)
-
-class SyncCDNPrefix(SyncStep, HpcLibrary):
-    provides=[CDNPrefix]
-    observes=CDNPrefix
-    requested_interval=0
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-        HpcLibrary.__init__(self)
-
-    def filter_hpc_service(self, objs):
-        hpcService = self.get_hpc_service()
-
-        return [x for x in objs if x.contentProvider.serviceProvider.hpcService == hpcService]
-
-    def fetch_pending(self, deleted):
-        #self.consistency_check()
-
-        return self.filter_hpc_service(SyncStep.fetch_pending(self, deleted))
-
-    def consistency_check(self):
-        # set to true if something changed
-        result=False
-
-        # sanity check to make sure our PS objects have CMI objects behind them
-        all_p_ids = []
-        all_origins = {}
-        for x in self.client.onev.ListAll("CDNPrefix"):
-            id = x["cdn_prefix_id"]
-            all_p_ids.append(id)
-            all_origins[id] = x.get("default_origin_server", None)
-
-        for p in CDNPrefix.objects.all():
-            if (p.cdn_prefix_id is None):
-                continue
-
-            if (p.cdn_prefix_id not in all_p_ids):
-                logger.info("CDN Prefix %s was not found on CMI" % p.cdn_prefix_id)
-                p.cdn_prefix_id=None
-                p.save()
-                result = True
-                continue # no id left to compare origins against; the re-sync will restore it
-
-            if (p.defaultOriginServer!=None) and (all_origins.get(p.cdn_prefix_id,None) != p.defaultOriginServer.url):
-                logger.info("CDN Prefix %s does not have default origin server on CMI" % str(p))
-                p.save() # this will set updated>enacted and force observer to re-sync
-                result = True
-
-        return result
-
-    def sync_record(self, cp):
-        logger.info("sync'ing cdn prefix %s" % str(cp))
-
-        if (not cp.contentProvider) or (not cp.contentProvider.content_provider_id):
-            raise Exception("CDN Prefix %s is linked to a contentProvider without an id" % str(cp))
-
-        cpid = cp.contentProvider.content_provider_id
-
-        cp_dict = {"service": "HyperCache", "enabled": cp.enabled, "content_provider_id": cpid, "cdn_prefix": cp.prefix}
-
-        if cp.defaultOriginServer and cp.defaultOriginServer.url:
-            if (not cp.defaultOriginServer.origin_server_id):
-                # It's probably a bad idea to try to set defaultOriginServer before
-                # we've created defaultOriginServer.
-                raise Exception("cdn prefix %s is waiting for its default origin server to get an id" % str(cp))
-
-            cp_dict["default_origin_server"] = cp.defaultOriginServer.url
-
-        #print cp_dict
-
-        if not cp.cdn_prefix_id:
-            id = self.client.onev.Create("CDNPrefix", cp_dict)
-            cp.cdn_prefix_id = id
-        else:
-            del cp_dict["content_provider_id"]  # this can't be updated
-            del cp_dict["cdn_prefix"] # this can't be updated either
-            self.client.onev.Update("CDNPrefix", cp.cdn_prefix_id, cp_dict)
-
-        cp.save()
-
-    def delete_record(self, m):
-        if m.cdn_prefix_id is not None:
-            self.client.onev.Delete("CDNPrefix", m.cdn_prefix_id)
diff --git a/xos/observers/hpc/steps/sync_contentprovider.py b/xos/observers/hpc/steps/sync_contentprovider.py
deleted file mode 100644
index 4e95c36..0000000
--- a/xos/observers/hpc/steps/sync_contentprovider.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import os
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from core.models import Service
-from services.hpc.models import ServiceProvider, ContentProvider
-from util.logger import Logger, logging
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from hpclib import HpcLibrary
-
-logger = Logger(level=logging.INFO)
-
-class SyncContentProvider(SyncStep, HpcLibrary):
-    provides=[ContentProvider]
-    observes=ContentProvider
-    requested_interval=0
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-        HpcLibrary.__init__(self)
-
-    def filter_hpc_service(self, objs):
-        hpcService = self.get_hpc_service()
-
-        return [x for x in objs if x.serviceProvider.hpcService == hpcService]
-
-    def fetch_pending(self, deleted):
-        #self.consistency_check()
-
-        return self.filter_hpc_service(SyncStep.fetch_pending(self, deleted))
-
-    def consistency_check(self):
-        # set to true if something changed
-        result=False
-
-        # sanity check to make sure our PS objects have CMI objects behind them
-        all_cp_ids = [x["content_provider_id"] for x in self.client.onev.ListAll("ContentProvider")]
-        for cp in ContentProvider.objects.all():
-            if (cp.content_provider_id is not None) and (cp.content_provider_id not in all_cp_ids):
-                logger.info("Content provider %s was not found on CMI" % cp.content_provider_id)
-                cp.content_provider_id=None
-                cp.save()
-                result = True
-
-        return result
-
-    def sync_record(self, cp):
-        logger.info("sync'ing content provider %s" % str(cp))
-        account_name = self.make_account_name(cp.name)
-
-        if (not cp.serviceProvider) or (not cp.serviceProvider.service_provider_id):
-            raise Exception("ContentProvider %s is linked to a serviceProvider with no id" % str(cp))
-
-        spid = cp.serviceProvider.service_provider_id
-
-        cp_dict = {"account": account_name, "name": cp.name, "enabled": cp.enabled}
-
-        #print cp_dict
-
-        if not cp.content_provider_id:
-            cp_dict["service_provider_id"] = spid
-            id = self.client.onev.Create("ContentProvider", cp_dict)
-            cp.content_provider_id = id
-        else:
-            self.client.onev.Update("ContentProvider", cp.content_provider_id, cp_dict)
-
-        cp.save()
-
-    def delete_record(self, m):
-        if m.content_provider_id is not None:
-            self.client.onev.Delete("ContentProvider", m.content_provider_id)
-
diff --git a/xos/observers/hpc/steps/sync_hpcservices.py b/xos/observers/hpc/steps/sync_hpcservices.py
deleted file mode 100644
index 47165cc..0000000
--- a/xos/observers/hpc/steps/sync_hpcservices.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import os
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from core.models import Service
-from services.hpc.models import HpcService
-from services.requestrouter.models import RequestRouterService
-from util.logger import Logger, logging
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from hpclib import HpcLibrary
-
-logger = Logger(level=logging.INFO)
-
-class SyncHpcService(SyncStep, HpcLibrary):
-    provides=[HpcService]
-    observes=HpcService
-    requested_interval=0
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-        HpcLibrary.__init__(self)
-
-    def filter_hpc_service(self, objs):
-        hpcService = self.get_hpc_service()
-
-        return [x for x in objs if x == hpcService]
-
-    def fetch_pending(self, deleted):
-        # Looks like deletion is not supported for this object - Sapan
-        if (deleted):
-            return []
-        else:
-            return self.filter_hpc_service(HpcService.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None)))
-
-    def sync_record(self, hpc_service):
-        logger.info("sync'ing hpc_service %s" % str(hpc_service))
-        hpc_service.save()
diff --git a/xos/observers/hpc/steps/sync_originserver.py b/xos/observers/hpc/steps/sync_originserver.py
deleted file mode 100644
index 435bbc1..0000000
--- a/xos/observers/hpc/steps/sync_originserver.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import os
-import sys
-import base64
-
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from core.models import Service
-from services.hpc.models import ServiceProvider, ContentProvider, CDNPrefix, OriginServer
-from util.logger import Logger, logging
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from hpclib import HpcLibrary
-
-logger = Logger(level=logging.INFO)
-
-class SyncOriginServer(SyncStep, HpcLibrary):
-    provides=[OriginServer]
-    observes=OriginServer
-    requested_interval=0
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-        HpcLibrary.__init__(self)
-
-    def filter_hpc_service(self, objs):
-        hpcService = self.get_hpc_service()
-
-        return [x for x in objs if x.contentProvider.serviceProvider.hpcService == hpcService]
-
-    def fetch_pending(self, deleted):
-        #self.consistency_check()
-
-        return self.filter_hpc_service(SyncStep.fetch_pending(self, deleted))
-
-    def consistency_check(self):
-        # set to true if something changed
-        result=False
-
-        # sanity check to make sure our PS objects have CMI objects behind them
-        all_ors_ids = [x["origin_server_id"] for x in self.client.onev.ListAll("OriginServer")]
-        for ors in OriginServer.objects.all():
-            if (ors.origin_server_id is not None) and (ors.origin_server_id not in all_ors_ids):
-                # we have an origin server ID, but it doesn't exist in the CMI
-                # something went wrong
-                # start over
-                logger.info("origin server %s was not found on CMI" % ors.origin_server_id)
-                ors.origin_server_id=None
-                ors.save()
-                result = True
-
-        return result
-
-    def sync_record(self, ors):
-        logger.info("sync'ing origin server %s" % str(ors))
-
-        if (not ors.contentProvider) or (not ors.contentProvider.content_provider_id):
-            raise Exception("Origin Server %s is linked to a contentProvider with no id" % str(ors))
-
-        cpid = ors.contentProvider.content_provider_id
-
-        # validation requires URL start with http://
-        url = ors.url
-        if not url.startswith("http://"):
-            url = "http://" + url
-
-        ors_dict = {"authenticated_content": ors.authenticated,
-                    "zone_redirects": ors.redirects,
-                    "content_provider_id": cpid,
-                    "url": url,
-                    "service_type": "HyperCache",
-                    "caching_type": "Optimistic",
-                    "description": ors.description}
-        if not ors_dict["description"]:
-            ors_dict["description"] = "blank"
-
-        #print ors_dict
-
-        if not ors.origin_server_id:
-            id = self.client.onev.Create("OriginServer", ors_dict)
-            ors.origin_server_id = id
-        else:
-            self.client.onev.Update("OriginServer", ors.origin_server_id, ors_dict)
-
-        # ... something breaks (analytics) if the URL starts with http://, so we
-        # change it in cob after adding the record via onev.
-        url = url[7:] # strip the leading "http://"
-        self.client.cob.UpdateContent(ors.origin_server_id, {"url": url})
-
-        ors.silent = True
-        ors.save()
-
-    def delete_record(self, m):
-        if m.origin_server_id is not None:
-            self.client.onev.Delete("OriginServer", m.origin_server_id)
diff --git a/xos/observers/hpc/steps/sync_serviceprovider.py b/xos/observers/hpc/steps/sync_serviceprovider.py
deleted file mode 100644
index e4e31f0..0000000
--- a/xos/observers/hpc/steps/sync_serviceprovider.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import os
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from core.models import Service
-from services.hpc.models import ServiceProvider
-from util.logger import Logger, logging
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from hpclib import HpcLibrary
-
-logger = Logger(level=logging.INFO)
-
-class SyncServiceProvider(SyncStep, HpcLibrary):
-    provides=[ServiceProvider]
-    observes=ServiceProvider
-    requested_interval=0
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-        HpcLibrary.__init__(self)
-
-    def filter_hpc_service(self, objs):
-        hpcService = self.get_hpc_service()
-
-        return [x for x in objs if x.hpcService == hpcService]
-
-    def fetch_pending(self, deleted):
-        #self.consistency_check()
-
-        return self.filter_hpc_service(SyncStep.fetch_pending(self, deleted))
-
-    def consistency_check(self):
-        # set to true if something changed
-        result=False
-
-        # sanity check to make sure our PS objects have CMI objects behind them
-        all_sp_ids = [x["service_provider_id"] for x in self.client.onev.ListAll("ServiceProvider")]
-        for sp in ServiceProvider.objects.all():
-            if (sp.service_provider_id is not None) and (sp.service_provider_id not in all_sp_ids):
-                logger.info("Service provider %s was not found on CMI" % sp.service_provider_id)
-                sp.service_provider_id=None
-                sp.save()
-                result = True
-
-        return result
-
-    def sync_record(self, sp):
-        logger.info("sync'ing service provider %s" % str(sp))
-        account_name = self.make_account_name(sp.name)
-        sp_dict = {"account": account_name, "name": sp.name, "enabled": sp.enabled}
-        if not sp.service_provider_id:
-            id = self.client.onev.Create("ServiceProvider", sp_dict)
-            sp.service_provider_id = id
-        else:
-            self.client.onev.Update("ServiceProvider", sp.service_provider_id, sp_dict)
-
-        sp.save()
-
-    def delete_record(self, m):
-        if m.service_provider_id is not None:
-            self.client.onev.Delete("ServiceProvider", m.service_provider_id)
diff --git a/xos/observers/hpc/steps/sync_sitemap.py b/xos/observers/hpc/steps/sync_sitemap.py
deleted file mode 100644
index e662458..0000000
--- a/xos/observers/hpc/steps/sync_sitemap.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import os
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from core.models import Service
-from services.hpc.models import ServiceProvider, ContentProvider, CDNPrefix, SiteMap
-from util.logger import Logger, logging
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from hpclib import HpcLibrary
-
-logger = Logger(level=logging.INFO)
-
-class SyncSiteMap(SyncStep, HpcLibrary):
-    provides=[SiteMap]
-    observes=SiteMap
-    requested_interval=0
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-        HpcLibrary.__init__(self)
-
-    def filter_hpc_service(self, objs):
-        hpcService = self.get_hpc_service()
-
-        filtered_objs = []
-        for x in objs:
-            if ((x.hpcService == hpcService) or
-               ((x.serviceProvider != None) and (x.serviceProvider.hpcService == hpcService)) or
-               ((x.contentProvider != None) and (x.contentProvider.serviceProvider.hpcService == hpcService)) or
-               ((x.cdnPrefix != None) and (x.cdnPrefix.contentProvider.serviceProvider.hpcService == hpcService))):
-                filtered_objs.append(x)
-
-        return filtered_objs
-
-    def fetch_pending(self, deleted):
-        return self.filter_hpc_service(SyncStep.fetch_pending(self, deleted))
-
-    def consistency_check(self):
-        # set to true if something changed
-        result=False
-
-        # sanity check to make sure our PS objects have CMI objects behind them
-        all_map_ids = [x["map_id"] for x in self.client.onev.ListAll("Map")]
-        for map in SiteMap.objects.all():
-            if (map.map_id is not None) and (map.map_id not in all_map_ids):
-                logger.info("Map %s was not found on CMI" % map.map_id)
-                map.map_id=None
-                map.save()
-                result = True
-
-        return result
-
-    def update_bind(self, map, map_dict, field_name, to_name, ids):
-        for id in ids:
-            if (not id in map_dict.get(field_name, [])):
-                print "Bind Map", map.map_id, "to", to_name, id
-                self.client.onev.Bind("Map", map.map_id, to_name, id)
-
-        for id in map_dict.get(field_name, []):
-            if (not id in ids):
-                print "Unbind Map", map.map_id, "from", to_name, id
-                self.client.onev.UnBind("Map", map.map_id, to_name, id)
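-        # For example (hypothetical ids): if the CMI shows this map bound to
-        # cdn_prefix_ids [1, 2] while the data model wants [2, 3], the loops
-        # above bind 3 and unbind 1.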
-
-    def sync_record(self, map):
-        logger.info("sync'ing SiteMap %s" % str(map))
-
-        if not map.map:
-            # no contents
-            return
-
-        content = map.map.read()
-
-        map_dict = {"name": map.name, "type": "site", "content": content}
-
-        cdn_prefix_ids=[]
-        service_provider_ids=[]
-        content_provider_ids=[]
-
-        if (map.contentProvider):
-            if not map.contentProvider.content_provider_id:
-                raise Exception("Map %s links to a contentProvider with no id" % map.name)
-            content_provider_ids = [map.contentProvider.content_provider_id]
-
-        if (map.serviceProvider):
-            if not map.serviceProvider.service_provider_id:
-                raise Exception("Map %s links to a serviceProvider with no id" % map.name)
-            service_provider_ids = [map.serviceProvider.service_provider_id]
-
-        if (map.cdnPrefix):
-            if not map.cdnPrefix.cdn_prefix_id:
-                raise Exception("Map %s links to a cdnPrefix with no id" % map.name)
-            cdn_prefix_ids = [map.cdnPrefix.cdn_prefix_id]
-
-        if not map.map_id:
-            print "Create Map", map_dict
-            id = self.client.onev.Create("Map", map_dict)
-            map.map_id = id
-        else:
-            print "Update Map", map_dict
-            # these things we probably cannot update
-            del map_dict["name"]
-            self.client.onev.Update("Map", map.map_id, map_dict)
-
-        cmi_map_dict = self.client.onev.Read("Map", map.map_id)
-
-        self.update_bind(map, cmi_map_dict, "cdn_prefix_ids", "CDNPrefix", cdn_prefix_ids)
-
-        map.save()
-
-    def delete_record(self, m):
-        if m.map_id is not None:
-            self.client.onev.Delete("Map", m.map_id)
diff --git a/xos/observers/hpc/stop.sh b/xos/observers/hpc/stop.sh
deleted file mode 100755
index a0b4a8e..0000000
--- a/xos/observers/hpc/stop.sh
+++ /dev/null
@@ -1 +0,0 @@
-pkill -9 -f hpc-observer.py
diff --git a/xos/observers/hpc/supervisor/hpc-observer.conf b/xos/observers/hpc/supervisor/hpc-observer.conf
deleted file mode 100644
index f2c79d4..0000000
--- a/xos/observers/hpc/supervisor/hpc-observer.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[program:hpc-observer]
-command=python /opt/xos/observers/hpc/hpc-observer.py -C /opt/xos/observers/hpc/hpc_observer_config
diff --git a/xos/observers/hpc/supervisor/hpc-watcher.conf b/xos/observers/hpc/supervisor/hpc-watcher.conf
deleted file mode 100644
index e0f4eb1..0000000
--- a/xos/observers/hpc/supervisor/hpc-watcher.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[program:hpc-watcher]
-command=python /opt/xos/observers/hpc/hpc_watcher.py
diff --git a/xos/observers/monitoring_channel/files/docker.list b/xos/observers/monitoring_channel/files/docker.list
deleted file mode 100644
index 0ee9ae0..0000000
--- a/xos/observers/monitoring_channel/files/docker.list
+++ /dev/null
@@ -1 +0,0 @@
-deb https://get.docker.com/ubuntu docker main
diff --git a/xos/observers/monitoring_channel/files/vm-resolv.conf b/xos/observers/monitoring_channel/files/vm-resolv.conf
deleted file mode 100644
index cae093a..0000000
--- a/xos/observers/monitoring_channel/files/vm-resolv.conf
+++ /dev/null
@@ -1 +0,0 @@
-nameserver 8.8.8.8
diff --git a/xos/observers/monitoring_channel/model-deps b/xos/observers/monitoring_channel/model-deps
deleted file mode 100644
index 0967ef4..0000000
--- a/xos/observers/monitoring_channel/model-deps
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/xos/observers/monitoring_channel/monitoring_channel_observer.py b/xos/observers/monitoring_channel/monitoring_channel_observer.py
deleted file mode 100755
index d6a71ff..0000000
--- a/xos/observers/monitoring_channel/monitoring_channel_observer.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-
-# This imports and runs ../../xos-observer.py
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../..")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-observer")
-mod.main()
diff --git a/xos/observers/monitoring_channel/monitoring_channel_observer_config b/xos/observers/monitoring_channel/monitoring_channel_observer_config
deleted file mode 100644
index 5657e1d..0000000
--- a/xos/observers/monitoring_channel/monitoring_channel_observer_config
+++ /dev/null
@@ -1,41 +0,0 @@
-
-[plc]
-name=plc
-deployment=VICCI
-
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-[api]
-host=128.112.171.237
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-name=monitoring_channel
-dependency_graph=/opt/xos/observers/monitoring_channel/model-deps
-steps_dir=/opt/xos/observers/monitoring_channel/steps
-sys_dir=/opt/xos/observers/monitoring_channel/sys
-deleters_dir=/opt/xos/observers/monitoring_channel/deleters
-log_file=console
-driver=None
-pretend=False
-backoff_disabled=True
-save_ansible_output=True
-# set proxy_ssh to false on cloudlab
-proxy_ssh=False
-full_setup=True
-
-[feefie]
-client_id='vicci_dev_central'
-user_id='pl'
diff --git a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.py b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.py
deleted file mode 100644
index c265517..0000000
--- a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import hashlib
-import os
-import socket
-import sys
-import base64
-import time
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from synchronizers.base.ansible import run_template_ssh
-from observers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-from core.models import Service, Slice
-from services.ceilometer.models import MonitoringChannel
-from util.logger import Logger, logging
-
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-class SyncMonitoringChannel(SyncInstanceUsingAnsible):
-    provides=[MonitoringChannel]
-    observes=MonitoringChannel
-    requested_interval=0
-    template_name = "sync_monitoringchannel.yaml"
-    service_key_name = "/opt/xos/observers/monitoring_channel/monitoring_channel_private_key"
-
-    def __init__(self, *args, **kwargs):
-        super(SyncMonitoringChannel, self).__init__(*args, **kwargs)
-
-    def fetch_pending(self, deleted):
-        if (not deleted):
-            objs = MonitoringChannel.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
-        else:
-            objs = MonitoringChannel.get_deleted_tenant_objects()
-
-        return objs
-
-    def get_extra_attributes(self, o):
-        # This is a place to include extra attributes. In the case of Monitoring Channel, we need to know
-        #   1) Allowed tenant ids
-        #   2) Ceilometer API service endpoint URL if running externally
-        #   3) Credentials to access Ceilometer API service
-
-        instance = self.get_instance(o)
-
-        try:
-            full_setup = Config().observer_full_setup
-        except:
-            full_setup = True
-
-        fields = {"unique_id": o.id,
-                  "allowed_tenant_ids": o.tenant_list,
-                  "auth_url":instance.controller.auth_url,
-                  "admin_user":instance.controller.admin_user,
-                  "admin_password":instance.controller.admin_password,
-                  "admin_tenant":instance.controller.admin_tenant,
-                  "full_setup": full_setup}
-
-        return fields
-
-    def run_playbook(self, o, fields):
-        #ansible_hash = hashlib.md5(repr(sorted(fields.items()))).hexdigest()
-        #quick_update = (o.last_ansible_hash == ansible_hash)
-
-        #if quick_update:
-        #    logger.info("quick_update triggered; skipping ansible recipe")
-        #else:
-        super(SyncMonitoringChannel, self).run_playbook(o, fields)
-
-        #o.last_ansible_hash = ansible_hash
-
-    def map_delete_inputs(self, o):
-        fields = {"unique_id": o.id,
-                  "delete": True}
-        return fields
diff --git a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
deleted file mode 100644
index 6c5fc8c..0000000
--- a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
----
-- hosts: {{ instance_name }}
-  gather_facts: False
-  connection: ssh
-  user: ubuntu
-  sudo: yes
-  vars:
-      unique_id: {{ unique_id }}
-      auth_url: {{ auth_url }}
-      admin_user: {{ admin_user }}
-      admin_password: {{ admin_password }}
-      admin_tenant: {{ admin_tenant }}
-      shared_lan_ip: {{ private_ip }}
-      shared_lan_mac: {{ private_mac }}
-      headnode_flat_lan_ip: {{ rabbit_host }}
-      ceilometer_client_acess_ip: {{ ceilometer_ip }}
-      ceilometer_client_acess_mac: {{ ceilometer_mac }}
-      ceilometer_host_port: {{ ceilometer_port }}
-      allowed_tenant_ids:
-        {% for allowed_tenant_id in allowed_tenant_ids %}
-        - {{ allowed_tenant_id }}
-        {% endfor %}
-
-  tasks:
-{% if delete %}
-  - name: Remove tenant
-# FIXME: Adding dummy template action to avoid "action attribute missing in task" error
-    template: src=/opt/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2 dest=/usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config mode=0777
-    notify:
-     - stop monitoring-channel
-     - remove container
-{% else %}
-{% if full_setup %}
-  - name: Docker repository
-    copy: src=/opt/xos/observers/monitoring_channel/files/docker.list
-      dest=/etc/apt/sources.list.d/docker.list
-
-  - name: Import the repository key
-    apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
-
-  - name: install Docker
-    apt: name=lxc-docker state=present update_cache=yes
-
-  - name: install python-setuptools
-    apt: name=python-setuptools state=present
-
-  - name: install pip
-    easy_install: name=pip
-
-  - name: install docker-py
-    pip: name=docker-py version=0.5.3
-
-  - name: install Pipework
-    get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
-       dest=/usr/local/bin/pipework
-       mode=0755
-
-  - name: Disable resolvconf service
-    # YAML keeps only the last duplicate key, so the three commands must be
-    # chained into one shell invocation for all of them to run.
-    shell: service resolvconf stop ; echo manual > /etc/init/resolvconf.override ; rm -f /etc/resolv.conf
-
-  - name: Install resolv.conf
-    copy: src=/opt/xos/observers/monitoring_channel/files/vm-resolv.conf
-      dest=/etc/resolv.conf
-{% endif %}
-
-  - name: ceilometer proxy config
-    template: src=/opt/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2 dest=/usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config mode=0777
-    notify:
-#    - restart monitoring-channel
-     - stop monitoring-channel
-     - remove container
-     - start monitoring-channel
-
-  - name: Monitoring channel upstart
-    template: src=/opt/xos/observers/monitoring_channel/templates/monitoring-channel.conf.j2 dest=/etc/init/monitoring-channel-{{ unique_id }}.conf
-
-  - name: Monitoring channel startup script
-    template: src=/opt/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2 dest=/usr/local/sbin/start-monitoring-channel-{{ unique_id }}.sh mode=0755
-    notify:
-#    - restart monitoring-channel
-     - stop monitoring-channel
-     - remove container
-     - start monitoring-channel
-
-# These are samples, not necessary for correct function of demo
-
-  - name: Make sure Monitoring channel service is running
-    service: name=monitoring-channel-{{ unique_id }} state=started
-{% endif %}
-
-  handlers:
-  - name: restart monitoring-channel
-    shell: service monitoring-channel-{{ unique_id }} stop; sleep 1; service monitoring-channel-{{ unique_id }} start
-
-  - name: stop monitoring-channel
-    service: name=monitoring-channel-{{ unique_id }} state=stopped
-
-  - name: remove container
-    docker: name=monitoring-channel-{{ unique_id }} state=absent image=monitoring-channel
-
-  - name: start monitoring-channel
-    service: name=monitoring-channel-{{ unique_id }} state=started
diff --git a/xos/observers/monitoring_channel/supervisor/monitoring_channel_observer.conf b/xos/observers/monitoring_channel/supervisor/monitoring_channel_observer.conf
deleted file mode 100644
index 1b78703..0000000
--- a/xos/observers/monitoring_channel/supervisor/monitoring_channel_observer.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[program:monitoring_channel_observer]
-command=python /opt/xos/observers/monitoring_channel/monitoring_channel_observer.py -C /opt/xos/observers/monitoring_channel/monitoring_channel_observer_config
diff --git a/xos/observers/monitoring_channel/templates/Dockerfile.monitoring_channel b/xos/observers/monitoring_channel/templates/Dockerfile.monitoring_channel
deleted file mode 100644
index 45defb8..0000000
--- a/xos/observers/monitoring_channel/templates/Dockerfile.monitoring_channel
+++ /dev/null
@@ -1,26 +0,0 @@
-FROM       ubuntu:14.04.2
-MAINTAINER Andy Bavier <acb@cs.princeton.edu>
-
-# XXX Workaround for docker bug:
-# https://github.com/docker/docker/issues/6345
-# Kernel 3.15 breaks docker; use the line below as a workaround
-# until there is a fix
-RUN ln -s -f /bin/true /usr/bin/chfn
-# XXX End workaround
-
-# Install.
-RUN apt-get update && apt-get install -y \
-    python-pip \
-    python-dev
-
-RUN pip install web.py
-RUN pip install wsgilog
-RUN pip install python-ceilometerclient
-RUN mkdir -p /usr/local/share
-ADD ceilometer_proxy_server.py /usr/local/share/
-RUN chmod +x /usr/local/share/ceilometer_proxy_server.py
-ADD start_ceilometer_proxy /usr/local/sbin/
-RUN chmod +x /usr/local/sbin/start_ceilometer_proxy
-EXPOSE 8000
-WORKDIR /usr/local/share
-CMD /usr/local/sbin/start_ceilometer_proxy
diff --git a/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2 b/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2
deleted file mode 100644
index 4c712f1..0000000
--- a/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-# This file autogenerated by monitoring-channel observer
-# It contains a list of attributes to be used by ceilometer proxy web server
-# syntax: key=value
-
-[default]
-auth_url={{ auth_url }}
-admin_user={{ admin_user }}
-admin_tenant={{ admin_tenant }}
-admin_password={{ admin_password }}
-
-[allowed_tenants]
-{% if allowed_tenant_ids %}
-{% for tenant_id in allowed_tenant_ids %}
-{{ tenant_id }}
-{% endfor %}
-{% endif %}
diff --git a/xos/observers/monitoring_channel/templates/ceilometer_proxy_server.py b/xos/observers/monitoring_channel/templates/ceilometer_proxy_server.py
deleted file mode 100644
index 62f0804..0000000
--- a/xos/observers/monitoring_channel/templates/ceilometer_proxy_server.py
+++ /dev/null
@@ -1,283 +0,0 @@
-#!/usr/bin/env python
-import web
-import ConfigParser
-import io
-import json
-from ceilometerclient import client
-import logging
-import logging.handlers # RotatingFileHandler is not reachable via "import logging" alone
-import urllib
-import urllib2
-from wsgilog import WsgiLog
-
-web.config.debug=False
-
-logfile = "ceilometer_proxy_server.log"
-level=logging.INFO
-logger=logging.getLogger('ceilometer_proxy_server')
-logger.setLevel(level)
-handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=1)
-logger.addHandler(handler)
-
-class FileLog(WsgiLog):
-    def __init__(self, application):
-        WsgiLog.__init__(
-            self,
-            application,
-            logformat = '%(message)s',
-            tofile = True,
-            toprint = True,
-            prnlevel = level,
-            file = logfile,
-            backups =1
-            )
-    def __call__(self, environ, start_response):
-        def hstart_response(status, response_headers, *args):
-             out = start_response(status, response_headers, *args)
-             try:
-                 logline=environ["SERVER_PROTOCOL"]+" "+environ["REQUEST_METHOD"]+" "+environ["REQUEST_URI"]+" - "+status
-             except Exception as err:
-                 logline="Could not log <%s> due to err <%s>" % (str(environ), err)
-             logger.info(logline)
-
-             return out
-
-        return super(FileLog, self).__call__(environ, hstart_response)
-
-#TODOs:
-#-See if we can avoid using python-ceilometerclient and instead use the REST calls directly with AuthToken
-#
-urls = (
-    r'^/v2/meters$', 'meter_list',
-    r'^/v2/meters/(?P<meter_name>[A-Za-z0-9_:.\-]+)/statistics$', 'statistics_list',
-    r'^/v2/samples$', 'sample_list',
-    r'^/v2/resources$', 'resource_list',
-    r'^/v2/subscribe$', 'pubsub_handler',
-)
-
-app = web.application(urls, globals())
-
-config = None
-ceilometer_client = None
-
-
-def parse_ceilometer_proxy_config():
-    global config
-    config = ConfigParser.RawConfigParser(allow_no_value=True)
-    config.read('ceilometer_proxy_config')
- 
-def ceilometerclient():
-    global config, ceilometer_client
-    if ceilometer_client:
-         return ceilometer_client
-
-    if not config:
-         parse_ceilometer_proxy_config()
-
-    keystone = {}
-    keystone['os_username']=config.get('default','admin_user')
-    keystone['os_password']=config.get('default','admin_password')
-    keystone['os_auth_url']=config.get('default','auth_url')
-    keystone['os_tenant_name']=config.get('default','admin_tenant')
-    ceilometer_client = client.get_client(2,**keystone)
-    logger.info('ceilometer get_client is successful')
-    return ceilometer_client
-
-def make_query(user_id=None, tenant_id=None, resource_id=None,
-               user_ids=None, tenant_ids=None, resource_ids=None):
-    """Returns query built from given parameters.
-
-    This query can be then used for querying resources, meters and
-    statistics.
-
-    :Parameters:
-      - `user_id`: user_id, has a priority over list of ids
-      - `tenant_id`: tenant_id, has a priority over list of ids
-      - `resource_id`: resource_id, has a priority over list of ids
-      - `user_ids`: list of user_ids
-      - `tenant_ids`: list of tenant_ids
-      - `resource_ids`: list of resource_ids
-    """
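-    # A minimal example (hypothetical id):
-    #   make_query(tenant_id="abc")
-    #   returns [{"field": "project_id", "op": "eq", "value": "abc"}]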
-    user_ids = user_ids or []
-    tenant_ids = tenant_ids or []
-    resource_ids = resource_ids or []
-
-    query = []
-    if user_id:
-        user_ids = [user_id]
-    for u_id in user_ids:
-        query.append({"field": "user_id", "op": "eq", "value": u_id})
-
-    if tenant_id:
-        tenant_ids = [tenant_id]
-    for t_id in tenant_ids:
-        query.append({"field": "project_id", "op": "eq", "value": t_id})
-
-    if resource_id:
-        resource_ids = [resource_id]
-    for r_id in resource_ids:
-        query.append({"field": "resource_id", "op": "eq", "value": r_id})
-
-    return query
-
-def filter_query_params(query_params):
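-    # Split the client-supplied query: 'project'/'project_id' filters are
-    # collected into user_specified_tenants (checked against the
-    # allowed_tenants whitelist by the callers); every other filter passes
-    # through unchanged.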
-    new_query=[]
-    i=0
-    user_specified_tenants=[]
-    for field in query_params['q.field']:
-        if (field != 'project_id') and (field != 'project'):
-            query = {}
-            query['field']=field
-            if query_params['q.op'][i] != '':
-                 query['op']=query_params['q.op'][i]
-            query['value']=query_params['q.value'][i]
-            new_query.append(query)
-        else:
-            user_specified_tenants.append(query_params['q.value'][i])
-        i=i+1
-    return new_query,user_specified_tenants
-
-class meter_list:
-    def GET(self):
-        global config
-        keyword_args = {
-             "q.field": [],
-             "q.op": [],
-             "q.type": [],
-             "q.value": [],
-        }
-        query_params = web.input(**keyword_args)
-        new_query, user_specified_tenants = filter_query_params(query_params)
-
-        client = ceilometerclient()
-        meters=[]
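-        # For each tenant in the allowed_tenants whitelist (skipping any the
-        # caller did not ask for), constrain the query to that tenant and
-        # merge the results.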
-        for (k,v) in config.items('allowed_tenants'):
-             if user_specified_tenants and (k not in user_specified_tenants):
-                 continue
-             final_query=[]
-             final_query.extend(new_query)
-             query = make_query(tenant_id=k)
-             final_query.extend(query)
-             logger.debug('final query=%s',final_query)
-             results = client.meters.list(q=final_query)
-             meters.extend(results)
-        return json.dumps([ob._info for ob in meters])
-
-class statistics_list:
-    def GET(self, meter_name):
-        global config
-        keyword_args = {
-             "q.field": [],
-             "q.op": [],
-             "q.type": [],
-             "q.value": [],
-             "period": None
-        }
-        query_params = web.input(**keyword_args)
-        new_query, user_specified_tenants = filter_query_params(query_params)
-
-        client = ceilometerclient()
-        period = query_params.period
-        statistics = []
-        for (k,v) in config.items('allowed_tenants'):
-              if user_specified_tenants and (k not in user_specified_tenants):
-                  continue
-              final_query=[]
-              final_query.extend(new_query)
-              query = make_query(tenant_id=k)
-              final_query.extend(query)
-              logger.debug('final query=%s',final_query)
-              results = client.statistics.list(meter_name=meter_name, q=final_query, period=period)
-              statistics.extend(results)
-        return json.dumps([ob._info for ob in statistics])
-
-class sample_list:
-    def GET(self):
-        global config
-        keyword_args = {
-             "q.field": [],
-             "q.op": [],
-             "q.type": [],
-             "q.value": [],
-             "limit": None,
-        }
-        query_params = web.input(**keyword_args)
-        new_query, user_specified_tenants = filter_query_params(query_params)
-
-        client = ceilometerclient()
-        limit=query_params.limit
-        samples=[]
-        for (k,v) in config.items('allowed_tenants'):
-              if user_specified_tenants and (k not in user_specified_tenants):
-                  continue
-              final_query=[]
-              final_query.extend(new_query)
-              query = make_query(tenant_id=k)
-              final_query.extend(query)
-              logger.debug('final query=%s',final_query)
-              results = client.new_samples.list(q=final_query,limit=limit)
-              samples.extend(results)
-        return json.dumps([ob._info for ob in samples])
-
-class resource_list:
-    def GET(self):
-        global config
-        keyword_args = {
-             "q.field": [],
-             "q.op": [],
-             "q.type": [],
-             "q.value": [],
-             "limit": None,
-             "links": None,
-        }
-        query_params = web.input(**keyword_args)
-        new_query, user_specified_tenants = filter_query_params(query_params)
-
-        client = ceilometerclient()
-        limit=query_params.limit
-        links=query_params.links
-        resources=[]
-        for (k,v) in config.items('allowed_tenants'):
-              if user_specified_tenants and (k not in user_specified_tenants):
-                  continue
-              final_query=[]
-              final_query.extend(new_query)
-              query = make_query(tenant_id=k)
-              final_query.extend(query)
-              logger.debug('final query=%s',final_query)
-              results = client.resources.list(q=final_query, limit=limit, links=links)
-              resources.extend(results)
-        return json.dumps([ob._info for ob in resources])
-
-class pubsub_handler:
-    def POST(self):
-        global config
-        parse_ceilometer_proxy_config()
-        data_str = unicode(web.data(),'iso-8859-1')
-        post_data = json.loads(data_str)
-        final_query=[]
-        for (k,v) in config.items('allowed_tenants'):
-             query = make_query(tenant_id=k)
-             final_query.extend(query)
-        if not final_query:
-             raise Exception("Not allowed to subscribe to any meters")
-        post_data["query"] = final_query
-        #TODO: The PUB/SUB url needs to be read from config
-        put_request = urllib2.Request("http://10.11.10.1:4455/subscribe", json.dumps(post_data))
-        put_request.get_method = lambda: 'SUB'
-        put_request.add_header('Content-Type', 'application/json')
-        response = urllib2.urlopen(put_request)
-        response_text = response.read()
-        return json.dumps(response_text)
-
-    def DELETE(self):
-        data_str = web.data()
-        #TODO: The PUB/SUB url needs to be read from config
-        put_request = urllib2.Request("http://10.11.10.1:4455/unsubscribe", data_str)
-        put_request.get_method = lambda: 'UNSUB'
-        put_request.add_header('Content-Type', 'application/json')
-        response = urllib2.urlopen(put_request)
-        response_text = response.read()
-        return json.dumps(response_text)
-
-if __name__ == "__main__":
-    app.run(FileLog)
diff --git a/xos/observers/monitoring_channel/templates/monitoring-channel.conf.j2 b/xos/observers/monitoring_channel/templates/monitoring-channel.conf.j2
deleted file mode 100644
index eb937ac..0000000
--- a/xos/observers/monitoring_channel/templates/monitoring-channel.conf.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-# Upstart script for Monitoring channel
-description "Upstart script for Monitoring channel container"
-author "andy@onlab.us"
-start on filesystem and started docker
-stop on runlevel [!2345]
-respawn
-
-script
-  /usr/local/sbin/start-monitoring-channel-{{ unique_id }}.sh
-end script
diff --git a/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2 b/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2
deleted file mode 100755
index f56c247..0000000
--- a/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-
-function mac_to_iface {
-    MAC=$1
-    ifconfig|grep $MAC| awk '{print $1}'|grep -v '\.'
-}
-
-function generate_mac_from_ip {
-    IP=$1
-    printf "02:42:%02x:%02x:%02x:%02x\n" `echo $IP|awk -F '.' '{print $1, $2, $3, $4}'`
-}
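-# For example (hypothetical IP): generate_mac_from_ip 10.0.0.5 prints
-# 02:42:0a:00:00:05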
-
-iptables -L > /dev/null
-ip6tables -L > /dev/null
-
-MONITORING_CHANNEL=monitoring-channel-{{ unique_id }}
-HEADNODEFLATLANIP={{ headnode_flat_lan_ip }}
-HOST_FORWARDING_PORT_FOR_CEILOMETER={{ ceilometer_host_port }}
-
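-# Create and start the container on first run; afterwards just restart the
-# existing one (docker inspect exits 1 when the container does not exist).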
-docker inspect $MONITORING_CHANNEL > /dev/null 2>&1
-if [ "$?" == 1 ]
-then
-    #sudo docker build -t monitoring-channel -f Dockerfile.monitoring_channel .
-    sudo docker pull srikanthvavila/monitoring-channel
-    docker run -d --name=$MONITORING_CHANNEL --add-host="ctl:$HEADNODEFLATLANIP" --privileged=true -p $HOST_FORWARDING_PORT_FOR_CEILOMETER:8000 srikanthvavila/monitoring-channel
-else
-    docker start $MONITORING_CHANNEL
-fi
-
-# Set up networking via pipework
-#SHARED_LAN_IFACE=$( mac_to_iface {{ shared_lan_mac }} )
-#docker exec $MONITORING_CHANNEL ifconfig eth0 >> /dev/null || pipework $SHARED_LAN_IFACE -i eth0 $MONITORING_CHANNEL 192.168.0.1/24
-
-# Make sure VM's eth0 (hpc_client) has no IP address
-#ifconfig $HPC_IFACE 0.0.0.0
-
-# Now copy ceilometer proxy configuration to container
-cat /usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config | docker exec -i $MONITORING_CHANNEL bash -c 'cat > /usr/local/share/ceilometer_proxy_config'
-
-# Attach to container
-docker start -a $MONITORING_CHANNEL
diff --git a/xos/observers/monitoring_channel/templates/start_ceilometer_proxy b/xos/observers/monitoring_channel/templates/start_ceilometer_proxy
deleted file mode 100644
index ddaa9c8..0000000
--- a/xos/observers/monitoring_channel/templates/start_ceilometer_proxy
+++ /dev/null
@@ -1 +0,0 @@
-/usr/local/share/ceilometer_proxy_server.py 8000
diff --git a/xos/observers/onos/model-deps b/xos/observers/onos/model-deps
deleted file mode 100644
index 2da80e0..0000000
--- a/xos/observers/onos/model-deps
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-    "ONOSApp": [
-        "ONOSService"
-    ]
-}
diff --git a/xos/observers/onos/onos-observer.py b/xos/observers/onos/onos-observer.py
deleted file mode 100755
index d6a71ff..0000000
--- a/xos/observers/onos/onos-observer.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-
-# This imports and runs ../../xos-observer.py
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../..")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-observer")
-mod.main()
diff --git a/xos/observers/onos/onos_observer_config b/xos/observers/onos/onos_observer_config
deleted file mode 100644
index 3c6d63d..0000000
--- a/xos/observers/onos/onos_observer_config
+++ /dev/null
@@ -1,41 +0,0 @@
-
-[plc]
-name=plc
-deployment=VICCI
-
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-[api]
-host=128.112.171.237
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-name=onos
-dependency_graph=/opt/xos/observers/onos/model-deps
-steps_dir=/opt/xos/observers/onos/steps
-sys_dir=/opt/xos/observers/onos/sys
-deleters_dir=/opt/xos/observers/onos/deleters
-log_file=console
-driver=None
-pretend=False
-backoff_disabled=True
-save_ansible_output=True
-# set proxy_ssh to false on cloudlab
-proxy_ssh=False
-full_setup=True
-
-[feefie]
-client_id='vicci_dev_central'
-user_id='pl'
diff --git a/xos/observers/onos/run.sh b/xos/observers/onos/run.sh
deleted file mode 100755
index ea4c511..0000000
--- a/xos/observers/onos/run.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#if [[ ! -e ./vcpe-observer.py ]]; then
-#    ln -s ../../xos-observer.py vcpe-observer.py
-#fi
-
-export XOS_DIR=/opt/xos
-python onos-observer.py  -C $XOS_DIR/observers/onos/onos_observer_config
diff --git a/xos/observers/onos/scripts/dockerip.sh b/xos/observers/onos/scripts/dockerip.sh
deleted file mode 100644
index 732c3fe..0000000
--- a/xos/observers/onos/scripts/dockerip.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
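-# Print the IP address of container $1: 127.0.0.1 when the container uses
-# host networking, otherwise its bridge-network address.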
-MODE=`docker inspect --format '{{ .HostConfig.NetworkMode }}' $1  | tr -d '\n' | tr -d '\r'`
-if [[ "$MODE" == "host" ]]; then
-    echo -n "127.0.0.1"
-else
-    docker inspect --format '{{ .NetworkSettings.IPAddress }}' $1 | tr -d '\n' | tr -d '\r'
-fi
-
diff --git a/xos/observers/onos/start.sh b/xos/observers/onos/start.sh
deleted file mode 100755
index c13ffbe..0000000
--- a/xos/observers/onos/start.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#if [[ ! -e ./vcpe-observer.py ]]; then
-#    ln -s ../../xos-observer.py vcpe-observer.py
-#fi
-
-export XOS_DIR=/opt/xos
-nohup python onos-observer.py  -C $XOS_DIR/observers/onos/onos_observer_config > /dev/null 2>&1 &
diff --git a/xos/observers/onos/steps/sync_onosapp.py b/xos/observers/onos/steps/sync_onosapp.py
deleted file mode 100644
index 047306d..0000000
--- a/xos/observers/onos/steps/sync_onosapp.py
+++ /dev/null
@@ -1,167 +0,0 @@
-import hashlib
-import os
-import socket
-import sys
-import base64
-import time
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from synchronizers.base.ansible import run_template_ssh
-from observers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-from core.models import Service, Slice
-from services.onos.models import ONOSService, ONOSApp
-from util.logger import Logger, logging
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-class SyncONOSApp(SyncInstanceUsingAnsible):
-    provides=[ONOSApp]
-    observes=ONOSApp
-    requested_interval=0
-    template_name = "sync_onosapp.yaml"
-    service_key_name = "/opt/xos/observers/onos/onos_key"
-
-    def __init__(self, *args, **kwargs):
-        super(SyncONOSApp, self).__init__(*args, **kwargs)
-
-    def fetch_pending(self, deleted):
-        if (not deleted):
-            objs = ONOSApp.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
-        else:
-            objs = ONOSApp.get_deleted_tenant_objects()
-
-        return objs
-
-    def get_instance(self, o):
-        # We assume the ONOS service owns a slice, so pick one of the instances
-        # inside that slice to sync to.
-
-        serv = self.get_onos_service(o)
-
-        if serv.use_external_host:
-            return serv.use_external_host
-
-        if serv.slices.exists():
-            slice = serv.slices.all()[0]
-            if slice.instances.exists():
-                return slice.instances.all()[0]
-
-        return None
-
-    def get_onos_service(self, o):
-        if not o.provider_service:
-            return None
-
-        onoses = ONOSService.get_service_objects().filter(id=o.provider_service.id)
-        if not onoses:
-            return None
-
-        return onoses[0]
-
-    def get_files_dir(self, o):
-        if not hasattr(Config(), "observer_steps_dir"):
-            # make steps_dir mandatory; there's no valid reason for it to not
-            # be defined.
-            raise Exception("observer_steps_dir is not defined in config file")
-
-        step_dir = Config().observer_steps_dir
-
-        return os.path.join(step_dir, "..", "files", str(self.get_onos_service(o).id), o.name)
-
-    def get_cluster_configuration(self, o):
-        instance = self.get_instance(o)
-        if not instance:
-            raise Exception("No instance for ONOS App")
-        node_ips = [socket.gethostbyname(instance.node.name)]
-
-        ipPrefix = ".".join(node_ips[0].split(".")[:3]) + ".*"
-        result = '{ "nodes": ['
-        result = result + ",".join(['{ "ip": "%s"}' % ip for ip in node_ips])
-        result = result + '], "ipPrefix": "%s"}' % ipPrefix
-        return result
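-        # For example, with a single node that resolves to 10.0.0.5
-        # (hypothetical address) this returns:
-        #   { "nodes": [{ "ip": "10.0.0.5"}], "ipPrefix": "10.0.0.*"}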
-
-
-    def write_configs(self, o):
-        o.config_fns = []
-        o.rest_configs = []
-        o.files_dir = self.get_files_dir(o)
-
-        if not os.path.exists(o.files_dir):
-            os.makedirs(o.files_dir)
-
-        # Combine the service attributes with the tenant attributes. Tenant
-        # attributes override service attributes.
-        attrs = o.provider_service.serviceattribute_dict
-        attrs.update(o.tenantattribute_dict)
-
-        o.early_rest_configs=[]
-        if ("cordvtn" in o.dependencies):
-            # For VTN, since it's running in a docker host container, we need
-            # to make sure it configures the cluster using the right ip addresses.
-            # NOTE: rest_onos/v1/cluster/configuration/ will reboot the cluster and
-            #   must go first.
-            name="rest_onos/v1/cluster/configuration/"
-            value= self.get_cluster_configuration(o)
-            fn = name[5:].replace("/","_")
-            endpoint = name[5:]
-            file(os.path.join(o.files_dir, fn),"w").write(" " +value)
-            o.early_rest_configs.append( {"endpoint": endpoint, "fn": fn} )
-
-        for name in attrs.keys():
-            value = attrs[name]
-            if name.startswith("config_"):
-                fn = name[7:] # .replace("_json",".json")
-                o.config_fns.append(fn)
-                file(os.path.join(o.files_dir, fn),"w").write(value)
-            if name.startswith("rest_"):
-                fn = name[5:].replace("/","_")
-                endpoint = name[5:]
-                # Ansible goes out of its way to make our life difficult. If
-                # 'lookup' sees a file that it thinks contains json, then it'll
-                # insist on parsing and return a json object. We just want
-                # a string, so prepend a space and then strip the space off
-                # later.
-                file(os.path.join(o.files_dir, fn),"w").write(" " +value)
-                o.rest_configs.append( {"endpoint": endpoint, "fn": fn} )
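-                # For example, an attribute named
-                # "rest_onos/v1/network/configuration/" yields endpoint
-                # "onos/v1/network/configuration/" and fn
-                # "onos_v1_network_configuration_".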
-
-    def prepare_record(self, o):
-        self.write_configs(o)
-
-    def get_extra_attributes(self, o):
-        instance = self.get_instance(o)
-
-        fields={}
-        fields["files_dir"] = o.files_dir
-        fields["appname"] = o.name
-        fields["nat_ip"] = instance.get_ssh_ip()
-        fields["config_fns"] = o.config_fns
-        fields["rest_configs"] = o.rest_configs
-        fields["early_rest_configs"] = o.early_rest_configs
-        if o.dependencies:
-            fields["dependencies"] = [x.strip() for x in o.dependencies.split(",")]
-        else:
-            fields["dependencies"] = []
-
-        if (instance.isolation=="container"):
-            fields["ONOS_container"] = "%s-%s" % (instance.slice.name, str(instance.id))
-        else:
-            fields["ONOS_container"] = "ONOS"
-        return fields
-
-    def sync_fields(self, o, fields):
-        # the super causes the playbook to be run
-        super(SyncONOSApp, self).sync_fields(o, fields)
-
-    def run_playbook(self, o, fields):
-        super(SyncONOSApp, self).run_playbook(o, fields)
-
-    def delete_record(self, m):
-        pass
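get_cluster_configuration() hand-builds the JSON body that write_configs() later POSTs to rest_onos/v1/cluster/configuration/. A minimal sketch of its output, assuming a single hypothetical node IP of 10.0.0.5:

    node_ips = ["10.0.0.5"]
    ipPrefix = ".".join(node_ips[0].split(".")[:3]) + ".*"
    body = '{ "nodes": [' + ",".join(['{ "ip": "%s"}' % ip for ip in node_ips]) + '], "ipPrefix": "%s"}' % ipPrefix
    print body   # -> { "nodes": [{ "ip": "10.0.0.5"}], "ipPrefix": "10.0.0.*"}

write_configs() writes this body to disk with a leading space so that Ansible's lookup('file', ...) returns it as a raw string instead of insisting on parsing it as JSON.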
diff --git a/xos/observers/onos/steps/sync_onosapp.yaml b/xos/observers/onos/steps/sync_onosapp.yaml
deleted file mode 100644
index a03368b..0000000
--- a/xos/observers/onos/steps/sync_onosapp.yaml
+++ /dev/null
@@ -1,114 +0,0 @@
----
-- hosts: {{ instance_name }}
-  gather_facts: False
-  connection: ssh
-  user: {{ username }}
-  sudo: yes
-  vars:
-    appname: {{ appname }}
-    dependencies: {{ dependencies }}
-{% if rest_configs %}
-    rest_configs:
-{% for rest_config in rest_configs %}
-       - endpoint: {{ rest_config.endpoint }}
-         body: "{{ '{{' }} lookup('file', '{{ files_dir }}/{{ rest_config.fn }}') {{ '}}' }}"
-{% endfor %}
-{% endif %}
-{% if early_rest_configs %}
-    early_rest_configs:
-{% for early_rest_config in early_rest_configs %}
-       - endpoint: {{ early_rest_config.endpoint }}
-         body: "{{ '{{' }} lookup('file', '{{ files_dir }}/{{ early_rest_config.fn }}') {{ '}}' }}"
-{% endfor %}
-{% endif %}
-
-  tasks:
-
-  - name: Get Docker IP
-    script: /opt/xos/observers/onos/scripts/dockerip.sh {{ ONOS_container }}
-    register: onosaddr
-
-  - name: Wait for ONOS to come up
-    wait_for:
-      host={{ '{{' }} onosaddr.stdout {{ '}}' }}
-      port={{ '{{' }} item {{ '}}' }}
-      state=present
-    with_items:
-    - 8101
-    - 8181
-    - 9876
-
-  - name: Config file directory
-    file:
-      path=/home/ubuntu/{{ appname }}/
-      state=directory
-
-{% if config_fns %}
-  - name: Copy over configuration files
-    copy:
-      src={{ files_dir }}/{{ '{{' }} item {{ '}}' }}
-      dest=/home/ubuntu/{{ appname }}/{{ '{{' }} item {{ '}}' }}
-    with_items:
-        {% for config_fn in config_fns %}
-        - {{ config_fn }}
-        {% endfor %}
-
-  - name: Make sure config directory exists
-    shell: docker exec {{ ONOS_container }} mkdir -p /root/onos/config/
-    sudo: yes
-
-  - name: Copy config files into container
-    shell: docker cp {{ appname }}/{{ '{{' }} item {{ '}}' }} {{ ONOS_container }}:/root/onos/config/
-    sudo: yes
-    with_items:
-        {% for config_fn in config_fns %}
-        - {{ config_fn }}
-        {% endfor %}
-{% endif %}
-
-  # Don't know how to check for this condition, just wait
-  - name: Wait for ONOS to install the apps
-    wait_for: timeout=15
-
-{% if early_rest_configs %}
-  - name: Add ONOS early configuration values
-    uri:
-      url: http://{{ '{{' }} onosaddr.stdout {{ '}}' }}:8181/{{ '{{' }} item.endpoint {{ '}}' }}
-      body: "{{ '{{' }} item.body {{ '}}' }}"
-      body_format: raw
-      method: POST
-      user: karaf
-      password: karaf
-    with_items: "early_rest_configs"
-
-  # Don't know how to check for this condition, just wait
-  - name: Wait for ONOS to restart
-    wait_for: timeout=15
-{% endif %}
-
-{% if dependencies %}
-  - name: Add dependencies to ONOS
-    uri:
-      url: http://{{ '{{' }} onosaddr.stdout {{ '}}' }}:8181/onos/v1/applications/{{ '{{' }} item {{ '}}' }}/active
-      method: POST
-      user: karaf
-      password: karaf
-    with_items:
-        {% for dependency in dependencies %}
-        - {{ dependency }}
-        {% endfor %}
-{% endif %}
-
-{% if rest_configs %}
-# Do this after services have been activated, or it will cause an exception.
-# vOLT will re-read its net config; vbng may not.
-  - name: Add ONOS configuration values
-    uri:
-      url: http://{{ '{{' }} onosaddr.stdout {{ '}}' }}:8181/{{ '{{' }} item.endpoint {{ '}}' }} #http://localhost:8181/onos/v1/network/configuration/
-      body: "{{ '{{' }} item.body {{ '}}' }}"
-      body_format: raw
-      method: POST
-      user: karaf
-      password: karaf
-    with_items: "rest_configs"
-{% endif %}
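The {{ '{{' }} ... {{ '}}' }} idiom in this playbook exists because the file is templated twice: once when XOS renders it with the fields from get_extra_attributes(), and again when Ansible evaluates its own variables at run time. A minimal sketch of the first pass, assuming Jinja2 is the template engine:

    from jinja2 import Template

    # XOS's pass turns {{ '{{' }} into a literal "{{", leaving an ordinary
    # Ansible variable reference behind for the second pass.
    line = Template("body: \"{{ '{{' }} item.body {{ '}}' }}\"")
    print line.render()   # -> body: "{{ item.body }}"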
diff --git a/xos/observers/onos/steps/sync_onosservice.py b/xos/observers/onos/steps/sync_onosservice.py
deleted file mode 100644
index efa5d72..0000000
--- a/xos/observers/onos/steps/sync_onosservice.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import hashlib
-import os
-import socket
-import sys
-import base64
-import time
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from synchronizers.base.ansible import run_template_ssh
-from observers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-from core.models import Service, Slice
-from services.onos.models import ONOSService, ONOSApp
-from util.logger import Logger, logging
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-class SyncONOSService(SyncInstanceUsingAnsible):
-    provides=[ONOSService]
-    observes=ONOSService
-    requested_interval=0
-    template_name = "sync_onosservice.yaml"
-    service_key_name = "/opt/xos/observers/onos/onos_key"
-
-    def __init__(self, *args, **kwargs):
-        super(SyncONOSService, self).__init__(*args, **kwargs)
-
-    def fetch_pending(self, deleted):
-        if (not deleted):
-            objs = ONOSService.get_service_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
-        else:
-            objs = ONOSService.get_deleted_service_objects()
-
-        return objs
-
-    def get_instance(self, o):
-        # We assume the ONOS service owns a slice, so pick one of the instances
-        # inside that slice to sync to.
-
-        serv = o
-
-        if serv.use_external_host:
-            return serv.use_external_host
-
-        if serv.slices.exists():
-            slice = serv.slices.all()[0]
-            if slice.instances.exists():
-                return slice.instances.all()[0]
-
-        return None
-
-    def get_extra_attributes(self, o):
-        fields={}
-        fields["instance_hostname"] = self.get_instance(o).instance_name.replace("_","-")
-        fields["appname"] = o.name
-        fields["nat_ip"] = self.get_instance(o).get_ssh_ip()
-        fields["ONOS_container"] = "ONOS"
-        return fields
-
-    def sync_fields(self, o, fields):
-        # the super causes the playbook to be run
-        super(SyncONOSService, self).sync_fields(o, fields)
-
-    def run_playbook(self, o, fields):
-        instance = self.get_instance(o)
-        if (instance.isolation=="container"):
-            # If the instance is already a container, then we don't need to
-            # install ONOS.
-            return
-        super(SyncONOSService, self).run_playbook(o, fields)
-
-    def delete_record(self, m):
-        pass
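The fetch_pending() filter above is the recurring dirty-object query in these steps: an object is pending if it has never been enacted, or has been updated since its last enactment. The same predicate in plain Python, with hypothetical timestamps:

    def is_pending(enacted, updated):
        # Mirrors Q(enacted__lt=F('updated')) | Q(enacted=None)
        return enacted is None or enacted < updated

    print is_pending(None, 100)   # True  (never enacted)
    print is_pending(90, 100)     # True  (updated after last enactment)
    print is_pending(100, 90)     # False (already in sync)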
diff --git a/xos/observers/onos/steps/sync_onosservice.yaml b/xos/observers/onos/steps/sync_onosservice.yaml
deleted file mode 100644
index fd9c3db..0000000
--- a/xos/observers/onos/steps/sync_onosservice.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- hosts: {{ instance_name }}
-  gather_facts: False
-  connection: ssh
-  user: ubuntu
-  sudo: yes
-
-  tasks:
-
-  - name: Fix /etc/hosts
-    lineinfile:
-      dest=/etc/hosts
-      regexp="127.0.0.1 localhost"
-      line="127.0.0.1 localhost {{ instance_hostname }}"
-
-  - name: Add repo key
-    apt_key:
-      keyserver=hkp://pgp.mit.edu:80
-      id=58118E89F3A912897C070ADBF76221572C52609D
-
-  - name: Install Docker repo
-    apt_repository:
-      repo="deb https://apt.dockerproject.org/repo ubuntu-trusty main"
-      state=present
-
-  - name: Install Docker
-    apt:
-      name={{ '{{' }} item {{ '}}' }}
-      state=latest
-      update_cache=yes
-    with_items:
-    - docker-engine
-    - python-pip
-    - python-httplib2
-
-  - name: Install docker-py
-    pip:
-      name=docker-py
-      state=latest
-
-  - name: Start ONOS container
-    docker:
-      docker_api_version: "1.18"
-      name: {{ ONOS_container }}
-      # was: reloaded
-      state: running
-      image: onosproject/onos
-      ports:
-      - "6653:6653"
-      - "8101:8101"
-      - "8181:8181"
-      - "9876:9876"
-
-  - name: Get Docker IP
-    script: /opt/xos/observers/onos/scripts/dockerip.sh {{ ONOS_container }}
-    register: dockerip
-
-  - name: Wait for ONOS to come up
-    wait_for:
-      host={{ '{{' }} dockerip.stdout {{ '}}' }}
-      port={{ '{{' }} item {{ '}}' }}
-      state=present
-    with_items:
-    - 8101
-    - 8181
-    - 9876
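The wait_for tasks above poll the container's ports until ONOS answers. A rough Python equivalent of that polling loop, with a hypothetical Docker IP standing in for the registered dockerip fact:

    import socket
    import time

    def wait_for_port(host, port, timeout=300):
        # Retry a TCP connect until it succeeds or the timeout expires.
        deadline = time.time() + timeout
        while time.time() < deadline:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(5)
            try:
                s.connect((host, port))
                return True
            except socket.error:
                time.sleep(2)
            finally:
                s.close()
        return False

    for port in (8101, 8181, 9876):
        wait_for_port("172.17.0.2", port)   # hypothetical container IP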
diff --git a/xos/observers/onos/stop.sh b/xos/observers/onos/stop.sh
deleted file mode 100755
index 17d6eb7..0000000
--- a/xos/observers/onos/stop.sh
+++ /dev/null
@@ -1 +0,0 @@
-pkill -9 -f onos-observer.py
diff --git a/xos/observers/onos/supervisor/onos-observer.conf b/xos/observers/onos/supervisor/onos-observer.conf
deleted file mode 100644
index 36e00d9..0000000
--- a/xos/observers/onos/supervisor/onos-observer.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[supervisord]
-logfile=/var/log/supervisord.log ; (main log file;default $CWD/supervisord.log)
-pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
-nodaemon=true
-
-[program:synchronizer]
-command=python /opt/xos/observers/onos/onos-observer.py -C /opt/xos/observers/onos/onos_observer_config
-stderr_logfile=/var/log/supervisor/synchronizer.err.log
-stdout_logfile=/var/log/supervisor/synchronizer.out.log
diff --git a/xos/observers/requestrouter/configurationPush.py b/xos/observers/requestrouter/configurationPush.py
deleted file mode 100644
index 857de8b..0000000
--- a/xos/observers/requestrouter/configurationPush.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import ansible.playbook
-import ansible.constants as C
-import ansible.utils.template
-from ansible import errors
-from ansible import callbacks
-from ansible import utils
-from subprocess import call
-
-class ConfigurationPush:
-	def __init__(self):
-		pass
-
-	def config_push(self, service_name, user, playbook_name,hostfile):
-		'''stats = callbacks.AggregateStats()
-		playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
-		runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)
-		pb = ansible.playbook.PlayBook(playbook="playbook/site.yml",
-					callbacks=playbook_cb,
-            				runner_callbacks=runner_cb,
-            				stats=stats
-					)
-		result = pb.run()
-		print result
-		'''
-
-		call("ansible-playbook --private-key=planetw "+playbook_name+" -i "+hostfile+" -u "+user+"  --extra-vars \"name="+service_name+"\"", shell=True)
-	
-
-if __name__ == "__main__":
-        # This module has no standalone entry point; the request router sync
-        # steps import it and call config_push() directly.
-        pass
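config_push() builds the ansible-playbook command as one shell string, which breaks if any argument contains spaces. A sketch of the same invocation with a list argv instead (the values shown are hypothetical examples of what the sync steps pass in):

    from subprocess import call

    playbook_name = "playbook/site_redir.yml"
    hostfile = "/etc/ansible/requestrouter/dnsredir/hosts"
    user = "princeton_coredirect"
    service_name = "service_6"

    call(["ansible-playbook", "--private-key=planetw", playbook_name,
          "-i", hostfile, "-u", user,
          "--extra-vars", "name=%s" % service_name])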
diff --git a/xos/observers/requestrouter/deleters/servicemap_deleter.py b/xos/observers/requestrouter/deleters/servicemap_deleter.py
deleted file mode 100644
index 427242f..0000000
--- a/xos/observers/requestrouter/deleters/servicemap_deleter.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os
-import sys
-import traceback
-from services.requestrouter.models import ServiceMap
-from synchronizers.base.deleter import Deleter
-from util.logger import Logger, logging
-from xos.config import Config, XOS_DIR
-
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from rrlib import RequestRouterLibrary
-from configurationPush import ConfigurationPush
-import rrlib_config
-
-logger = Logger(level=logging.INFO)
-
-class ServiceMapDeleter(Deleter, RequestRouterLibrary, ConfigurationPush):
-        model='ServiceMap'
-
-        def __init__(self, **args):
-            Deleter.__init__(self, **args)
-            RequestRouterLibrary.__init__(self)
-            ConfigurationPush.__init__(self)
-
-
-        def call(self, pk, model_dict):
-          try:
-              servicemap = ServiceMap.objects.get(pk=pk)
-              service_uid = self.get_servicemap_uid(servicemap)
-              self.config_push(service_uid, rrlib_config.REDIR_USER, XOS_DIR + "/observers/requestrouter/playbook/site_redir_delete.yml", "/etc/ansible/requestrouter/dnsredir/hosts")
-              self.config_push(service_uid, rrlib_config.DEMUX_USER, XOS_DIR + "/observers/requestrouter/playbook/site_demux_delete.yml", "/etc/ansible/requestrouter/dnsdemux/hosts")
-              print "XXX delete ServiceMap %s", servicemap.name
-              return True
-          except Exception, e:
-              traceback.print_exc()
-              logger.exception("Failed to erase ServiceMap with pk %s" % pk)
-              return False
-
-if __name__ == "__main__":
-  smap = ServiceMapDeleter()
-  smap.call( 6, {'name': 'Service23'} )
diff --git a/xos/observers/requestrouter/model-deps b/xos/observers/requestrouter/model-deps
deleted file mode 100644
index 36ef620..0000000
--- a/xos/observers/requestrouter/model-deps
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-    "ServiceMap": [
-        "Slice"
-    ], 
-    "Slice": [
-        "Site", 
-        "Service"
-    ], 
-    "User": [
-        "Site"
-    ]
-}
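model-deps files like this one map each model to the models it depends on; the rr_observer_config below points at this file via its dependency_graph setting so the observer can order its work. A minimal sketch of loading one:

    import json

    with open("/opt/xos/observers/requestrouter/model-deps") as f:
        deps = json.load(f)
    print deps["ServiceMap"]   # -> [u'Slice']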
diff --git a/xos/observers/requestrouter/playbook/roles/delete_demux/handlers/main.yml b/xos/observers/requestrouter/playbook/roles/delete_demux/handlers/main.yml
deleted file mode 100644
index a888468..0000000
--- a/xos/observers/requestrouter/playbook/roles/delete_demux/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: restart dnsdemux
-  service: name=dnsdemux state=restarted
diff --git a/xos/observers/requestrouter/playbook/roles/delete_demux/tasks/main.yml b/xos/observers/requestrouter/playbook/roles/delete_demux/tasks/main.yml
deleted file mode 100644
index 417401a..0000000
--- a/xos/observers/requestrouter/playbook/roles/delete_demux/tasks/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-# This playbook contains plays to delete configuration files from dnsdemux slice
-- name: delete dnsdemux config files
-  file: path={{dst_dnsdemux_conf}}/{{name}}.conf state=absent
-  notify: restart dnsdemux
diff --git a/xos/observers/requestrouter/playbook/roles/delete_demux/vars/main.yml b/xos/observers/requestrouter/playbook/roles/delete_demux/vars/main.yml
deleted file mode 100644
index b457050..0000000
--- a/xos/observers/requestrouter/playbook/roles/delete_demux/vars/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# Variables listed here are applicable to all host groups
-
-src_dnsdemux: ../../../../temp_config/dnsdemux
-dst_dnsdemux_conf: /etc/dnsdemux/default/
-
diff --git a/xos/observers/requestrouter/playbook/roles/delete_redir/handlers/main.yml b/xos/observers/requestrouter/playbook/roles/delete_redir/handlers/main.yml
deleted file mode 100644
index 8aa497e..0000000
--- a/xos/observers/requestrouter/playbook/roles/delete_redir/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: restart dnsredir
-  service: name=dnsredir state=restarted
diff --git a/xos/observers/requestrouter/playbook/roles/delete_redir/tasks/main.yml b/xos/observers/requestrouter/playbook/roles/delete_redir/tasks/main.yml
deleted file mode 100644
index 58833bb..0000000
--- a/xos/observers/requestrouter/playbook/roles/delete_redir/tasks/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# This playbook contains plays to delete configuration files from dnsredir slice
-- name: delete maps.d directory (and all intermediate directories)
-  #copy: src={{src_dnsredir}}/{{name}}.d dest={{dst_dnsredir_confdir}}
-  file: path={{dst_dnsredir_confdir}}/{{name}}.d/maps.d state=absent
- 
-- name: delete config directory
-  file: path={{dst_dnsredir_confdir}}/{{name}}.d/ state=absent
- 
-- name: delete dnsredir config file
-  file: path={{dst_dnsredir_conf}}/{{name}}.conf state=absent
-  notify: restart dnsredir
-
diff --git a/xos/observers/requestrouter/playbook/roles/delete_redir/vars/main.yml b/xos/observers/requestrouter/playbook/roles/delete_redir/vars/main.yml
deleted file mode 100644
index dc79d35..0000000
--- a/xos/observers/requestrouter/playbook/roles/delete_redir/vars/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# Variables listed here are applicable to all host groups
-
-src_dnsredir: ../../../../temp_config/dnsredir
-dst_dnsredir_conf: /etc/dnsredir/conf.d/
-dst_dnsredir_confdir: /etc/dnsredir
diff --git a/xos/observers/requestrouter/playbook/roles/demux/handlers/main.yml b/xos/observers/requestrouter/playbook/roles/demux/handlers/main.yml
deleted file mode 100644
index a888468..0000000
--- a/xos/observers/requestrouter/playbook/roles/demux/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: restart dnsdemux
-  service: name=dnsdemux state=restarted
diff --git a/xos/observers/requestrouter/playbook/roles/demux/tasks/main.yml b/xos/observers/requestrouter/playbook/roles/demux/tasks/main.yml
deleted file mode 100644
index 0339aed..0000000
--- a/xos/observers/requestrouter/playbook/roles/demux/tasks/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# This playbook contains plays to copy configurations to destination
-- name: copy dnsdemux config files
-  copy: src={{src_dnsdemux}}/{{name}}.conf dest={{dst_dnsdemux_conf}}/{{name}}.conf
-  notify: restart dnsdemux
-
-#- name: Start the dnsdemux service
-  #service: name=dnsdemux state=started enabled=true
-
diff --git a/xos/observers/requestrouter/playbook/roles/demux/vars/main.yml b/xos/observers/requestrouter/playbook/roles/demux/vars/main.yml
deleted file mode 100644
index b457050..0000000
--- a/xos/observers/requestrouter/playbook/roles/demux/vars/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# Variables listed here are applicable to all host groups
-
-src_dnsdemux: ../../../../temp_config/dnsdemux
-dst_dnsdemux_conf: /etc/dnsdemux/default/
-
diff --git a/xos/observers/requestrouter/playbook/roles/redir/handlers/main.yml b/xos/observers/requestrouter/playbook/roles/redir/handlers/main.yml
deleted file mode 100644
index 8aa497e..0000000
--- a/xos/observers/requestrouter/playbook/roles/redir/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: restart dnsredir
-  service: name=dnsredir state=restarted
diff --git a/xos/observers/requestrouter/playbook/roles/redir/tasks/main.yml b/xos/observers/requestrouter/playbook/roles/redir/tasks/main.yml
deleted file mode 100644
index e390ed9..0000000
--- a/xos/observers/requestrouter/playbook/roles/redir/tasks/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-# This playbook contains plays to copy configurations to destination
-- name: create maps.d directory (and all intermediate directories)
-  #copy: src={{src_dnsredir}}/{{name}}.d dest={{dst_dnsredir_confdir}}
-  file: src={{src_dnsredir}}/{{name}}.d/maps.d dest={{dst_dnsredir_confdir}}/{{name}}.d/maps.d state=directory
- 
-- name: copy map.conf
-  copy: src={{src_dnsredir}}/{{name}}.d/maps.d/map.conf dest={{dst_dnsredir_confdir}}/{{name}}.d/maps.d/map.conf
- 
-- name: copy codeen_nodes.conf
-  copy: src={{src_dnsredir}}/{{name}}.d/codeen_nodes.conf dest={{dst_dnsredir_confdir}}/{{name}}.d/codeen_nodes.conf
-  
-- name: copy node-to-ip.txt
-  copy: src={{src_dnsredir}}/{{name}}.d/node-to-ip.txt dest={{dst_dnsredir_confdir}}/{{name}}.d/node-to-ip.txt
-
-- name: copy dnsredir config file
-  copy: src={{src_dnsredir}}/{{name}}.conf dest={{dst_dnsredir_conf}}/{{name}}.conf
-  notify: restart dnsredir
-
diff --git a/xos/observers/requestrouter/playbook/roles/redir/vars/main.yml b/xos/observers/requestrouter/playbook/roles/redir/vars/main.yml
deleted file mode 100644
index dc79d35..0000000
--- a/xos/observers/requestrouter/playbook/roles/redir/vars/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# Variables listed here are applicable to all host groups
-
-src_dnsredir: ../../../../temp_config/dnsredir
-dst_dnsredir_conf: /etc/dnsredir/conf.d/
-dst_dnsredir_confdir: /etc/dnsredir
diff --git a/xos/observers/requestrouter/playbook/site_demux.yml b/xos/observers/requestrouter/playbook/site_demux.yml
deleted file mode 100644
index b5261dc..0000000
--- a/xos/observers/requestrouter/playbook/site_demux.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# This playbook copies the dnsdemux configuration files from temp directory.
-
-- name: copies the configuration files from temp directory
-  hosts: all
-  #remote_user: {{r_user}}
-
-  roles:
-    - demux
diff --git a/xos/observers/requestrouter/playbook/site_demux_delete.yml b/xos/observers/requestrouter/playbook/site_demux_delete.yml
deleted file mode 100644
index 49a7c87..0000000
--- a/xos/observers/requestrouter/playbook/site_demux_delete.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# This playbook deletes the dnsdemux configuration files from request router instances
-
-- name: deletes the configuration files from request router instances
-  hosts: all
-  #remote_user: {{r_user}}
-
-  roles:
-    - delete_demux
diff --git a/xos/observers/requestrouter/playbook/site_redir.yml b/xos/observers/requestrouter/playbook/site_redir.yml
deleted file mode 100644
index 50a7284..0000000
--- a/xos/observers/requestrouter/playbook/site_redir.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# This playbook copies the dnsredir configuration files from temp directory.
-
-- name: copies the configuration files from temp directory
-  hosts: all
-  #remote_user: {{r_user}}
-
-  roles:
-    - redir
diff --git a/xos/observers/requestrouter/playbook/site_redir_delete.yml b/xos/observers/requestrouter/playbook/site_redir_delete.yml
deleted file mode 100644
index 9a8611d..0000000
--- a/xos/observers/requestrouter/playbook/site_redir_delete.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# This playbook deletes the dnsredir configuration files from request router instances
-
-- name: deletes the configuration files from request router instances
-  hosts: all
-  #remote_user: {{r_user}}
-
-  roles:
-    - delete_redir
diff --git a/xos/observers/requestrouter/rr_observer_config b/xos/observers/requestrouter/rr_observer_config
deleted file mode 100644
index ec3a1ba..0000000
--- a/xos/observers/requestrouter/rr_observer_config
+++ /dev/null
@@ -1,36 +0,0 @@
-
-[plc]
-name=plc
-#deployment=VICCI
-deployment=VINI
-
-[db]
-name=xos
-user=plstackuser
-#password=2uMDYtJK
-password=1HL07C0E
-host=localhost
-port=5432
-
-[api]
-host=128.112.171.237
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-dependency_graph=/opt/xos/observers/requestrouter/model-deps
-steps_dir=/opt/xos/observers/requestrouter/steps
-deleters_dir=/opt/xos/observers/requestrouter/deleters
-log_file=console
-#/var/log/hpc.log
-driver=None
-
-#[feefie]
-#client_id='vicci_dev_central'
-#user_id='pl'
diff --git a/xos/observers/requestrouter/rrlib.py b/xos/observers/requestrouter/rrlib.py
deleted file mode 100644
index 9faca4a..0000000
--- a/xos/observers/requestrouter/rrlib.py
+++ /dev/null
@@ -1,203 +0,0 @@
-import os
-import base64
-import string
-import sys
-import socket
-from sets import Set
-if __name__ == '__main__':
-    sys.path.append("/opt/xos")
-    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
-
-from xos.config import Config
-from core.models import Service
-from services.requestrouter.models import RequestRouterService, ServiceMap
-from util.logger import Logger, logging
-import rrlib_config
-
-logger = Logger(level=logging.INFO)
-
-'''
-Conventions:
-1) Each dnsredir backend listens on port 9000 + servicemap.pk (where pk is the primary key generated by the Django model)
-'''
-
-class RequestRouterLibrary:
-
-    def __init__(self):
-        pass
-    
-    def gen_slice_info(self, service=None):   
-        """generates instance information from slice of request router
-        """
-
-        if (service is None ):
-            service = RequestRouterService.objects.get()
-
-        mapping = {}
-        #static mapping for demo purpose 
-        #mapping["node47.princeton.vicci.org"] = "128.112.171.112"
-        mapping["node48.princeton.vicci.org"] = "128.112.171.114"
-    
-        '''for slice in service.service.all():
-            name = slice.name
-            for instance in slice.instances.all():
-                mapping[instance.name] = str(instance.ip)
-        '''
-        return mapping
-
-    def gen_servicemap_slice_info(self, servicemap):
-        """generates instance information from slice of servicemap
-        """
-
-        wzone = Set(['arizona', 'stanford', 'on.lab', 'houston']) # zone=1 in codeen.conf
-        ezone = Set(['princeton', 'atlanta', 'new york', 'georgia tech']) # zone=2 in codeen.conf
-
-        mapping_zone = {}
-        mapping_ip = {}
-        slice = servicemap.slice
-        name = slice.name
-        for instance in slice.instances.all():
-            mapping_ip[instance.node.name] = socket.gethostbyname(instance.node.name)
-            #print "instance name "+instance.name+str(instance.ip)+"\n"
-            site = instance.node.site.name
-            if(site.lower() in wzone):
-                mapping_zone[instance.node.name] = str(1)
-            else:
-                mapping_zone[instance.node.name] = str(2)
-
-        return mapping_ip, mapping_zone
-
-
-
-    def gen_slice_file(self, service):
-        """ generates host file for the slice information
-            to be used by ansible to push configuration files
-        """
-
-        mapping = self.gen_slice_info(service)
-
-        fn = "/etc/ansible/requestrouter/dnsredir/hosts"
-        f = open(fn, "w")
-        for (k,v) in mapping.items():
-            f.write("%s\n" % k)
-
-        fn = "/etc/ansible/requestrouter/dnsdemux/hosts"
-        f = open(fn, "w")
-        for (k,v) in mapping.items():
-            f.write("%s\n" % k)
-
-
-    def get_servicemap_uid(self, servicemap):
-        return "service_" + str(servicemap.pk)
-
-    def get_service_port(self, servicemap):
-        return str(9000 + servicemap.pk)
-
-    def gen_dnsredir_serviceconf(self, servicemap):
-        objname = self.get_servicemap_uid(servicemap)
-    
-        rr_mapping = self.gen_slice_info(None)
-    
-        #generate dnsredir.conf file parameters to be used in static file.
-        mapping = {}
-        mapping["port_listen"] = self.get_service_port(servicemap)
-        mapping["configdir"] = rrlib_config.DNSREDIR_CONFIGDIR_PREFIX+objname+".d/"
-        mapping["logdir"] = rrlib_config.DNSREDIR_LOGDIR_PREFIX+objname+".d"
-        mapping["pidfile"] = rrlib_config.DNSREDIR_PIDFILE_PREFIX+objname+".pid"
-        mapping["domain_name"] = servicemap.prefix      
-        mapping["heartbeat_port"] = rrlib_config.HEARTBEAT_PORT
-
-        #generate dnsredir.conf file 
-
-        fn = "./temp_config/dnsredir/"+objname+".conf"
-        f = open(fn, "w")
-        for (k,v) in rr_mapping.items():
-            f.write(mapping["domain_name"]+". NS "+k+". "+v+" 3600 \n")
-
-
-        f.write("""
-Default_TTL 30
-
-Port %(port_listen)s
-
-ConfigDir %(configdir)s
-
-MapsDir maps.d
-
-HTTPPort %(heartbeat_port)d
-
-PidFile %(pidfile)s
-
-HttpRequestPort 8081
-
-""" % mapping)
-
-        #generate configdirectory
-        
-        os.mkdir("./temp_config/dnsredir/"+objname+".d")
-        
-        #generate codeen_nodes.conf
-        mapping_ip, mapping_zone = self.gen_servicemap_slice_info(servicemap)
-
-        codeen_name = "./temp_config/dnsredir/"+objname+".d/codeen_nodes.conf"
-        f = open(codeen_name, "w")
-        for (k,v) in mapping_zone.items():
-            f.write(k+" zone="+v+" \n")
-
-        iptxt = "./temp_config/dnsredir/"+objname+".d/node-to-ip.txt"
-        f = open(iptxt, "w")
-        for (k,v) in mapping_ip.items():
-            f.write(k+" "+v+" \n")
-
-        #generate maps directory
-        os.mkdir("./temp_config/dnsredir/"+objname+".d/maps.d")
-
-        # redirection map
-        map = "./temp_config/dnsredir/"+objname+".d/maps.d/map.conf"
-        f = open(map, "w")
-        #hardcoded probable public IP masks from arizona and princeton regions respectively
-        f.write("prefix "+servicemap.prefix+" \n")
-        f.write("map 150.135.211.252/32 zone 1 || zone 2 \n")
-        f.write("map 128.112.171.112/24 zone 2 || zone 1 \n")
-        f.write("map 0.0.0.0/0 zone 1 || zone 2 \n")
-
-
-    def gen_dnsdemux_serviceconf(self, servicemap):
-        '''
-        generates a frontend service*.conf file for each service.
-        It assumes that there is a dnsdemux frontend running on the RR installation and will
-        just add a conf file for each service in /etc/dnsdemux/default
-        '''
-        objname = self.get_servicemap_uid(servicemap)
-        #generate dnsdemux.conf file parameters to be used in static file.
-       
-        port_listen = self.get_service_port(servicemap)
-        domain_name = servicemap.prefix  
-        #generate service specific .conf file
-
-        rr_mapping = self.gen_slice_info(None)
-
-        fn = "./temp_config/dnsdemux/"+objname+".conf"
-        f = open(fn, "w")
-
-        for (k,v) in rr_mapping.items():
-            f.write("Forward "+v+" "+port_listen+" 8081 "+domain_name+".\n")
-
-    
-    def teardown_temp_configfiles(self, objname):
-        if os.path.exists("./temp_config/dnsdemux/"+objname+".conf"):
-            os.remove("./temp_config/dnsdemux/"+objname+".conf")
-        if os.path.exists("./temp_config/dnsredir/"+objname+".d/maps.d/map.conf"):
-            os.remove("./temp_config/dnsredir/"+objname+".d/maps.d/map.conf")
-        if os.path.exists("./temp_config/dnsredir/"+objname+".d/maps.d"):
-            os.rmdir("./temp_config/dnsredir/"+objname+".d/maps.d")
-        if os.path.exists("./temp_config/dnsredir/"+objname+".d/node-to-ip.txt"):
-            os.remove("./temp_config/dnsredir/"+objname+".d/node-to-ip.txt")
-        if os.path.exists("./temp_config/dnsredir/"+objname+".d/codeen_nodes.conf"):
-            os.remove("./temp_config/dnsredir/"+objname+".d/codeen_nodes.conf")
-        if os.path.exists("./temp_config/dnsredir/"+objname+".d"):
-            os.rmdir("./temp_config/dnsredir/"+objname+".d")
-        if os.path.exists("./temp_config/dnsredir/"+objname+".conf"):
-            os.remove("./temp_config/dnsredir/"+objname+".conf")
-
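Putting the conventions together: for a hypothetical ServiceMap with pk=6 and prefix cdn.example.com, the helpers above yield the following, including the Forward line gen_dnsdemux_serviceconf() writes per request router node:

    uid = "service_" + str(6)    # get_servicemap_uid -> "service_6"
    port = str(9000 + 6)         # get_service_port   -> "9006"
    print "Forward %s %s 8081 %s." % ("128.112.171.114", port, "cdn.example.com")
    # -> Forward 128.112.171.114 9006 8081 cdn.example.com.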
diff --git a/xos/observers/requestrouter/rrlib_config.py b/xos/observers/requestrouter/rrlib_config.py
deleted file mode 100644
index d0c00c2..0000000
--- a/xos/observers/requestrouter/rrlib_config.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-
-DNSREDIR_CONFIGDIR_PREFIX = "/etc/dnsredir/"
-DNSREDIR_LOGDIR_PREFIX = "/var/log/dnsredir/"
-DNSREDIR_PIDFILE_PREFIX = "/var/run/dnsredir."
-REDIR_USER = "princeton_coredirect"
-#REDIR_USER = "arizona_tools1"
-#DEMUX_USER = "arizona_tools1"
-DEMUX_USER = "princeton_codnsdemux"
-HEARTBEAT_PORT = 9000
-#not required as of now, as there will be only one frontend dnsdemux for all the services
-#DNSDEMUX_CONFIGDIR_PREFIX = "/etc/dnsdemux/"
-#DNSDEMUX_LOGDIR_PREFIX = "/var/log/dnsdemux/"
-#DNSDEMUX_PIDFILE_PREFIX = "/var/run/dnsdemux."
-
-TBD = "TBD"
diff --git a/xos/observers/requestrouter/steps/sync_requestrouterservices.py b/xos/observers/requestrouter/steps/sync_requestrouterservices.py
deleted file mode 100644
index 9af3584..0000000
--- a/xos/observers/requestrouter/steps/sync_requestrouterservices.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import os
-import sys
-import base64
-import traceback
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from core.models import Service
-from services.requestrouter.models import RequestRouterService
-from util.logger import Logger, logging
-
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from rrlib import RequestRouterLibrary
-
-logger = Logger(level=logging.INFO)
-
-class SyncRequestRouterService(SyncStep, RequestRouterLibrary):
-    provides=[RequestRouterService]
-    requested_interval=0
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-        RequestRouterLibrary.__init__(self)
-
-    def fetch_pending(self):
-        try:
-            return RequestRouterService.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
-        except Exception, e:
-            traceback.print_exc()
-            return None
-
-    def sync_record(self, rr_service):
-        try:
-            print "syncing service!"
-            logger.info("sync'ing rr_service %s" % str(rr_service))
-            self.gen_slice_file(rr_service)
-            rr_service.save()
-            return True
-        except Exception, e:
-            traceback.print_exc()
-            return False
-
-
diff --git a/xos/observers/requestrouter/steps/sync_servicemap.py b/xos/observers/requestrouter/steps/sync_servicemap.py
deleted file mode 100644
index 18bcec4..0000000
--- a/xos/observers/requestrouter/steps/sync_servicemap.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-import base64
-import traceback
-from django.db.models import F, Q
-from xos.config import Config, XOS_DIR
-from synchronizers.base.syncstep import SyncStep
-from core.models import Service
-from services.requestrouter.models import ServiceMap
-from util.logger import Logger, logging
-
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from rrlib import RequestRouterLibrary
-from configurationPush import ConfigurationPush
-import rrlib_config
-
-logger = Logger(level=logging.INFO)
-
-class SyncServiceMap(SyncStep, RequestRouterLibrary, ConfigurationPush):
-    provides=[ServiceMap]
-    requested_interval=0
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-        RequestRouterLibrary.__init__(self)
-        ConfigurationPush.__init__(self)
-
-    def fetch_pending(self):
-        try:
-            return ServiceMap.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
-        except Exception, e:
-            traceback.print_exc()
-            return None
-
-    def sync_record(self, servicemap):
-        try:
-            print "sync! %s " % self.get_servicemap_uid(servicemap)
-            self.gen_dnsredir_serviceconf(servicemap)
-            self.gen_dnsdemux_serviceconf(servicemap)
-            # push generated files from temp_config
-            service_uid = self.get_servicemap_uid(servicemap)
-            self.config_push(service_uid, rrlib_config.REDIR_USER, XOS_DIR + "/observers/requestrouter/playbook/site_redir.yml", "/etc/ansible/requestrouter/dnsredir/hosts")
-            self.config_push(service_uid, rrlib_config.DEMUX_USER, XOS_DIR + "/observers/requestrouter/playbook/site_demux.yml", "/etc/ansible/requestrouter/dnsdemux/hosts")
-            self.teardown_temp_configfiles(service_uid)
-            return True
-        except Exception, e:
-            traceback.print_exc()
-            return False
-
-if __name__ == "__main__":
-    sv = SyncServiceMap()
-
-    recs = sv.fetch_pending()
-
-    for rec in recs:
-        sv.sync_record( rec )
diff --git a/xos/observers/syndicate/__init__.py b/xos/observers/syndicate/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/xos/observers/syndicate/__init__.py
+++ /dev/null
diff --git a/xos/observers/syndicate/model-deps b/xos/observers/syndicate/model-deps
deleted file mode 100644
index b15c5d1..0000000
--- a/xos/observers/syndicate/model-deps
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-    "SlicePrivilege": [
-        "User", 
-        "Slice"
-    ], 
-    "Slice": [
-        "Site", 
-        "Service"
-    ], 
-    "VolumeAccessRight": [
-        "Volume"
-    ], 
-    "User": [
-        "Site"
-    ]
-}
diff --git a/xos/observers/syndicate/requirements.py b/xos/observers/syndicate/requirements.py
deleted file mode 100644
index 303fd3d..0000000
--- a/xos/observers/syndicate/requirements.py
+++ /dev/null
@@ -1,5 +0,0 @@
-requests
-gevent
-grequests
-setproctitle
-psutil
diff --git a/xos/observers/syndicate/run.sh b/xos/observers/syndicate/run.sh
deleted file mode 100644
index 82960a9..0000000
--- a/xos/observers/syndicate/run.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-export XOS_DIR=/opt/xos
-python syndicate-backend.py  -C $XOS_DIR/observers/syndicate/syndicate_observer_config
diff --git a/xos/observers/syndicate/start.sh b/xos/observers/syndicate/start.sh
deleted file mode 100644
index 1c408a1..0000000
--- a/xos/observers/syndicate/start.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-export XOS_DIR=/opt/xos
-nohup python syndicate-backend.py  -C $XOS_DIR/observers/syndicate/syndicate_observer_config > /dev/null 2>&1 &
diff --git a/xos/observers/syndicate/steps/sync_volume.py b/xos/observers/syndicate/steps/sync_volume.py
deleted file mode 100644
index e6dc90b..0000000
--- a/xos/observers/syndicate/steps/sync_volume.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-import traceback
-import base64
-
-if __name__ == "__main__":
-    # for testing 
-    if os.getenv("OPENCLOUD_PYTHONPATH"):
-        sys.path.append( os.getenv("OPENCLOUD_PYTHONPATH") )
-    else:
-        print >> sys.stderr, "No OPENCLOUD_PYTHONPATH variable set.  Assuming that OpenCloud is in PYTHONPATH"
- 
-    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
-
-
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from core.models import Service
-from services.syndicate_storage.models import Volume
-
-import logging
-from logging import Logger
-logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
-logger = logging.getLogger()
-logger.setLevel( logging.INFO )
-
-# point to planetstack
-if __name__ != "__main__": 
-    if os.getenv("OPENCLOUD_PYTHONPATH") is not None:
-        sys.path.insert(0, os.getenv("OPENCLOUD_PYTHONPATH"))
-    else:
-        logger.warning("No OPENCLOUD_PYTHONPATH set; assuming your PYTHONPATH works")
-
-# syndicatelib will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-import syndicatelib
-
-
-class SyncVolume(SyncStep):
-    provides=[Volume]
-    requested_interval=0
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-
-    def sync_record(self, volume):
-        """
-        Synchronize a Volume record with Syndicate.
-        """
-        
-        logger.info( "Sync Volume = %s\n\n" % volume.name )
-    
-        user_email = volume.owner_id.email
-        config = syndicatelib.get_config()
-        
-        volume_principal_id = syndicatelib.make_volume_principal_id( user_email, volume.name )
-
-        # get the observer secret 
-        try:
-            observer_secret = config.SYNDICATE_OPENCLOUD_SECRET
-        except Exception, e:
-            traceback.print_exc()
-            logger.error("config is missing SYNDICATE_OPENCLOUD_SECRET")
-            raise e
-
-        # volume owner must exist as a Syndicate user...
-        try:
-            rc, user = syndicatelib.ensure_principal_exists( volume_principal_id, observer_secret, is_admin=False, max_UGs=1100, max_RGs=1)
-            assert rc == True, "Failed to create or read volume principal '%s'" % volume_principal_id
-        except Exception, e:
-            traceback.print_exc()
-            logger.error("Failed to ensure principal '%s' exists" % volume_principal_id )
-            raise e
-
-        # volume must exist 
-        
-        # create or update the Volume
-        try:
-            new_volume = syndicatelib.ensure_volume_exists( volume_principal_id, volume, user=user )
-        except Exception, e:
-            traceback.print_exc()
-            logger.error("Failed to ensure volume '%s' exists" % volume.name )
-            raise e
-           
-        # did we create the Volume?
-        if new_volume is not None:
-            # we're good
-            pass 
-             
-        # otherwise, just update it 
-        else:
-            try:
-                rc = syndicatelib.update_volume( volume )
-            except Exception, e:
-                traceback.print_exc()
-                logger.error("Failed to update volume '%s', exception = %s" % (volume.name, e.message))
-                raise e
-                    
-        return True
-    
-    def delete_record(self, volume):
-        try:
-            volume_name = volume.name
-            syndicatelib.ensure_volume_absent( volume_name )
-        except Exception, e:
-            traceback.print_exc()
-            logger.exception("Failed to erase volume '%s'" % volume_name)
-            raise e
-
-
-
-
-
-if __name__ == "__main__":
-    sv = SyncVolume()
-
-
-    # first, set all volumes to not-enacted so we can test 
-    for v in Volume.objects.all():
-       v.enacted = None
-       v.save()
-    
-    # NOTE: for resetting only 
-    if len(sys.argv) > 1 and sys.argv[1] == "reset":
-       sys.exit(0)
-
-    recs = sv.fetch_pending()
-
-    for rec in recs:
-        rc = sv.sync_record( rec )
-        if not rc:
-          print "\n\nFailed to sync %s\n\n" % (rec.name)
-
diff --git a/xos/observers/syndicate/steps/sync_volumeaccessright.py b/xos/observers/syndicate/steps/sync_volumeaccessright.py
deleted file mode 100644
index 2889502..0000000
--- a/xos/observers/syndicate/steps/sync_volumeaccessright.py
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-import base64
-import traceback
-
-if __name__ == "__main__":
-    # for testing 
-    if os.getenv("OPENCLOUD_PYTHONPATH"):
-        sys.path.append( os.getenv("OPENCLOUD_PYTHONPATH") )
-    else:
-        print >> sys.stderr, "No OPENCLOUD_PYTHONPATH variable set.  Assuming that OpenCloud is in PYTHONPATH"
- 
-    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
-
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from core.models import Service
-
-import logging
-from logging import Logger
-logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
-logger = logging.getLogger()
-logger.setLevel( logging.INFO )
-
-# point to planetstack 
-if __name__ != "__main__":
-    if os.getenv("OPENCLOUD_PYTHONPATH") is not None:
-        sys.path.insert(0, os.getenv("OPENCLOUD_PYTHONPATH"))
-    else:
-        logger.warning("No OPENCLOUD_PYTHONPATH set; assuming your PYTHONPATH works")
-
-from services.syndicate_storage.models import VolumeAccessRight
-
-# syndicatelib will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-import syndicatelib
-
-class SyncVolumeAccessRight(SyncStep):
-    provides=[VolumeAccessRight]
-    requested_interval=0
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-
-    def sync_record(self, vac):
-        
-        syndicate_caps = "UNKNOWN"  # for exception handling
-        
-        # get arguments
-        config = syndicatelib.get_config()
-        user_email = vac.owner_id.email
-        volume_name = vac.volume.name
-        syndicate_caps = syndicatelib.opencloud_caps_to_syndicate_caps( vac.cap_read_data, vac.cap_write_data, vac.cap_host_data ) 
-        
-        logger.info( "Sync VolumeAccessRight for (%s, %s)" % (user_email, volume_name) )
-        
-        # validate config
-        try:
-           RG_port = config.SYNDICATE_RG_DEFAULT_PORT
-           observer_secret = config.SYNDICATE_OPENCLOUD_SECRET
-        except Exception, e:
-           traceback.print_exc()
-           logger.error("syndicatelib config is missing SYNDICATE_RG_DEFAULT_PORT, SYNDICATE_OPENCLOUD_SECRET")
-           raise e
-            
-        # ensure the user exists and has credentials
-        try:
-            rc, user = syndicatelib.ensure_principal_exists( user_email, observer_secret, is_admin=False, max_UGs=1100, max_RGs=1 )
-            assert rc is True, "Failed to ensure principal %s exists (rc = %s,%s)" % (user_email, rc, user)
-        except Exception, e:
-            traceback.print_exc()
-            logger.error("Failed to ensure user '%s' exists" % user_email )
-            raise e
- 
-        # make the access right for the user to create their own UGs, and provision an RG for this user that will listen on localhost.
-        # the user will have to supply their own RG closure.
-        try:
-            rc = syndicatelib.setup_volume_access( user_email, volume_name, syndicate_caps, RG_port, observer_secret )
-            assert rc is True, "Failed to setup volume access for %s in %s" % (user_email, volume_name)
-
-        except Exception, e:
-            traceback.print_exc()
-            logger.error("Faoed to ensure user %s can access Volume %s with rights %s" % (user_email, volume_name, syndicate_caps))
-            raise e
-
-        return True
-    
-    # Jude: this will simply go on to purge the object from
-    # OpenCloud. The previous 'deleter' version was a no-op also.
-    def delete_record(self, obj):
-        pass
-
-
-if __name__ == "__main__":
-
-    # first, set all VolumeAccessRights to not-enacted so we can test 
-    for v in VolumeAccessRight.objects.all():
-       v.enacted = None
-       v.save()
-
-    # NOTE: for resetting only 
-    if len(sys.argv) > 1 and sys.argv[1] == "reset":
-       sys.exit(0)
-
-
-    sv = SyncVolumeAccessRight()
-    recs = sv.fetch_pending()
-
-    for rec in recs:
-        sv.sync_record( rec )
-
diff --git a/xos/observers/syndicate/steps/sync_volumeslice.py b/xos/observers/syndicate/steps/sync_volumeslice.py
deleted file mode 100644
index 1be61b9..0000000
--- a/xos/observers/syndicate/steps/sync_volumeslice.py
+++ /dev/null
@@ -1,158 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-import base64
-import traceback
-
-if __name__ == "__main__":
-    # for testing 
-    if os.getenv("OPENCLOUD_PYTHONPATH"):
-        sys.path.append( os.getenv("OPENCLOUD_PYTHONPATH") )
-    else:
-        print >> sys.stderr, "No OPENCLOUD_PYTHONPATH variable set.  Assuming that OpenCloud is in PYTHONPATH"
- 
-    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
-
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from core.models import Service, Slice
-
-import logging
-from logging import Logger
-logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
-logger = logging.getLogger()
-logger.setLevel( logging.INFO )
-
-# point to planetstack 
-if __name__ != "__main__":
-    if os.getenv("OPENCLOUD_PYTHONPATH") is not None:
-        sys.path.insert(0, os.getenv("OPENCLOUD_PYTHONPATH"))
-    else:
-        logger.warning("No OPENCLOUD_PYTHONPATH set; assuming your PYTHONPATH works") 
-
-from services.syndicate_storage.models import VolumeSlice,VolumeAccessRight,Volume
-
-# syndicatelib will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-import syndicatelib
-
-
-class SyncVolumeSlice(SyncStep):
-    provides=[VolumeSlice]
-    requested_interval=0
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-
-    def sync_record(self, vs):
-        
-        logger.info("Sync VolumeSlice for (%s, %s)" % (vs.volume_id.name, vs.slice_id.name))
-        
-        # extract arguments...
-        user_email = vs.slice_id.creator.email
-        slice_name = vs.slice_id.name
-        volume_name = vs.volume_id.name
-        syndicate_caps = syndicatelib.opencloud_caps_to_syndicate_caps( vs.cap_read_data, vs.cap_write_data, vs.cap_host_data )
-        RG_port = vs.RG_portnum
-        UG_port = vs.UG_portnum
-        slice_secret = None
-        
-        config = syndicatelib.get_config()
-        try:
-           observer_secret = config.SYNDICATE_OPENCLOUD_SECRET
-           RG_closure = config.SYNDICATE_RG_CLOSURE
-           observer_pkey_path = config.SYNDICATE_PRIVATE_KEY
-           syndicate_url = config.SYNDICATE_SMI_URL
-           
-        except Exception, e:
-           traceback.print_exc()
-           logger.error("syndicatelib config is missing one or more of the following: SYNDICATE_OPENCLOUD_SECRET, SYNDICATE_RG_CLOSURE, SYNDICATE_PRIVATE_KEY, SYNDICATE_SMI_URL")
-           raise e
-            
-        # get secrets...
-        try:
-           observer_pkey_pem = syndicatelib.get_private_key_pem( observer_pkey_path )
-           assert observer_pkey_pem is not None, "Failed to load Observer private key"
-           
-           # get/create the slice secret
-           slice_secret = syndicatelib.get_or_create_slice_secret( observer_pkey_pem, slice_name )    
-           assert slice_secret is not None, "Failed to get or create slice secret for %s" % slice_name
-           
-        except Exception, e:
-           traceback.print_exc()
-           logger.error("Failed to load secret credentials")
-           raise e
-        
-        # make sure there's a slice-controlled Syndicate user account for the slice owner
-        slice_principal_id = syndicatelib.make_slice_principal_id( user_email, slice_name )
-        
-        try:
-            rc, user = syndicatelib.ensure_principal_exists( slice_principal_id, observer_secret, is_admin=False, max_UGs=1100, max_RGs=1 )
-            assert rc is True, "Failed to ensure principal %s exists (rc = %s,%s)" % (slice_principal_id, rc, user)
-        except Exception, e:
-            traceback.print_exc()
-            logger.error('Failed to ensure slice user %s exists' % slice_principal_id)
-            raise e
-            
-        # grant the slice-owning user the ability to provision UGs in this Volume, and also provision for the user the (single) RG the slice will instantiate in each VM.
-        try:
-            rc = syndicatelib.setup_volume_access( slice_principal_id, volume_name, syndicate_caps, RG_port, observer_secret, RG_closure=RG_closure )
-            assert rc is True, "Failed to set up Volume access for slice %s in %s" % (slice_principal_id, volume_name)
-            
-        except Exception, e:
-            traceback.print_exc()
-            logger.error("Failed to set up Volume access for slice %s in %s" % (slice_principal_id, volume_name))
-            raise e
-            
-        # generate and save slice credentials....
-        try:
-            slice_cred = syndicatelib.save_slice_credentials( observer_pkey_pem, syndicate_url, slice_principal_id, volume_name, slice_name, observer_secret, slice_secret, UG_port, existing_user=user )
-            assert slice_cred is not None, "Failed to generate slice credential for %s in %s" % (slice_principal_id, volume_name )
-                
-        except Exception, e:
-            traceback.print_exc()
-            logger.error("Failed to generate slice credential for %s in %s" % (slice_principal_id, volume_name))
-            raise e
-             
-        # ... and push them all out.
-        try:
-            rc = syndicatelib.push_credentials_to_slice( slice_name, slice_cred )
-            assert rc is True, "Failed to push credentials to slice %s for volume %s" % (slice_name, volume_name)
-               
-        except Exception, e:
-            traceback.print_exc()
-            logger.error("Failed to push slice credentials to %s for volume %s" % (slice_name, volume_name))
-            raise e
-        
-        return True
-    
-    # This method will simply cause the object to be purged from OpenCloud
-    def delete_record(self, volume_slice):
-        pass
-
-
-if __name__ == "__main__":
-    sv = SyncVolumeSlice()
-
-    # first, set all VolumeSlices to not-enacted so we can test
-    for v in VolumeSlice.objects.all():
-       v.enacted = None
-       v.save()
-
-    # NOTE: for resetting only 
-    if len(sys.argv) > 1 and sys.argv[1] == "reset":
-       sys.exit(0)
-
-    recs = sv.fetch_pending()
-
-    for rec in recs:
-        if rec.slice_id.creator is None:
-           print "Ignoring slice %s, since it has no creator" % (rec.slice_id)
-           continue
-
-        sv.sync_record( rec )
-
diff --git a/xos/observers/syndicate/stop.sh b/xos/observers/syndicate/stop.sh
deleted file mode 100644
index f4a8e28..0000000
--- a/xos/observers/syndicate/stop.sh
+++ /dev/null
@@ -1 +0,0 @@
-pkill -9 -f syndicate-backend.py
diff --git a/xos/observers/syndicate/syndicate-backend.py b/xos/observers/syndicate/syndicate-backend.py
deleted file mode 100644
index 9b53c77..0000000
--- a/xos/observers/syndicate/syndicate-backend.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-import os
-os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
-from synchronizers.base.backend import Backend 
-
-if __name__ == '__main__':
-
-    backend = Backend()
-    backend.run()
- 
diff --git a/xos/observers/syndicate/syndicate_observer_config b/xos/observers/syndicate/syndicate_observer_config
deleted file mode 100644
index 7e6d78a..0000000
--- a/xos/observers/syndicate/syndicate_observer_config
+++ /dev/null
@@ -1,35 +0,0 @@
-
-[plc]
-name=plc
-deployment=VICCI
-
-[db]
-name=xos
-#user=plstackuser
-#password=2uMDYtJK
-user=postgres
-password=password
-host=localhost
-port=5432
-
-[api]
-host=128.112.171.237
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-dependency_graph=/opt/xos/observers/syndicate/model-deps
-steps_dir=/opt/xos/observers/syndicate/steps
-deleters_dir=/opt/xos/observers/syndicate/deleters
-log_file=console
-driver=None
-
-[feefie]
-client_id='vicci_dev_central'
-user_id='pl'
diff --git a/xos/observers/syndicate/syndicatelib.py b/xos/observers/syndicate/syndicatelib.py
deleted file mode 100644
index 56bd120..0000000
--- a/xos/observers/syndicate/syndicatelib.py
+++ /dev/null
@@ -1,1353 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Define some common methods for the Syndicate observer.
-"""
-import os
-import sys
-import random
-import json
-import time
-import requests
-import traceback
-import base64
-import BaseHTTPServer
-import setproctitle
-import threading
-import urllib
-
-from Crypto.Hash import SHA256 as HashAlg
-from Crypto.PublicKey import RSA as CryptoKey
-from Crypto import Random
-from Crypto.Signature import PKCS1_PSS as CryptoSigner
-
-import logging
-from logging import Logger
-logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
-logger = logging.getLogger()
-logger.setLevel( logging.INFO )
-
-# get config package 
-import syndicatelib_config.config as CONFIG
-
-# get the Syndicate modules
-import syndicate
-
-import syndicate.client.bin.syntool as syntool
-import syndicate.client.common.msconfig as msconfig
-import syndicate.client.common.api as api
-import syndicate.util.storage as syndicate_storage
-import syndicate.util.watchdog as syndicate_watchdog
-import syndicate.util.daemonize as syndicate_daemon
-import syndicate.util.crypto as syndicate_crypto
-import syndicate.util.provisioning as syndicate_provisioning
-import syndicate.syndicate as c_syndicate
-
-# for testing 
-TESTING = False
-class FakeObject(object):
-   def __init__(self):
-       pass
-
-if os.getenv("OPENCLOUD_PYTHONPATH") is not None:
-   sys.path.insert(0, os.getenv("OPENCLOUD_PYTHONPATH"))
-else:
-   logger.warning("No OPENCLOUD_PYTHONPATH set.  Assuming Syndicate models are in your PYTHONPATH")
-
-try:
-   os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
-
-   # get our models
-   import services.syndicate_storage.models as models
-
-   # get OpenCloud models 
-   from core.models import Slice,Instance
-   
-   from django.core.exceptions import ObjectDoesNotExist
-   from django.db import IntegrityError
-
-except ImportError, ie:
-   logger.warning("Failed to import models; some tests will not work")
-
-   # create a fake "models" package that has exactly the members we need for testing.
-   models = FakeObject()
-   models.Volume = FakeObject()
-   models.Volume.CAP_READ_DATA = 1
-   models.Volume.CAP_WRITE_DATA = 2
-   models.Volume.CAP_HOST_DATA = 4
-   
-   TESTING = True
-
-
-#-------------------------------
-class SyndicateObserverError( Exception ):
-    pass
-
-#-------------------------------
-def get_config():
-    """
-    Return the imported config
-    """
-    return CONFIG
-
-
-#-------------------------------
-def make_openid_url( email ):
-    """
-    Generate an OpenID identity URL from an email address.
-    """
-    return os.path.join( CONFIG.SYNDICATE_OPENID_TRUSTROOT, "id", email )
-
-
-#-------------------------------
-def connect_syndicate( username=CONFIG.SYNDICATE_OPENCLOUD_USER, password=CONFIG.SYNDICATE_OPENCLOUD_PASSWORD, user_pkey_pem=CONFIG.SYNDICATE_OPENCLOUD_PKEY ):
-    """
-    Connect to the OpenCloud Syndicate SMI, using the OpenCloud user credentials.
-    """
-    debug = True 
-    if hasattr(CONFIG, "DEBUG"):
-       debug = CONFIG.DEBUG
-       
-    client = syntool.Client( username, CONFIG.SYNDICATE_SMI_URL,
-                             password=password,
-                             user_pkey_pem=user_pkey_pem,
-                             debug=debug )
-
-    return client
-
-
-#-------------------------------
-def opencloud_caps_to_syndicate_caps( cap_read, cap_write, cap_host ):
-    """
-    Convert OpenCloud capability bits from the UI into Syndicate's capability bits.
-    """
-    syn_caps = 0
-    
-    if cap_read:
-        syn_caps |= (msconfig.GATEWAY_CAP_READ_DATA | msconfig.GATEWAY_CAP_READ_METADATA)
-    if cap_write:
-        syn_caps |= (msconfig.GATEWAY_CAP_WRITE_DATA | msconfig.GATEWAY_CAP_WRITE_METADATA)
-    if cap_host:
-        syn_caps |= (msconfig.GATEWAY_CAP_COORDINATE)
-
-    return syn_caps
-
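-# For example (hypothetical flags): a volume that is readable and writable,
-# but not hostable, maps as
-#
-#   opencloud_caps_to_syndicate_caps( True, True, False )
-#     == GATEWAY_CAP_READ_DATA | GATEWAY_CAP_READ_METADATA
-#      | GATEWAY_CAP_WRITE_DATA | GATEWAY_CAP_WRITE_METADATA
-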
-#-------------------------------
-def ensure_user_exists( user_email, **user_kw ):
-    """
-    Given an OpenCloud user, ensure that the corresponding
-    Syndicate user exists on the MS.  This method does NOT 
-    create any OpenCloud-specific data.
-
-    Return the (created, user), where created==True if the user 
-    was created and created==False if the user was read.
-    Raise an exception on error.
-    """
-    
-    client = connect_syndicate()
-    user_openid_url = make_openid_url( user_email )
-    
-    return syndicate_provisioning.ensure_user_exists( client, user_email, user_openid_url, **user_kw )
-
-
-#-------------------------------
-def ensure_user_absent( user_email ):
-    """
-    Ensure that a given OpenCloud user's associated Syndicate user record
-    has been deleted.  This method does NOT delete any OpenCloud-specific data.
-
-    Returns True on success
-    Raises an exception on error
-    """
-
-    client = connect_syndicate()
-
-    return client.delete_user( user_email )
- 
-
-#-------------------------------
-def make_volume_principal_id( user_email, volume_name ):
-    """
-    Create a principal id for a Volume owner.
-    """
-    
-    volume_name_safe = urllib.quote( volume_name )
-    
-    return "volume_%s.%s" % (volume_name_safe, user_email)
- 
- 
-#-------------------------------
-def make_slice_principal_id( user_email, slice_name ):
-    """
-    Create a principal id for a slice owner.
-    """
-    
-    slice_name_safe = urllib.quote( slice_name )
-    
-    return "slice_%s.%s" % (slice_name, user_email)
- 
-
-#-------------------------------
-def ensure_principal_exists( user_email, observer_secret, **user_kw ):
-    """ 
-    Ensure that a Syndicate user exists, as well as its OpenCloud-specific data.
-    
-    Return (True, user) on success, where user is the Syndicate user record
-    (whether it was just created or already existed).
-    Return (False, None) on error.
-    """
-    
-    try:
-         created, new_user = ensure_user_exists( user_email, **user_kw )
-    except Exception, e:
-         traceback.print_exc()
-         logger.error("Failed to ensure user '%s' exists" % user_email )
-         return (False, None)
-      
-    # if we created a new user, then save its (sealed) credentials to the Django DB
-    if created:
-         try:
-            rc = put_principal_data( user_email, observer_secret, new_user['signing_public_key'], new_user['signing_private_key'] )
-            assert rc == True, "Failed to save SyndicatePrincipal"
-         except Exception, e:
-            traceback.print_exc()
-            logger.error("Failed to save private key for principal %s" % (user_email))
-            return (False, None)
-
-    return (True, new_user)
-
-
-
-#-------------------------------
-def ensure_principal_absent( user_email ):
-    """
-    Ensure that a Syndicate user does not exist, and remove the OpenCloud-specific data.
-    
-    Return True on success.
-    """
-    
-    ensure_user_absent( user_email )
-    delete_principal_data( user_email )
-    return True
-
-#-------------------------------
-def ensure_volume_exists( user_email, opencloud_volume, user=None ):
-    """
-    Given the email address of a user, ensure that the given
-    Volume exists and is owned by that user.
-    Do not try to ensure that the user exists.
-
-    Return the Volume if we created it, or return None if we did not.
-    Raise an exception on error.
-    """
-    client = connect_syndicate()
-
-    try:
-        volume = client.read_volume( opencloud_volume.name )
-    except Exception, e:
-        # transport error 
-        logger.exception(e)
-        raise e
-
-    if volume is None:
-        # the volume does not exist....try to create it 
-        vol_name = opencloud_volume.name
-        vol_blocksize = opencloud_volume.blocksize
-        vol_description = opencloud_volume.description
-        vol_private = opencloud_volume.private
-        vol_archive = opencloud_volume.archive 
-        vol_default_gateway_caps = opencloud_caps_to_syndicate_caps( opencloud_volume.cap_read_data, opencloud_volume.cap_write_data, opencloud_volume.cap_host_data )
-
-        try:
-            vol_info = client.create_volume( user_email, vol_name, vol_description, vol_blocksize,
-                                             private=vol_private,
-                                             archive=vol_archive,
-                                             active=True,
-                                             default_gateway_caps=vol_default_gateway_caps,
-                                             store_private_key=False,
-                                             metadata_private_key="MAKE_METADATA_KEY" )
-
-        except Exception, e:
-            # transport error
-            logger.exception(e)
-            raise e
-
-        else:
-            # successfully created the volume!
-            return vol_info
-
-    else:
-        
-        # volume already exists.  Verify it's owned by this user.
-        if user is None:
-           try:
-               user = client.read_user( volume['owner_id'] )
-           except Exception, e:
-               # transport error, or user doesn't exist (either is unacceptable)
-               logger.exception(e)
-               raise e
-
-        if user is None or user['email'] != user_email:
-            raise Exception("Volume '%s' already exists, but is NOT owned by '%s'" % (opencloud_volume.name, user_email) )
-
-        # we're good!
-        return None
-
-
-#-------------------------------
-def ensure_volume_absent( volume_name ):
-    """
-    Given an OpenCloud volume, ensure that the corresponding Syndicate
-    Volume does not exist.
-    """
-
-    client = connect_syndicate()
-
-    # this is idempotent, and returns True even if the Volume doesn't exist
-    return client.delete_volume( volume_name )
-    
-    
-#-------------------------------
-def update_volume( opencloud_volume ):
-    """
-    Update a Syndicate Volume from an OpenCloud Volume model.
-    Fails if the Volume does not exist in Syndicate.
-    """
-
-    client = connect_syndicate()
-
-    vol_name = opencloud_volume.name
-    vol_description = opencloud_volume.description
-    vol_private = opencloud_volume.private
-    vol_archive = opencloud_volume.archive
-    vol_default_gateway_caps = opencloud_caps_to_syndicate_caps( opencloud_volume.cap_read_data, opencloud_volume.cap_write_data, opencloud_volume.cap_host_data )
-
-    try:
-        rc = client.update_volume( vol_name,
-                                   description=vol_description,
-                                   private=vol_private,
-                                   archive=vol_archive,
-                                   default_gateway_caps=vol_default_gateway_caps )
-
-        if not rc:
-            raise Exception("update_volume(%s) failed!" % vol_name )
-
-    except Exception, e:
-        # transport or method error
-        logger.exception(e)
-        return False
-
-    else:
-        return True
-
-
-#-------------------------------
-def ensure_volume_access_right_exists( user_email, volume_name, caps, allowed_gateways=[msconfig.GATEWAY_TYPE_UG] ):
-    """
-    Ensure that a particular user has particular access to a particular volume.
-    Do not try to ensure that the user or volume exist, however!
-    """
-    client = connect_syndicate()
-    return syndicate_provisioning.ensure_volume_access_right_exists( client, user_email, volume_name, caps, allowed_gateways )
-
-#-------------------------------
-def ensure_volume_access_right_absent( user_email, volume_name ):
-    """
-    Ensure that access to a particular volume is revoked.
-    """
-    client = connect_syndicate()
-    return syndicate_provisioning.ensure_volume_access_right_absent( client, user_email, volume_name )
-    
-
-#-------------------------------
-def setup_volume_access( user_email, volume_name, caps, RG_port, slice_secret, RG_closure=None ):
-    """
-    Set up the Volume to allow the slice to provision UGs in it, and to fire up RGs.
-       * create the Volume Access Right for the user, so (s)he can create Gateways.
-       * provision a single Replica Gateway, serving on localhost.
-    """
-    client = connect_syndicate()
-    
-    try:
-       rc = ensure_volume_access_right_exists( user_email, volume_name, caps )
-       assert rc is True, "Failed to create access right for %s in %s" % (user_email, volume_name)
-       
-    except Exception, e:
-       logger.exception(e)
-       return False
-    
-    RG_name = syndicate_provisioning.make_gateway_name( "OpenCloud", "RG", volume_name, "localhost" )
-    RG_key_password = syndicate_provisioning.make_gateway_private_key_password( RG_name, slice_secret )
-    
-    try:
-       rc = syndicate_provisioning.ensure_RG_exists( client, user_email, volume_name, RG_name, "localhost", RG_port, RG_key_password, closure=RG_closure )
-    except Exception, e:
-       logger.exception(e)
-       return False
-    
-    return True
-       
-
-#-------------------------------
-def teardown_volume_access( user_email, volume_name ):
-    """
-    Revoke access to a Volume for a User.
-      * remove the user's Volume Access Right
-      * remove the user's gateways
-    """
-    client = connect_syndicate()
-    
-    # block the user from creating more gateways, and delete the gateways
-    try:
-       rc = client.remove_user_from_volume( user_email, volume_name )
-       assert rc is True, "Failed to remove access right for %s in %s" % (user_email, volume_name)
-       
-    except Exception, e:
-       logger.exception(e)
-       return False
-    
-    return True
-    
-
-#-------------------------------
-def create_sealed_and_signed_blob( private_key_pem, secret, data ):
-    """
-    Create a sealed and signed message.
-    """
-    
-    # seal it with the password 
-    logger.info("Sealing credential data")
-    
-    rc, sealed_data = c_syndicate.password_seal( data, secret )
-    if rc != 0:
-       logger.error("Failed to seal data with the secret, rc = %s" % rc)
-       return None
-    
-    msg = syndicate_crypto.sign_and_serialize_json( private_key_pem, sealed_data )
-    if msg is None:
-       logger.error("Failed to sign credential")
-       return None 
-    
-    return msg 
-
-
-#-------------------------------
-def verify_and_unseal_blob( public_key_pem, secret, blob_data ):
-    """
-    Verify and unseal a serialized string of JSON.
-    """
-
-    # verify it 
-    rc, sealed_data = syndicate_crypto.verify_and_parse_json( public_key_pem, blob_data )
-    if rc != 0:
-        logger.error("Failed to verify and parse blob, rc = %s" % rc)
-        return None
-
-    logger.info("Unsealing credential data")
-
-    rc, data = c_syndicate.password_unseal( sealed_data, secret )
-    if rc != 0:
-        logger.error("Failed to unseal blob, rc = %s" % rc )
-        return None
-
-    return data
-
-
-#-------------------------------
-def create_volume_list_blob( private_key_pem, slice_secret, volume_list ):
-    """
-    Create a sealed volume list, signed with the private key.
-    """
-    list_data = {
-       "volumes": volume_list
-    }
-    
-    list_data_str = json.dumps( list_data )
-    
-    msg = create_sealed_and_signed_blob( private_key_pem, slice_secret, list_data_str )
-    if msg is None:
-       logger.error("Failed to seal volume list")
-       return None 
-    
-    return msg
- 
-
-#-------------------------------
-def create_slice_credential_blob( private_key_pem, slice_name, slice_secret, syndicate_url, volume_name, volume_owner, UG_port, user_pkey_pem ):
-    """
-    Create a sealed, signed, encoded slice credentials blob.
-    """
-    
-    # create and serialize the data 
-    cred_data = {
-       "syndicate_url":   syndicate_url,
-       "volume_name":     volume_name,
-       "volume_owner":    volume_owner,
-       "slice_name":      slice_name,
-       "slice_UG_port":   UG_port,
-       "principal_pkey_pem": user_pkey_pem,
-    }
-    
-    cred_data_str = json.dumps( cred_data )
-    
-    msg = create_sealed_and_signed_blob( private_key_pem, slice_secret, cred_data_str )
-    if msg is None:
-       logger.error("Failed to seal volume list")
-       return None 
-    
-    return msg 
-
-
-#-------------------------------
-def put_principal_data( user_email, observer_secret, public_key_pem, private_key_pem ):
-    """
-    Seal and store the principal's private key into the database, in a SyndicatePrincipal object,
-    so the instance-side Syndicate daemon syndicated.py can get them later.
-    Overwrite an existing principal if one exists.
-    """
-    
-    sealed_private_key = create_sealed_and_signed_blob( private_key_pem, observer_secret, private_key_pem )
-    if sealed_private_key is None:
-        return False
-
-    try:
-       sp = models.SyndicatePrincipal( sealed_private_key=sealed_private_key, public_key_pem=public_key_pem, principal_id=user_email )
-       sp.save()
-    except IntegrityError, e:
-       logger.warning("Overwriting existing principal %s" % user_email)
-       models.SyndicatePrincipal.objects.filter( principal_id=user_email ).delete()
-       sp.save()
-    
-    return True
-
-
-#-------------------------------
-def delete_principal_data( user_email ):
-    """
-    Delete an OpenCloud SyndicatePrincipal object.
-    """
-    
-    sp = get_principal_data( user_email )
-    if sp is not None:
-      sp.delete()
-    
-    return True
-
-
-#-------------------------------
-def get_principal_data( user_email ):
-    """
-    Get a SyndicatePrincipal record from the database 
-    """
-    
-    try:
-        sp = models.SyndicatePrincipal.objects.get( principal_id=user_email )
-        return sp
-    except ObjectDoesNotExist:
-        logger.error("No SyndicatePrincipal record for %s" % user_email)
-        return None
-    
-
-
-#-------------------------------
-def get_principal_pkey( user_email, observer_secret ):
-    """
-    Fetch and unseal the private key of a SyndicatePrincipal.
-    """
-    
-    sp = get_principal_data( user_email )
-    if sp is None:
-        logger.error("Failed to find private key for principal %s" % user_email )
-        return None 
-     
-    public_key_pem = sp.public_key_pem
-    sealed_private_key_pem = sp.sealed_private_key
-
-    # unseal
-    private_key_pem = verify_and_unseal_blob(public_key_pem, observer_secret, sealed_private_key_pem)
-    if private_key_pem is None:
-        logger.error("Failed to unseal private key")
-
-    return private_key_pem
-
-
-#-------------------------------
-def get_private_key_pem( pkey_path ):
-    """
-    Get a private key from storage, PEM-encoded.
-    """
-    
-    # get the OpenCloud private key 
-    observer_pkey = syndicate_storage.read_private_key( pkey_path )
-    if observer_pkey is None:
-       logger.error("Failed to load Observer private key")
-       return None
-    
-    observer_pkey_pem = observer_pkey.exportKey()
-    
-    return observer_pkey_pem
-
-
-#-------------------------------
-def encrypt_slice_secret( observer_pkey_pem, slice_secret ):
-    """
-    Encrypt and serialize the slice secret with the Observer private key
-    """
-    
-    # get the public key
-    try:
-       observer_pubkey_pem = CryptoKey.importKey( observer_pkey_pem ).publickey().exportKey()
-    except Exception, e:
-       logger.exception(e)
-       logger.error("Failed to derive public key from private key")
-       return None 
-    
-    # encrypt the data 
-    rc, sealed_slice_secret = c_syndicate.encrypt_data( observer_pkey_pem, observer_pubkey_pem, slice_secret )
-    
-    if rc != 0:
-       logger.error("Failed to encrypt slice secret")
-       return None 
-    
-    sealed_slice_secret_b64 = base64.b64encode( sealed_slice_secret )
-    
-    return sealed_slice_secret_b64
-    
-
-#-------------------------------
-def decrypt_slice_secret( observer_pkey_pem, sealed_slice_secret_b64 ):
-    """
-    Unserialize and decrypt a slice secret
-    """
-        
-    # get the public key
-    try:
-       observer_pubkey_pem = CryptoKey.importKey( observer_pkey_pem ).publickey().exportKey()
-    except Exception, e:
-       logger.exception(e)
-       logger.error("Failed to derive public key from private key")
-       return None 
-    
-    sealed_slice_secret = base64.b64decode( sealed_slice_secret_b64 )
-    
-    # decrypt it 
-    rc, slice_secret = c_syndicate.decrypt_data( observer_pubkey_pem, observer_pkey_pem, sealed_slice_secret )
-    
-    if rc != 0:
-       logger.error("Failed to decrypt '%s', rc = %d" % (sealed_slice_secret_b64, rc))
-       return None
-    
-    return slice_secret
- 
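-# Usage sketch for the pair above (hypothetical secret value): the two calls
-# should be inverses, since both derive the public key from the same Observer
-# private key:
-#
-#   sealed_b64 = encrypt_slice_secret( observer_pkey_pem, "opaque-secret" )
-#   assert decrypt_slice_secret( observer_pkey_pem, sealed_b64 ) == "opaque-secret"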
-
-#--------------------------------
-def get_slice_secret( observer_pkey_pem, slice_name, slice_fk=None ):
-    """
-    Get the shared secret for a slice.
-    """
-    
-    ss = None 
-    
-    # get the sealed slice secret from Django
-    try:
-       if slice_fk is not None:
-          ss = models.SliceSecret.objects.get( slice_id=slice_fk )
-       else:
-          ss = models.SliceSecret.objects.get( slice_id__name=slice_name )
-    except ObjectDoesNotExist, e:
-       logger.error("Failed to load slice secret for (%s, %s)" % (slice_fk, slice_name) )
-       return None 
-
-    return ss.secret 
- 
-
-#-------------------------------
-def put_slice_secret( observer_pkey_pem, slice_name, slice_secret, slice_fk=None, opencloud_slice=None ):
-    """
-    Store the shared secret for a slice in the Django DB (the secret is saved as given; it is not encrypted here).
-    """
-    
-    ss = None 
-    
-    if opencloud_slice is None:
-       # look up the slice 
-       try:
-          if slice_fk is None:
-             opencloud_slice = models.Slice.objects.get( name=slice_name )
-          else:
-             opencloud_slice = models.Slice.objects.get( id=slice_fk.id )
-       except Exception, e:
-          logger.exception(e)
-          logger.error("Failed to load slice (%s, %s)" % (slice_fk, slice_name) )
-          return False 
-    
-    ss = models.SliceSecret( slice_id=opencloud_slice, secret=slice_secret )
-    
-    ss.save()
-    
-    return True
-
-
-#-------------------------------
-def get_or_create_slice_secret( observer_pkey_pem, slice_name, slice_fk=None ):
-   """
-   Get a slice secret if it already exists, or generate a slice secret if one does not.
-   """
-   
-   slice_secret = get_slice_secret( observer_pkey_pem, slice_name, slice_fk=slice_fk )
-   if slice_secret is None or len(slice_secret) == 0:
-      
-      # generate a slice secret 
-      slice_secret = "".join( random.sample("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789", 32) )
-      
-      # store it 
-      rc = put_slice_secret( observer_pkey_pem, slice_name, slice_secret, slice_fk=slice_fk )
-      
-      if not rc:
-         raise SyndicateObserverError("Failed to create slice secret for (%s, %s)" % (slice_fk, slice_name))
-      
-   return slice_secret
-
-
-#-------------------------------
-def generate_slice_credentials( observer_pkey_pem, syndicate_url, user_email, volume_name, slice_name, observer_secret, slice_secret, UG_port, existing_user=None ):
-    """
-    Generate and return the set of credentials to be sent off to the slice VMs.
-    existing_user is a Syndicate user, as a dictionary.
-    
-    Return None on failure
-    """
-    
-    # get the user's private key 
-    logger.info("Obtaining private key for %s" % user_email)
-    
-    # it might be in the existing_user...
-    user_pkey_pem = None
-    if existing_user is not None:
-       user_pkey_pem = existing_user.get('signing_private_key', None)
-       
-    # no luck?
-    if user_pkey_pem is None:
-      try:
-         # get it from Django DB
-         user_pkey_pem = get_principal_pkey( user_email, observer_secret )
-         assert user_pkey_pem is not None, "No private key for %s" % user_email
-         
-      except:
-         traceback.print_exc()
-         logger.error("Failed to get private key; cannot generate credentials for %s in %s" % (user_email, volume_name) )
-         return None
-    
-    # generate a credentials blob
-    logger.info("Generating credentials for %s's slice" % (user_email))
-    try:
-       creds = create_slice_credential_blob( observer_pkey_pem, slice_name, slice_secret, syndicate_url, volume_name, user_email, UG_port, user_pkey_pem )
-       assert creds is not None, "Failed to create credentials for %s" % user_email 
-    
-    except:
-       traceback.print_exc()
-       logger.error("Failed to generate credentials for %s in %s" % (user_email, volume_name))
-       return None
-    
-    return creds
-
-
-#-------------------------------
-def save_slice_credentials( observer_pkey_pem, syndicate_url, user_email, volume_name, slice_name, observer_secret, slice_secret, UG_port, existing_user=None ): 
-    """
-    Create and save a credentials blob to a VolumeSlice.
-    Return the creds on success.
-    Return None on failure
-    """
-    
-    creds = generate_slice_credentials( observer_pkey_pem, syndicate_url, user_email, volume_name, slice_name, observer_secret, slice_secret, UG_port, existing_user=existing_user )
-    ret = None
-    
-    if creds is not None:
-       # save it 
-       vs = get_volumeslice( volume_name, slice_name )
-       
-       if vs is not None:
-          vs.credentials_blob = creds
-          vs.save()
-          
-          # success!
-          ret = creds
-       else:
-          logger.error("Failed to look up VolumeSlice(%s, %s)" % (volume_name, slice_name))
-       
-    else:
-       logger.error("Failed to generate credentials for %s, %s" % (volume_name, slice_name))
-       
-    return ret
-
-
-#-------------------------------
-def get_volumeslice_volume_names( slice_name ):
-    """
-    Get the list of Volume names from the datastore.
-    """
-    try:
-        all_vs = models.VolumeSlice.objects.filter( slice_id__name = slice_name )
-        volume_names = []
-        for vs in all_vs:
-           volume_names.append( vs.volume_id.name )
-           
-        return volume_names
-    except Exception, e:
-        logger.exception(e)
-        logger.error("Failed to query datastore for volumes mounted in %s" % slice_name)
-        return None 
- 
-
-#-------------------------------
-def get_volumeslice( volume_name, slice_name ):
-    """
-    Get a volumeslice record from the datastore.
-    """
-    try:
-        vs = models.VolumeSlice.objects.get( volume_id__name = volume_name, slice_id__name = slice_name )
-        return vs
-    except Exception, e:
-        logger.exception(e)
-        logger.error("Failed to query datastore for volumes (mounted in %s)" % (slice_name if (slice_name is not None or len(slice_name) > 0) else "UNKNOWN"))
-        return None 
-
-
-#-------------------------------
-def do_push( instance_hosts, portnum, payload ):
-    """
-    Push a payload to a list of instances.
-    NOTE: this has to be done in one go, since we can't import grequests
-    into the global namespace (without wreaking havoc on the credential server),
-    but it has to stick around for the push to work.
-    """
-    
-    global TESTING, CONFIG
-    
-    from gevent import monkey
-    
-    if TESTING:
-       monkey.patch_all()
-    
-    else:
-       # make gevent runnable from multiple threads (or Django will complain)
-       monkey.patch_all(socket=True, dns=True, time=True, select=True, thread=False, os=True, ssl=True, httplib=False, aggressive=True)
-    
-    import grequests
-    
-    # fan-out 
-    requests = []
-    for sh in instance_hosts:
-      rs = grequests.post( "http://" + sh + ":" + str(portnum), data={"observer_message": payload}, timeout=getattr(CONFIG, "SYNDICATE_HTTP_PUSH_TIMEOUT", 60) )
-      requests.append( rs )
-      
-    # fan-in
-    responses = grequests.map( requests )
-    
-    assert len(responses) == len(requests), "grequests error: len(responses) != len(requests)"
-    
-    for i in xrange(0,len(requests)):
-       resp = responses[i]
-       req = requests[i]
-       
-       if resp is None:
-          logger.error("Failed to connect to %s" % (req.url))
-          continue 
-       
-       # verify they all worked 
-       if resp.status_code != 200:
-          logger.error("Failed to POST to %s, status code = %s" % (resp.url, resp.status_code))
-          continue
-          
-    return True
-   
-
-#-------------------------------
-def get_slice_hostnames( slice_name ):
-   """
-   Query the Django DB and get the list of hosts running in a slice.
-   """
-
-   try:
-      openstack_slice = Slice.objects.get( name=slice_name )
-   except ObjectDoesNotExist:
-      logger.error("No such slice '%s'" % slice_name)
-      return None
-
-   hostnames = [s.node.name for s in openstack_slice.instances.all()]
-
-   return hostnames
-
-   
-#-------------------------------
-def push_credentials_to_slice( slice_name, payload ):
-   """
-   Push a credentials payload to the VMs in a slice.
-   """
-   hostnames = get_slice_hostnames( slice_name )
-   return do_push( hostnames, CONFIG.SYNDICATE_SLIVER_PORT, payload )
-
-   
-#-------------------------------
-class CredentialServerHandler( BaseHTTPServer.BaseHTTPRequestHandler ):
-   """
-   HTTP server handler that allows syndicated.py instances to poll
-   for volume state.
-   
-   NOTE: this is a fall-back mechanism.  The observer should push new 
-   volume state to the slices' instances.  However, if that fails, the 
-   instances are configured to poll for volume state periodically.  This 
-   server allows them to do just that.
-   
-   Responses:
-      GET /<slicename>              -- Reply with the signed sealed list of volume names, encrypted by the slice secret
-      GET /<slicename>/<volumename> -- Reply with the signed sealed volume access credentials, encrypted by the slice secret
-      
-      !!! TEMPORARY !!!
-      GET /<slicename>/SYNDICATE_SLICE_SECRET    -- Reply with the slice secret (TEMPORARY)
-   
-   
-   NOTE: We want to limit who can learn which Volumes a slice can access, so we'll seal its instances'
-   credentials with the SliceSecret secret.  The instances (which have the slice-wide secret) can then decrypt it.
-   However, sealing the listing is a time-consuming process (on the order of 10s), so we only want 
-   to do it when we have to.  Since *anyone* can ask for the ciphertext of the volume list,
-   we will cache the list ciphertext for each slice for a long-ish amount of time, so we don't
-   accidentally DDoS this server.  This necessarily means that the instance might see a stale
-   volume listing, but that's okay, since the Observer is eventually consistent anyway.
-   """
-   
-   cached_volumes_json = {}             # map slice_name --> (sealed volume list, expiry time)
-   cached_volumes_json_lock = threading.Lock()
-   
-   CACHED_VOLUMES_JSON_LIFETIME = 3600          # one hour
-   
-   SLICE_SECRET_NAME = "SYNDICATE_SLICE_SECRET"
-   
-   def parse_request_path( self, path ):
-      """
-      Parse the URL path into a slice name and (possibly) a volume name or SLICE_SECRET_NAME
-      """
-      path_parts = path.strip("/").split("/")
-      
-      if len(path_parts) == 0:
-         # invalid 
-         return (None, None)
-      
-      if len(path_parts) > 2:
-         # invalid
-         return (None, None)
-      
-      slice_name = path_parts[0]
-      if len(slice_name) == 0:
-         # empty string is invalid 
-         return (None, None)
-      
-      volume_name = None
-      
-      if len(path_parts) > 1:
-         volume_name = path_parts[1]
-         
-      return slice_name, volume_name
-   
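-   # Examples:  "/myslice"          -> ("myslice", None)
-   #            "/myslice/myvolume" -> ("myslice", "myvolume")
-   #            "/" or "/a/b/c"     -> (None, None)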
-   
-   def reply_data( self, data, datatype="application/json" ):
-      """
-      Give back a 200 response with data.
-      """
-      self.send_response( 200 )
-      self.send_header( "Content-Type", datatype )
-      self.send_header( "Content-Length", len(data) )
-      self.end_headers()
-      
-      self.wfile.write( data )
-      return 
-   
-   
-   def get_volumes_message( self, private_key_pem, observer_secret, slice_name ):
-      """
-      Get the json-ized list of volumes this slice is attached to.
-      Check the cache, evict stale data if necessary, and on miss, 
-      regenerate the slice volume list.
-      """
-      
-      # block the cache.
-      # NOTE: don't release the lock until we've generated credentials.
-      # Chances are, there's a thundering herd of instances coming online.
-      # Block them all until we've generated their slice's credentials,
-      # and then serve them the cached one.
-      
-      self.cached_volumes_json_lock.acquire()
-      
-      ret = None
-      volume_list_json, cache_timeout = self.cached_volumes_json.get( slice_name, (None, None) )
-      
-      if (cache_timeout is not None) and cache_timeout < time.time():
-         # expired
-         volume_list_json = None
-      
-      if volume_list_json is None:
-         # generate a new list and cache it.
-         
-         volume_names = get_volumeslice_volume_names( slice_name )
-         if volume_names is None:
-            # nothing to do...
-            ret = None
-         
-         else:
-            # get the slice secret 
-            slice_secret = get_slice_secret( private_key_pem, slice_name )
-            
-            if slice_secret is None:
-               # no such slice 
-               logger.error("No slice secret for %s" % slice_name)
-               ret = None
-            
-            else:
-               # seal and sign 
-               ret = create_volume_list_blob( private_key_pem, slice_secret, volume_names )
-         
-         # cache this 
-         if ret is not None:
-            self.cached_volumes_json[ slice_name ] = (ret, time.time() + self.CACHED_VOLUMES_JSON_LIFETIME )
-      
-      else:
-         # hit the cache
-         ret = volume_list_json
-      
-      self.cached_volumes_json_lock.release()
-      
-      return ret
-      
-   
-   def do_GET( self ):
-      """
-      Handle one GET
-      """
-      slice_name, volume_name = self.parse_request_path( self.path )
-      
-      # valid request?
-      if volume_name is None and slice_name is None:
-         self.send_error( 400 )
-      
-      # slice secret request?
-      elif volume_name == self.SLICE_SECRET_NAME and slice_name is not None:
-         
-         # get the slice secret 
-         ret = get_slice_secret( self.server.private_key_pem, slice_name )
-         
-         if ret is not None:
-            self.reply_data( ret )
-            return 
-         else:
-            self.send_error( 404 )
-      
-      # volume list request?
-      elif volume_name is None and slice_name is not None:
-         
-         # get the list of volumes for this slice
-         ret = self.get_volumes_message( self.server.private_key_pem, self.server.observer_secret, slice_name )
-         
-         if ret is not None:
-            self.reply_data( ret )
-            return
-         else:
-            self.send_error( 404 )
-      
-      # volume credential request?
-      elif volume_name is not None and slice_name is not None:
-         
-         # get the VolumeSlice record
-         vs = get_volumeslice( volume_name, slice_name )
-         if vs is None:
-            # not found
-            self.send_error( 404 )
-            return
-         
-         else:
-            ret = vs.credentials_blob 
-            if ret is not None:
-               self.reply_data( vs.credentials_blob )
-            else:
-               # credentials were never generated for this VolumeSlice
-               logger.error("No credentials blob for VolumeSlice(%s, %s)" % (volume_name, slice_name))
-               self.send_error( 503 )
-            return
-         
-      else:
-         # shouldn't get here...
-         self.send_error( 500 )
-         return 
-   
-   
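-#-------------------------------
-# A minimal client-side sketch of the poll protocol documented above.  It is
-# illustrative only (nothing in the observer calls it), and it assumes the
-# credential server is reachable on localhost.
-def poll_credentials_sketch( slice_name, volume_name, portnum=None ):
-   """
-   Fetch the sealed credentials blob for (slice, volume) over HTTP, the way
-   a polling syndicated.py instance would.  The caller still has to verify
-   and unseal the reply with the slice secret.
-   """
-   import urllib2
-   
-   if portnum is None:
-      portnum = CONFIG.SYNDICATE_HTTP_PORT
-   
-   url = "http://localhost:%s/%s/%s" % (portnum, urllib.quote(slice_name), urllib.quote(volume_name))
-   
-   try:
-      resp = urllib2.urlopen( url, timeout=60 )
-      return resp.read()
-   except urllib2.URLError, ue:
-      logger.error("GET %s failed: %s" % (url, ue))
-      return None
-
-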
-#-------------------------------
-class CredentialServer( BaseHTTPServer.HTTPServer ):
-   
-   def __init__(self, private_key_pem, observer_secret, server, req_handler ):
-      self.private_key_pem = private_key_pem
-      self.observer_secret = observer_secret
-      BaseHTTPServer.HTTPServer.__init__( self, server, req_handler )
-
-
-#-------------------------------
-def credential_server_spawn( old_exit_status ):
-   """
-   Start our credential server (i.e. in a separate process, started by the watchdog)
-   """
-   
-   setproctitle.setproctitle( "syndicate-credential-server" )
-   
-   private_key = syndicate_storage.read_private_key( CONFIG.SYNDICATE_PRIVATE_KEY )
-   if private_key is None:
-      # exit code 255 will be ignored...
-      logger.error("Cannot load private key.  Exiting...")
-      sys.exit(255)
-   
-   logger.info("Starting Syndicate Observer credential server on port %s" % CONFIG.SYNDICATE_HTTP_PORT)
-               
-   srv = CredentialServer( private_key.exportKey(), CONFIG.SYNDICATE_OPENCLOUD_SECRET, ('', CONFIG.SYNDICATE_HTTP_PORT), CredentialServerHandler)
-   srv.serve_forever()
-
-
-#-------------------------------
-def ensure_credential_server_running( foreground=False, run_once=False ):
-   """
-   Instantiate our credential server and keep it running.
-   """
-   
-   # is the watchdog running?
-   pids = syndicate_watchdog.find_by_attrs( "syndicate-credential-server-watchdog", {} )
-   if len(pids) > 0:
-      # it's running
-      return True
-   
-   if foreground:
-      # run in foreground 
-      
-      if run_once:
-         return credential_server_spawn( 0 )
-      
-      else:
-         return syndicate_watchdog.main( credential_server_spawn, respawn_exit_statuses=range(1,254) )
-      
-   
-   # not running, and not foregrounding.  fork a new one
-   try:
-      watchdog_pid = os.fork()
-   except OSError, oe:
-      logger.error("Failed to fork, errno = %s" % oe.errno)
-      return False
-   
-   if watchdog_pid == 0:
-      
-      # child -- become the watchdog
-      setproctitle.setproctitle( "syndicate-credential-server-watchdog" )
-      
-      if run_once:
-         syndicate_daemon.daemonize( lambda: credential_server_spawn(0), logfile_path=getattr(CONFIG, "SYNDICATE_HTTP_LOGFILE", None) )
-      
-      else:
-         syndicate_daemon.daemonize( lambda: syndicate_watchdog.main( credential_server_spawn, respawn_exit_statuses=range(1,254) ), logfile_path=getattr(CONFIG, "SYNDICATE_HTTP_LOGFILE", None) )
-
-
-#-------------------------------
-# Begin functional tests.
-# Any method starting with ft_ is a functional test.
-#-------------------------------
-  
-#-------------------------------
-def ft_syndicate_access():
-    """
-    Functional tests for ensuring objects exist and don't exist in Syndicate.
-    """
-    
-    fake_user = FakeObject()
-    fake_user.email = "fakeuser@opencloud.us"
-
-    print "\nensure_user_exists(%s)\n" % fake_user.email
-    ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )
-
-    print "\nensure_user_exists(%s)\n" % fake_user.email
-    ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )
-
-    fake_volume = FakeObject()
-    fake_volume.name = "fakevolume"
-    fake_volume.description = "This is a fake volume, created for functional testing"
-    fake_volume.blocksize = 1024
-    fake_volume.cap_read_data = True 
-    fake_volume.cap_write_data = True 
-    fake_volume.cap_host_data = False
-    fake_volume.archive = False
-    fake_volume.private = True
-    
-    # test idempotency
-    print "\nensure_volume_exists(%s)\n" % fake_volume.name
-    ensure_volume_exists( fake_user.email, fake_volume )
-
-    print "\nensure_volume_exists(%s)\n" % fake_volume.name
-    ensure_volume_exists( fake_user.email, fake_volume )
-    
-    print "\nensure_volume_access_right_exists(%s,%s)\n" % (fake_user.email, fake_volume.name)
-    ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )
-    
-    print "\nensure_volume_access_right_exists(%s,%s)\n" % (fake_user.email, fake_volume.name)
-    ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )
-    
-    print "\nensure_volume_access_right_absent(%s,%s)\n" % (fake_user.email, fake_volume.name)
-    ensure_volume_access_right_absent( fake_user.email, fake_volume.name )
-    
-    print "\nensure_volume_access_right_absent(%s,%s)\n" % (fake_user.email, fake_volume.name)
-    ensure_volume_access_right_absent( fake_user.email, fake_volume.name )
- 
-    print "\nensure_volume_absent(%s)\n" % fake_volume.name
-    ensure_volume_absent( fake_volume.name )
-
-    print "\nensure_volume_absent(%s)\n" % fake_volume.name
-    ensure_volume_absent( fake_volume.name )
-
-    print "\nensure_user_absent(%s)\n" % fake_user.email
-    ensure_user_absent( fake_user.email )
-
-    print "\nensure_user_absent(%s)\n" % fake_user.email
-    ensure_user_absent( fake_user.email )
-    
-    
-    
-    
-    print "\nensure_principal_exists(%s)\n" % fake_user.email
-    ensure_principal_exists( fake_user.email, "asdf", is_admin=False, max_UGs=1100, max_RGs=1 )
-    
-    print "\nensure_principal_exists(%s)\n" % fake_user.email
-    ensure_principal_exists( fake_user.email, "asdf", is_admin=False, max_UGs=1100, max_RGs=1 )
-
-    print "\nensure_volume_exists(%s)\n" % fake_volume.name
-    ensure_volume_exists( fake_user.email, fake_volume )
-
-    print "\nsetup_volume_access(%s, %s)\n" % (fake_user.email, fake_volume.name)
-    setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, "abcdef" )
-    
-    print "\nsetup_volume_access(%s, %s)\n" % (fake_user.email, fake_volume.name)
-    setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, "abcdef" )
-    
-    print "\nteardown_volume_access(%s, %s)\n" % (fake_user.email, fake_volume.name )
-    teardown_volume_access( fake_user.email, fake_volume.name )
-    
-    print "\nteardown_volume_access(%s, %s)\n" % (fake_user.email, fake_volume.name )
-    teardown_volume_access( fake_user.email, fake_volume.name )
-    
-    print "\nensure_volume_absent(%s)\n" % fake_volume.name
-    ensure_volume_absent( fake_volume.name )
-
-    print "\nensure_principal_absent(%s)\n" % fake_user.email
-    ensure_principal_absent( fake_user.email )
-    
-
-
-#-------------------------------
-def ft_volumeslice( slice_name ):
-    """
-    Functional tests for reading VolumeSlice information
-    """
-    print "slice: %s" % slice_name
-    
-    volumes = get_volumeslice_volume_names( slice_name )
-    if volumes is None:
-       volumes = []
-    
-    print "volumes mounted in slice %s:" % slice_name
-    for v in volumes:
-       print "   %s:" % v
-      
-       vs = get_volumeslice( v, slice_name )
-       
-       print "      %s" % dir(vs)
-          
-
-#-------------------------------
-def ft_get_slice_hostnames( slice_name ):
-   """
-   Functional tests for getting slice hostnames
-   """
-   
-   print "Get slice hostnames for %s" % slice_name
-   
-   hostnames = get_slice_hostnames( slice_name )
-   import pprint 
-   
-   pp = pprint.PrettyPrinter()
-   
-   pp.pprint( hostnames )
-
-
-#-------------------------------
-def ft_syndicate_principal():
-   """
-   Functional tests for creating, reading, and deleting SyndicatePrincipals.
-   """
-   print "generating key pair"
-   pubkey_pem, privkey_pem = api.generate_key_pair( 4096 )
-   
-   user_email = "fakeuser@opencloud.us"
-   
-   print "saving principal"
-   put_principal_data( user_email, "asdf", pubkey_pem, privkey_pem )
-   
-   print "fetching principal private key"
-   saved_privkey_pem = get_principal_pkey( user_email, "asdf" )
-   
-   assert saved_privkey_pem is not None, "Could not fetch saved private key"
-   assert saved_privkey_pem == privkey_pem, "Saved private key does not match actual private key"
-   
-   print "delete principal"
-   
-   delete_principal_data( user_email )
-   
-   print "make sure its deleted..."
-   
-   saved_privkey_pem = get_principal_pkey( user_email, "asdf" )
-   
-   assert saved_privkey_pem is None, "Principal key not deleted"
-   
-
-#-------------------------------
-def ft_credential_server():
-   """
-   Functional test for the credential server
-   """
-   ensure_credential_server_running( run_once=True, foreground=True )
-
-
-#-------------------------------
-def ft_seal_and_unseal():
-    """
-    Functional test for sealing/unsealing data
-    """
-    print "generating key pair"
-    pubkey_pem, privkey_pem = api.generate_key_pair( 4096 )
-    
-    sealed_buf = create_sealed_and_signed_blob( privkey_pem, "foo", "hello world")
-    print "sealed data is:\n\n%s\n\n" % sealed_buf
-
-    buf = verify_and_unseal_blob( pubkey_pem, "foo", sealed_buf )
-    print "unsealed data is: \n\n%s\n\n" % buf
-    
-
-# run functional tests
-if __name__ == "__main__":
-    sys.path.append("/opt/xos")
-    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
-
-    if len(sys.argv) < 2:
-      print "Usage: %s testname [args]" % sys.argv[0]
-      sys.exit(1)
-      
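-    # NOTE: arguments are spliced verbatim into the eval() call below, so
-    # string arguments must carry their own quotes -- e.g. pass '"myslice"'
-    # (not myslice) when invoking the volumeslice test from the shell.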
-    # call a method starting with ft_, and then pass the rest of argv as its arguments
-    testname = sys.argv[1]
-    ft_testname = "ft_%s" % testname
-    
-    test_call = "%s(%s)" % (ft_testname, ",".join(sys.argv[2:]))
-   
-    print "calling %s" % test_call
-   
-    rc = eval( test_call )
-   
-    print "result = %s" % rc
-      
-    
diff --git a/xos/observers/syndicate/syndicatelib_config/__init__.py b/xos/observers/syndicate/syndicatelib_config/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/xos/observers/syndicate/syndicatelib_config/__init__.py
+++ /dev/null
diff --git a/xos/observers/syndicate/syndicatelib_config/config-jude.py b/xos/observers/syndicate/syndicatelib_config/config-jude.py
deleted file mode 100644
index 9e0f1fd..0000000
--- a/xos/observers/syndicate/syndicatelib_config/config-jude.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python
-
-# configuration for syndicatelib
-SYNDICATE_SMI_URL="http://localhost:8080"
-
-SYNDICATE_OPENID_TRUSTROOT="http://localhost:8081"
-
-SYNDICATE_OPENCLOUD_USER="judecn@gmail.com"
-SYNDICATE_OPENCLOUD_PASSWORD="nya"
-
-SYNDICATE_PYTHONPATH="/home/jude/Desktop/research/git/syndicate/build/out/python"
-
-SYNDICATE_PRIVATE_KEY="/home/jude/Desktop/research/git/syndicate/opencloud/observers/syndicate/syndicatelib_config/pollserver.pem"
-SYNDICATE_OPENCLOUD_SECRET="e4988309a5005edb8ea185f16f607938c0fb7657e4d7609853bcb7c4884d1c92"
-
-SYNDICATE_HTTP_PORT=65321
-
-SYNDICATE_RG_CLOSURE="/home/jude/Desktop/research/git/syndicate/build/out/python/syndicate/rg/drivers/disk"
-SYNDICATE_RG_DEFAULT_PORT=38800
-
-DEBUG=True
diff --git a/xos/observers/syndicate/syndicatelib_config/config-opencloud.py b/xos/observers/syndicate/syndicatelib_config/config-opencloud.py
deleted file mode 100644
index b3add16..0000000
--- a/xos/observers/syndicate/syndicatelib_config/config-opencloud.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python
-
-# ---------------------------------
-# This is the configuration file used by the Syndicate observer.
-# It is a well-formed Python file, and will be imported into the
-# observer as a Python module.  This means you can run any config-
-# generation code here you like, but all of the following global 
-# variables must be defined.
-# ---------------------------------
-
-# URL to the Syndicate SMI.  For example, https://syndicate-metadata.appspot.com
-SYNDICATE_SMI_URL="http://localhost:8080"
-
-# If you are going to use OpenID to authenticate the Syndicate instance daemon,
-# this is the OpenID provider URL.  It is currently used only to generate 
-# identity pages for users, so you can put whatever you want here for now.
-SYNDICATE_OPENID_TRUSTROOT="http://localhost:8081"
-
-# This is the observer's user account on Syndicate.  You must create it out-of-band
-# prior to using the observer, and it must be an admin user since it will
-# create other users (i.e. for slices).
-SYNDICATE_OPENCLOUD_USER="jcnelson@cs.princeton.edu"
-
-# This is the password for the observer to authenticate itself to Syndicate.
-SYNDICATE_OPENCLOUD_PASSWORD="nya"
-
-# If the observer uses public-key authentication with Syndicate, you will 
-# need to identify the absolute path to its private key here.  It must be 
-# a 4096-bit PEM-encoded RSA key, and the Syndicate observer's user account
-# must have been given the public key on activation.
-SYNDICATE_OPENCLOUD_PKEY=None
-
-# This is the location on disk where Syndicate observer code can be found, 
-# if it is not already in the Python path.  This is optional.
-SYNDICATE_PYTHONPATH="/root/syndicate/build/out/python"
-
-# This is the location of the observer's private key.  It must be an absolute
-# path, and refer to a 4096-bit PEM-encoded RSA key.
-SYNDICATE_PRIVATE_KEY="/opt/xos/observers/syndicate/syndicatelib_config/pollserver.pem"
-
-# This is the master secret used to generate secrets to seal sensitive information sent to the 
-# Syndicate instance mount daemons.  It is also used to seal sensitive information
-# stored to the Django database.  
-# TODO: think of a way to not have to store this on disk.  Maybe we feed it into the
-# observer when it starts up?
-SYNDICATE_OPENCLOUD_SECRET="e4988309a5005edb8ea185f16f607938c0fb7657e4d7609853bcb7c4884d1c92"
-
-# This is the default port number on which a Syndicate Replica Gateway
-# will be provisioned.  It's a well-known port, and can be the same across
-# instances, since in OpenCloud, an RG instance only listens to localhost.
-SYNDICATE_RG_DEFAULT_PORT=38800
-
-# This is the absolute path to the RG's storage driver (which will be automatically
-# pushed to instances by Syndicate).  See https://github.com/jcnelson/syndicate/wiki/Replica-Gateways
-SYNDICATE_RG_CLOSURE=None
-
-# This is the port number the observer listens on for GETs from the Syndicate instance mount 
-# daemons.  Normally, the observer pushes (encrypted) commands to the daemons, but if the 
-# daemons are NAT'ed or temporarily partitioned, they will pull commands instead.
-SYNDICATE_HTTP_PORT=65321
-
-# This is the path to the logfile for the observer's HTTP server.
-SYNDICATE_HTTP_LOGFILE="/tmp/syndicate-observer.log"
-
-# This is the number of seconds to wait for pushing a slice credential before timing out.
-SYNDICATE_HTTP_PUSH_TIMEOUT=60
-
-# This is the port number the Syndicate instance mount daemons listen on.  The observer will 
-# push commands to them on this port.
-SYNDICATE_SLIVER_PORT=65322
-
-# If true, print verbose debug messages.
-DEBUG=True
diff --git a/xos/observers/syndicate/syndicatelib_config/config.py b/xos/observers/syndicate/syndicatelib_config/config.py
deleted file mode 100644
index b3add16..0000000
--- a/xos/observers/syndicate/syndicatelib_config/config.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python
-
-# ---------------------------------
-# This is the configuration file used by the Syndicate observer.
-# It is a well-formed Python file, and will be imported into the
-# observer as a Python module.  This means you can run any config-
-# generation code here you like, but all of the following global 
-# variables must be defined.
-# ---------------------------------
-
-# URL to the Syndicate SMI.  For example, https://syndicate-metadata.appspot.com
-SYNDICATE_SMI_URL="http://localhost:8080"
-
-# If you are going to use OpenID to authenticate the Syndicate instance daemon,
-# this is the OpenID provider URL.  It is currently used only to generate 
-# identity pages for users, so you can put whatever you want here for now.
-SYNDICATE_OPENID_TRUSTROOT="http://localhost:8081"
-
-# This is the observer's user account on Syndicate.  You must create it out-of-band
-# prior to using the observer, and it must be an admin user since it will
-# create other users (i.e. for slices).
-SYNDICATE_OPENCLOUD_USER="jcnelson@cs.princeton.edu"
-
-# This is the password for the observer to authenticate itself to Syndicate.
-SYNDICATE_OPENCLOUD_PASSWORD="nya"
-
-# If the observer uses public-key authentication with Syndicate, you will 
-# need to identify the absolute path to its private key here.  It must be 
-# a 4096-bit PEM-encoded RSA key, and the Syndicate observer's user account
-# must have been given the public key on activation.
-SYNDICATE_OPENCLOUD_PKEY=None
-
-# This is the location on disk where Syndicate observer code can be found, 
-# if it is not already in the Python path.  This is optional.
-SYNDICATE_PYTHONPATH="/root/syndicate/build/out/python"
-
-# This is the location of the observer's private key.  It must be an absolute
-# path, and refer to a 4096-bit PEM-encoded RSA key.
-SYNDICATE_PRIVATE_KEY="/opt/xos/observers/syndicate/syndicatelib_config/pollserver.pem"
-
-# This is the master secret used to generate secrets to seal sensitive information sent to the 
-# Syndicate instance mount daemons.  It is also used to seal sensitive information
-# stored to the Django database.  
-# TODO: think of a way to not have to store this on disk.  Maybe we feed it into the
-# observer when it starts up?
-SYNDICATE_OPENCLOUD_SECRET="e4988309a5005edb8ea185f16f607938c0fb7657e4d7609853bcb7c4884d1c92"
-
-# This is the default port number on which a Syndicate Replica Gateway
-# will be provisioned.  It's a well-known port, and can be the same across
-# instances, since in OpenCloud, an RG instance only listens to localhost.
-SYNDICATE_RG_DEFAULT_PORT=38800
-
-# This is the absolute path to the RG's storage driver (which will be automatically
-# pushed to instances by Syndicate).  See https://github.com/jcnelson/syndicate/wiki/Replica-Gateways
-SYNDICATE_RG_CLOSURE=None
-
-# This is the port number the observer listens on for GETs from the Syndicate instance mount 
-# daemons.  Normally, the observer pushes (encrypted) commands to the daemons, but if the 
-# daemons are NAT'ed or temporarily partitioned, they will pull commands instead.
-SYNDICATE_HTTP_PORT=65321
-
-# This is the path to the logfile for the observer's HTTP server.
-SYNDICATE_HTTP_LOGFILE="/tmp/syndicate-observer.log"
-
-# This is the number of seconds to wait for pushing a slice credential before timing out.
-SYNDICATE_HTTP_PUSH_TIMEOUT=60
-
-# This is the port number the Syndicate instance mount daemons listen on.  The observer will 
-# push commands to them on this port.
-SYNDICATE_SLIVER_PORT=65322
-
-# If true, print verbose debug messages.
-DEBUG=True
diff --git a/xos/observers/syndicate/syndicatelib_config/pollserver.pem b/xos/observers/syndicate/syndicatelib_config/pollserver.pem
deleted file mode 100644
index cb50de7..0000000
--- a/xos/observers/syndicate/syndicatelib_config/pollserver.pem
+++ /dev/null
@@ -1,51 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIJKwIBAAKCAgEA6yeoVsm+Yf4sgW/9kzk2NfwkbHloIKXUOpi5x5LkEbnohNRC
-RIGjiMJJp/OefduU3c14h/K6Qefi9j4dw4pyvh2QP36K4lJObIpKAdohHjVjxHqK
-12bVXvCpCAbJHkiX8VK5HGJPDr6sJke1vPTrP6RSWxT7ZRawIBInKuT/OVshskzh
-kOwVXb5ct0AXjRZ6FBdXvNcJnNONKRCIuFHOx2roWLsdgTlPI3+bJim2dQ0JKyHh
-uaTPguZ4s23sCuSKyXCh/X9yVloxIraY6KdaAKQZLyANLfXQdsyyH69kQvvBEZ2R
-EXD0c1qIZwuIm68UH+60BwTPdXGWSL73C0Zsq36vZOadFPb0pmz/o4CuguILNA3i
-560MKcVvQ8HVqA56z+v8pE0TRp0ajTDtpW2ee+t1cXE8VzCwthkIxmneFk+ityoD
-o1N+fBUu4lXJ3kl2qGk+0KECqJ6sc/QN+Ft97JTTRshSzn1kqIlKQoZo3u0Jeo/G
-PFZ0b13/DxYA7nvjt2az48h0VL4mNf5tzDr8GxOK4lYoWIGzKjZLDeJRyxLOCK5j
-F/AvbbSnegT0O/vamn5EoN8HfooH5qiJdPDerriPsgN3HlcY6QjrY5phmAFiGa5T
-X1j1VNb5oamRslgPv3rwTvioTaY/wUmrHLBuU6Sqg/WGrLO2Bocg0USMbG8CAwEA
-AQKCAgEArxuO7WG5lXsSZSih6Rm3Vqf175jQg085JJFJ9mVZ1CFeFluBJUZsIpCb
-DKgLI6l5x1kUIhgLvrwQdFF5FH1qSEv3eHCgtzuXDphD1/E4rCgRrOObtB7tUI9h
-L4ruBNEF5Dw3f/1s5Yvy4WaQ3K58551TfmO3eGVWresWo4h2zZ0hEIbTiXljx7TT
-kdn2L6fHLGLdgM+YZuHZwfR/+tFga3sencRoiivE1KhXPindpngYlbfbQMSLiexZ
-gTOfi9T3zF1FI2HeIJN092aFounLyaJo5oC1j732iCCRm6qdvIuAD8AHoLc+MQ//
-dsxN47CSCd1Uzc01Nz1oLa+WgxzkGbsGNO2eVkRj/xzB0Rld/+Q1YQvn1TnWZFuG
-nXXKi+VwSX8htpDaZL5+hWVy39YXUQcBkTzBIS69tdfd7HCZS0JnKeOMYQFgvANH
-0/J529l8m0oHpex4DdW1scHXgcBOq6oD6KVkiNurfWZu/mWwdEnxAKtRFZc/xFfh
-a4kbTgNk3unGL+gZzeWL1YuIM843Ia4J8V0PYH7GueeZBaXyiT7r4x5hgQ57ObkX
-K9wlgrvSHBNq4OhzygTNs37vJwu38ry2AGmA8LuiFBeVCsVhMk3yVz4A6fXjwWH8
-266gNuODIiHelahvz/IBGLdrjnbA4SYaYQh1SYKfCRdlA2yNlBECggEBAPcqiWP/
-XUCsCy9vMbCvZNCUyr4nqTSvWkGsie6t+yIC0pYCynKdrL9WNF5zee9SVmtcBO0Q
-z+aff8TZAax3tWvD5GzlQOQh1l4QBj30uatklQ0nvwbw+gf5EFG2QerPakwyfC59
-dSagxchzpjloBGniq7jqc6vL3xlZ62vnOLHf+nOQXzDcZ7QK/uLRKj2r01D5+9lh
-08Ah42QID5VQL/NMyg2lylXaPXx6TnSMjJVjzNmLRCIRlOstAOt3isBJ21sT0LOk
-lCGvuF//cwS7VABRMa0TspSEkuMbgFw0XEZStkh68eEUVqax+HHfa1rlxobSIwib
-1Oa9s7KbQNaozgUCggEBAPOPOSLazItJ4OOFu8/69M33Ag8hpPZNkMw1fw99p2fD
-KnZYlEWHgF4Z76gkozHh+nk8HhohvYfIhmFYEHQOXfmgljsz3jFJKwTEnfN7IsZA
-C3TVl6OVuY2rjhBOe3137CYHn9d8KRaJAKdyd038LK29Yy+FvUVw6LD4QUoRiA21
-9sOrhO/Wgcohnqk5yVnXtBkp7j7qGN9d+GLZVAVOqKjniQqy9ir3IdLYmB801t9P
-TcbucmgEzs/cmx7d/jv1kx9/O0HHIm959Ox66hPkcG3bssJk41F6PDMOVEWiC/hc
-E5a7Mlr6M4YhuDjO1zoyQpy4Sj/MKpasnotNSL51JuMCggEBALhYkYBzximmJ/GJ
-DZaqOpcXYt/Q1PLmlnrFJVtPiC8ly8r26efykhVjRkvr9NX6o1oPl9z43Rc1fyZi
-dE0eO8HUqVpO4sdENY6ShRVQoeqjakgVjPSwZsvrh7BqL1/is3WBcf16tRXKc7m+
-CAxo+GHBHjMdKojH1e4ikuQ34KFKXJI068qVmQM/8DtbphW5QjLzQFQyEq0KmX7S
-RE0pMZpVe54SOYcu7w0Ya8uhyHjjprXamUaPtnJxbm4xCtvAOksDzHUwGwvE888l
-x7OPxGc4J8TfHCKJfsTEjkg3BVut9Sa6DA3EDZzmwFauPHPfTOLheB/Dmlc+xfhA
-s2tnG8ECggEBAKiLbFaaYwHg1iec3CNI3y/IxzwBZE6tzo4CVzM5GSfM/w12ruSO
-qF52REpvUB+s6dALsikTQD0+nv+uGXS2nIGqh0vg0Nn6cDKUfVmI1L+sgkEPrigd
-7JIFLgJKzVo+KsUGca6E1Uoq9LDrnXPyFlkEviacviXXxK7ynPvMtgIG8gTmJNBz
-+M0QBuPEgXoSsybWxW/0P9ITDVgaXPJvRHfeAg/NWFzTOCzYhizSO/+8uW34hGNH
-MHbXiuEJbm2/u1gIi9ExJLtQAhXD2Uh6xPLBHis39bbkh9QtDlRBl1b/IO8mC+q5
-Sf6ARyPIv1gef8pEHd2YQ8CRJAXyLWzfVVECggEBANrvnE2hQaYqe/BM9QGN9Cam
-CUTTBhvQDTBnpBYv8iQCgy0nmmVZ07j0yjR/I5wipcWAB7Bskv1rfh/3VpWUGCcR
-2MnPobZnvL1Dl22G7P8HBUiIA+NGWNdR5FyIL/yLy2BVEs7dNeK5WolD8IQP+fTw
-E9Mvd6ns2TIveXMZFtqRja3H426iv38QqWg0RmmhcmnSkD7SqAZWGI+OoRsUJ2Et
-bg4N9Cb46Gjqdh8SQF+rXYfL1AWnjMM7/AhJLMoWWb0sBzqA7UeJxLlAt1Lgnnsl
-P2nszH+Ia9V0dSlr79haGo80FALM8TiKBAQ/bTktqP5vOWSlCzHj7K30Bil36TQ=
------END RSA PRIVATE KEY-----
diff --git a/xos/observers/vbng/model-deps b/xos/observers/vbng/model-deps
deleted file mode 100644
index 0967ef4..0000000
--- a/xos/observers/vbng/model-deps
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/xos/observers/vbng/run.sh b/xos/observers/vbng/run.sh
deleted file mode 100755
index efb586f..0000000
--- a/xos/observers/vbng/run.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#if [[ ! -e ./vbng-observer.py ]]; then
-#    ln -s ../../xos-observer.py vbng-observer.py
-#fi
-
-export XOS_DIR=/opt/xos
-python vbng-observer.py  -C $XOS_DIR/observers/vbng/vbng_observer_config
diff --git a/xos/observers/vbng/start.sh b/xos/observers/vbng/start.sh
deleted file mode 100755
index 98008f4..0000000
--- a/xos/observers/vbng/start.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#if [[ ! -e ./vbng-observer.py ]]; then
-#    ln -s ../../xos-observer.py vbng-observer.py
-#fi
-
-export XOS_DIR=/opt/xos
-nohup python vbng-observer.py  -C $XOS_DIR/observers/vbng/vbng_observer_config > /dev/null 2>&1 &
diff --git a/xos/observers/vbng/steps/sync_vbngtenant.py b/xos/observers/vbng/steps/sync_vbngtenant.py
deleted file mode 100644
index c997f4f..0000000
--- a/xos/observers/vbng/steps/sync_vbngtenant.py
+++ /dev/null
@@ -1,142 +0,0 @@
-import os
-import requests
-import socket
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from synchronizers.base.ansible import run_template_ssh
-from core.models import Service
-from services.cord.models import VCPEService, VCPETenant, VBNGTenant, VBNGService
-from services.hpc.models import HpcService, CDNPrefix
-from util.logger import Logger, logging
-
-# VBNG_API = "http://10.0.3.136:8181/onos/virtualbng/privateip/"
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-class SyncVBNGTenant(SyncStep):
-    provides=[VCPETenant]
-    observes=VCPETenant
-    requested_interval=0
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-
-    def fetch_pending(self, deleted):
-        if (not deleted):
-            objs = VBNGTenant.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
-        else:
-            objs = VBNGTenant.get_deleted_tenant_objects()
-
-        return objs
-
-    def defer_sync(self, o, reason):
-        logger.info("defer object %s due to %s" % (str(o), reason))
-        raise Exception("defer object %s due to %s" % (str(o), reason))
-
-    def get_vbng_service(self, o):
-        if not o.provider_service:
-             raise Exception("vBNG tenant %s has no provider_service" % str(o.id))
-        services = VBNGService.get_service_objects().filter(id = o.provider_service.id)
-        if not services:
-             raise Exception("vBNG tenant %s is associated with the wrong kind of provider_service" % str(o.id))
-        return services[0]
-
-    def get_vbng_url(self, o):
-        service = self.get_vbng_service(o)
-
-        # if the service object specifies a vbng_url, then use it
-        if service.vbng_url:
-            return service.vbng_url
-
-        # otherwise, see if the service has tenancy in ONOS
-        for tenant in service.subscribed_tenants.all():
-            if tenant.provider_service and tenant.provider_service.kind == "onos":
-                onos_service = tenant.provider_service
-                if not onos_service.slices.exists():
-                    raise Exception("vBNG service is linked to an ONOSApp, but the App's Service has no slices")
-                onos_slice = onos_service.slices.all()[0]
-                if not onos_slice.instances.exists():
-                    raise Exception("vBNG service is linked to an ONOSApp, but the App's Service's Slice has no instances")
-                instance = onos_slice.instances.all()[0]
-
-                #onos_app = ONOSApp.objects.filter(id = tenant.id)
-                #instance = onos_app.instance
-                #if not instance:
-                #    raise Exception("ONOSApp has no instance")
-
-                if not instance.instance_name:
-                    raise Exception("vBNG service is linked to an ONOSApp, but the App's Service's Slice's first instance is not instantiated")
-                ip = instance.get_network_ip("nat")
-                if not ip:
-                    raise Exception("vBNG service is linked to an ONOSApp, but the App's Service's Slice's first instance does not have an ip")
-
-                logger.info("Using ip %s from ONOS Instance %s" % (ip, instance))
-
-                return "http://%s:8181/onos/virtualbng/" % ip
-
-        raise Exception("vBNG service does not have vbng_url set, and is not linked to an ONOSApp")
-
-    def get_private_interface(self, o):
-        vcpes = VCPETenant.get_tenant_objects().all()
-        vcpes = [x for x in vcpes if (x.vbng is not None) and (x.vbng.id == o.id)]
-        if not vcpes:
-            raise Exception("No vCPE tenant is associated with vBNG %s" % str(o.id))
-        if len(vcpes)>1:
-            raise Exception("More than one vCPE tenant is associated with vBNG %s" % str(o.id))
-
-        vcpe = vcpes[0]
-        instance = vcpe.instance
-
-        if not instance:
-            raise Exception("No instance associated with vBNG %s" % str(o.id))
-
-        if not vcpe.wan_ip:
-            self.defer_sync(o, "does not have a WAN IP yet")
-
-        if not vcpe.wan_container_mac:
-            # this should never happen; container MAC is computed from WAN IP
-            self.defer_sync(o, "does not have a WAN container MAC yet")
-
-        return (vcpe.wan_ip, vcpe.wan_container_mac, vcpe.instance.node.name)
-
-    def sync_record(self, o):
-        logger.info("sync'ing VBNGTenant %s" % str(o))
-
-        if not o.routeable_subnet:
-            (private_ip, private_mac, private_hostname) = self.get_private_interface(o)
-            logger.info("contacting vBNG service to request mapping for private ip %s mac %s host %s" % (private_ip, private_mac, private_hostname) )
-
-            url = self.get_vbng_url(o) + "privateip/%s/%s/%s" % (private_ip, private_mac, private_hostname)
-            logger.info( "vbng url: %s" % url )
-            r = requests.post(url )
-            if (r.status_code != 200):
-                raise Exception("Received error from bng service (%d)" % r.status_code)
-            logger.info("received public IP %s from private IP %s" % (r.text, private_ip))
-
-            if r.text == "0":
-                raise Exception("VBNG service failed to return a routeable_subnet (probably ran out)")
-
-            o.routeable_subnet = r.text
-            o.mapped_ip = private_ip
-            o.mapped_mac = private_mac
-            o.mapped_hostname = private_hostname
-
-        o.save()
-
-    def delete_record(self, o):
-        logger.info("deleting VBNGTenant %s" % str(o))
-
-        if o.mapped_ip:
-            private_ip = o.mapped_ip
-            logger.info("contacting vBNG service to delete private ip %s" % private_ip)
-            r = requests.delete(self.get_vbng_url(o) + "privateip/%s" % private_ip, )
-            if (r.status_code != 200):
-                raise Exception("Received error from bng service (%d)" % r.status_code)
-
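The REST contract exercised by sync_record() and delete_record() above is small: POST privateip/<ip>/<mac>/<hostname> returns the routeable subnet (or "0" on exhaustion), and DELETE privateip/<ip> releases the mapping. A minimal sketch of driving it directly, assuming a reachable vBNG endpoint (the address and sample values below are illustrative):

    import requests

    base_url = "http://10.0.3.136:8181/onos/virtualbng/"  # illustrative endpoint

    # Request a routeable mapping for a private (ip, mac, hostname) triple.
    r = requests.post(base_url + "privateip/%s/%s/%s"
                      % ("10.0.1.5", "02:42:0a:00:01:05", "node1"))
    assert r.status_code == 200
    assert r.text != "0"  # "0" means the service ran out of routeable subnets
    print("mapped to %s" % r.text)

    # Release the mapping again.
    r = requests.delete(base_url + "privateip/10.0.1.5")
    assert r.status_code == 200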
diff --git a/xos/observers/vbng/stop.sh b/xos/observers/vbng/stop.sh
deleted file mode 100755
index d49591e..0000000
--- a/xos/observers/vbng/stop.sh
+++ /dev/null
@@ -1 +0,0 @@
-pkill -9 -f vbng-observer.py
diff --git a/xos/observers/vbng/supervisor/vbng-observer.conf b/xos/observers/vbng/supervisor/vbng-observer.conf
deleted file mode 100644
index cff77b8..0000000
--- a/xos/observers/vbng/supervisor/vbng-observer.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[program:vbng-observer]
-command=python /opt/xos/observers/vbng/vbng-observer.py -C /opt/xos/observers/vbng/vbng_observer_config
diff --git a/xos/observers/vbng/vbng-observer.py b/xos/observers/vbng/vbng-observer.py
deleted file mode 100755
index d6a71ff..0000000
--- a/xos/observers/vbng/vbng-observer.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-
-# This imports and runs ../../xos-observer.py
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../..")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-observer")
-mod.main()
diff --git a/xos/observers/vbng/vbng_observer_config b/xos/observers/vbng/vbng_observer_config
deleted file mode 100644
index b75d498..0000000
--- a/xos/observers/vbng/vbng_observer_config
+++ /dev/null
@@ -1,38 +0,0 @@
-
-[plc]
-name=plc
-deployment=VICCI
-
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-[api]
-host=128.112.171.237
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-name=vbng
-dependency_graph=/opt/xos/observers/vbng/model-deps
-steps_dir=/opt/xos/observers/vbng/steps
-sys_dir=/opt/xos/observers/vbng/sys
-deleters_dir=/opt/xos/observers/vbng/deleters
-log_file=console
-#/var/log/hpc.log
-driver=None
-pretend=False
-backoff_disabled=True
-
-[feefie]
-client_id='vicci_dev_central'
-user_id='pl'
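Each observer reads a flat INI file like the one above. A minimal sketch of inspecting it with the standard-library ConfigParser (the XOS Config class is the real consumer; this is only illustrative):

    import ConfigParser  # configparser on Python 3

    cfg = ConfigParser.ConfigParser()
    cfg.read("/opt/xos/observers/vbng/vbng_observer_config")

    # Values come back as plain strings; the caller interprets booleans and paths.
    print(cfg.get("observer", "name"))              # vbng
    print(cfg.get("observer", "dependency_graph"))  # /opt/xos/observers/vbng/model-deps
    print(cfg.get("db", "host"))                    # localhost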
diff --git a/xos/observers/vcpe/broadbandshield.py b/xos/observers/vcpe/broadbandshield.py
deleted file mode 100644
index dd2f00b..0000000
--- a/xos/observers/vcpe/broadbandshield.py
+++ /dev/null
@@ -1,396 +0,0 @@
-import requests
-import logging
-import json
-import sys
-from rest_framework.exceptions import APIException
-
-""" format of settings
-
-    ["settings"]
-        ["watershed"]
-        ["rating"]
-        ["categories"]
-        ["blocklist"]
-        ["allowlist"]
-
-    ["users"]
-        array
-            ["account_id"] - 58
-            ["reporting"] - False
-            ["name"] - Scott1
-            ["devices"]
-            ["settings"] -
-                ["watershed"]
-                ["rating"]
-                ["categories"]
-                ["blocklist"]
-                ["allowlist"]
-
-    ["devices"]
-        array
-            ["username"] - "Scott1" or "" if whole-house
-            ["uuid"] - empty
-            ["mac_address"] - mac address as hex digits in ascii
-            ["type"] - "laptop"
-            ["name"] - human readable name of device ("Scott's laptop")
-            ["settings"]
-                 ["watershed"]
-                     array
-                         array
-                             ["rating"]
-                             ["category"]
-                 ["rating"] - ["G" | "NONE"]
-                 ["categories"] - list of categories set by rating
-                 ["blocklist"] - []
-                 ["allowlist"] - []
-"""
-
-class BBS_Failure(APIException):
-    status_code=400
-    def __init__(self, why="broadbandshield error", fields={}):
-        APIException.__init__(self, {"error": "BBS_Failure",
-                            "specific_error": why,
-                            "fields": fields})
-
-
-class BBS:
-    level_map = {"PG_13": "PG13",
-                 "NONE": "OFF",
-                 "ALL": "NONE",
-                 None: "NONE"}
-
-    def __init__(self, username, password, bbs_hostname=None, bbs_port=None):
-        self.username = username
-        self.password = password
-
-        # XXX not tested on port 80
-        #self.bbs_hostname = "www.broadbandshield.com"
-        #self.bbs_port = 80
-
-        if not bbs_hostname:
-            bbs_hostname = "cordcompute01.onlab.us"
-        if not bbs_port:
-            bbs_port = 8018
-
-        self.bbs_hostname = bbs_hostname
-        self.bbs_port = int(bbs_port)
-
-        self.api = "http://%s:%d/api" % (self.bbs_hostname, self.bbs_port)
-        self.nic_update = "http://%s:%d/nic/update" % (self.bbs_hostname, self.bbs_port)
-
-        self.session = None
-        self.settings = None
-
-    def login(self):
-        self.session = requests.Session()
-        r = self.session.post(self.api + "/login", data = json.dumps({"email": self.username, "password": self.password}))
-        if (r.status_code != 200):
-            raise BBS_Failure("Failed to login (%d)" % r.status_code)
-
-    def get_account(self):
-        if not self.session:
-            self.login()
-
-        r = self.session.get(self.api + "/account")
-        if (r.status_code != 200):
-            raise BBS_Failure("Failed to get account settings (%d)" % r.status_code)
-        self.settings = r.json()
-
-        return self.settings
-
-    def post_account(self):
-        if not self.settings:
-             raise XOSProgrammingError("no settings to post")
-
-        r = self.session.post(self.api + "/account/settings", data= json.dumps(self.settings))
-        if (r.status_code != 200):
-            raise BBS_Failure("Failed to set account settings (%d)" % r.status_code)
-
-    def add_device(self, name, mac, type="tablet", username=""):
-        data = {"name": name, "mac_address": mac, "type": type, "username": username}
-        r = self.session.post(self.api + "/device", data = json.dumps(data))
-        if (r.status_code != 200):
-            raise BBS_Failure("Failed to add device (%d)" % r.status_code)
-
-    def delete_device(self, data):
-        r = self.session.delete(self.api + "/device", data = json.dumps(data))
-        if (r.status_code != 200):
-            raise BBS_Failure("Failed to delete device (%d)" % r.status_code)
-
-    def add_user(self, name, rating="NONE", categories=[]):
-        data = {"name": name, "settings": {"rating": rating, "categories": categories}}
-        r = self.session.post(self.api + "/users", data = json.dumps(data))
-        if (r.status_code != 200):
-            raise BBS_Failure("Failed to add user (%d)" % r.status_code)
-
-    def delete_user(self, data):
-        r = self.session.delete(self.api + "/users", data = json.dumps(data))
-        if (r.status_code != 200):
-            raise BBS_Failure("Failed to delete user (%d)" % r.status_code)
-
-    def clear_users_and_devices(self):
-        if not self.settings:
-            self.get_account()
-
-        for device in self.settings["devices"]:
-            self.delete_device(device)
-
-        for user in self.settings["users"]:
-            self.delete_user(user)
-
-    def get_whole_home_level(self):
-        if not self.settings:
-            self.get_account()
-
-        return self.settings["settings"]["rating"]
-
-    def sync(self, whole_home_level, users):
-        if not self.settings:
-            self.get_account()
-
-        vcpe_users = {}
-        for user in users:
-            user = user.copy()
-            user["level"] = self.level_map.get(user["level"], user["level"])
-            user["mac"] = user.get("mac", "")
-            vcpe_users[user["name"]] = user
-
-        whole_home_level = self.level_map.get(whole_home_level, whole_home_level)
-
-        if (whole_home_level != self.settings["settings"]["rating"]):
-            print "*** set whole_home", whole_home_level, "***"
-            self.settings["settings"]["rating"] = whole_home_level
-            self.post_account()
-
-        bbs_usernames = [bbs_user["name"] for bbs_user in self.settings["users"]]
-        bbs_devicenames = [bbs_device["name"] for bbs_device in self.settings["devices"]]
-
-        add_users = []
-        add_devices = []
-        delete_users = []
-        delete_devices = []
-
-        for bbs_user in self.settings["users"]:
-             bbs_username = bbs_user["name"]
-             if bbs_username in vcpe_users.keys():
-                 vcpe_user = vcpe_users[bbs_username]
-                 if bbs_user["settings"]["rating"] != vcpe_user["level"]:
-                     print "set user", vcpe_user["name"], "rating", vcpe_user["level"]
-                     #bbs_user["settings"]["rating"] = vcpe_user["level"]
-                     # add can be used as an update
-                     add_users.append(vcpe_user)
-             else:
-                 delete_users.append(bbs_user)
-
-        for bbs_device in self.settings["devices"]:
-             bbs_devicename = bbs_device["name"]
-             if bbs_devicename in vcpe_users.keys():
-                 vcpe_user = vcpe_users[bbs_devicename]
-                 if bbs_device["mac_address"] != vcpe_user["mac"]:
-                     print "set device", vcpe_user["name"], "mac", vcpe_user["mac"]
-                     #bbs_device["mac_address"] = vcpe_user["mac"]
-                     # add of a device can't be used as an update, as you'll end
-                     # up with two of them.
-                     delete_devices.append(bbs_device)
-                     add_devices.append(vcpe_user)
-             else:
-                 delete_devices.append(bbs_device)
-
-        for (username, user) in vcpe_users.iteritems():
-            if not username in bbs_usernames:
-                add_users.append(user)
-            if not username in bbs_devicenames:
-                add_devices.append(user)
-
-        for bbs_user in delete_users:
-            print "delete user", bbs_user["name"]
-            self.delete_user(bbs_user)
-
-        for bbs_device in delete_devices:
-            print "delete device", bbs_device["name"]
-            self.delete_device(bbs_device)
-
-        for vcpe_user in add_users:
-            print "add user", vcpe_user["name"], "level", vcpe_user["level"]
-            self.add_user(vcpe_user["name"], vcpe_user["level"])
-
-        for vcpe_user in add_devices:
-            print "add device", vcpe_user["name"], "mac", vcpe_user["mac"]
-            self.add_device(vcpe_user["name"], vcpe_user["mac"], "tablet", vcpe_user["name"])
-
-    def get_whole_home_rating(self):
-        return self.settings["settings"]["rating"]
-
-    def get_user(self, name):
-        for user in self.settings["users"]:
-            if user["name"]==name:
-                return user
-        return None
-
-    def get_device(self, name):
-        for device in self.settings["devices"]:
-             if device["name"]==name:
-                 return device
-        return None
-
-    def dump(self):
-        if not self.settings:
-            self.get_account()
-
-        print "whole_home_rating:", self.settings["settings"]["rating"]
-        print "users:"
-        for user in self.settings["users"]:
-            print "  user", user["name"], "rating", user["settings"]["rating"]
-
-        print "devices:"
-        for device in self.settings["devices"]:
-            print "  device", device["name"], "user", device["username"], "rating", device["settings"]["rating"], "mac", device["mac_address"]
-
-    def associate(self, ip):
-        bbs_hostname = "cordcompute01.onlab.us"
-        r = requests.get(self.nic_update, params={"hostname": "onlab.us"}, headers={"X-Forwarded-For": ip}, auth=requests.auth.HTTPBasicAuth(self.username,self.password))
-        if (r.status_code != 200):
-            raise BBS_Failure("Failed to associate account with ip (%d)" % r.status_code)
-
-def dump():
-    bbs = BBS(sys.argv[2], sys.argv[3])
-    bbs.dump()
-
-def associate():
-    if len(sys.argv)<5:
-        print "you need to specify IP address"
-        sys.exit(-1)
-
-    bbs = BBS(sys.argv[2], sys.argv[3])
-    bbs.associate(sys.argv[4])
-
-def self_test():
-    bbs = BBS(sys.argv[2], sys.argv[3])
-
-    print "*** initial ***"
-    bbs.dump()
-
-    open("bbs.json","w").write(json.dumps(bbs.settings))
-
-    # a new BBS account will throw a 500 error if it has no rating
-    bbs.settings["settings"]["rating"] = "R"
-    #bbs.settings["settings"]["category"] = [u'PORNOGRAPHY', u'ADULT', u'ILLEGAL', u'WEAPONS', u'DRUGS', u'GAMBLING', u'CYBERBULLY', u'ANONYMIZERS', u'SUICIDE', u'MALWARE']
-    #bbs.settings["settings"]["blocklist"] = []
-    #bbs.settings["settings"]["allowlist"] = []
-    #for water in bbs.settings["settings"]["watershed"]:
-    #    water["categories"]=[]
-    # delete everything
-    bbs.post_account()
-    bbs.clear_users_and_devices()
-
-    print "*** cleared ***"
-    bbs.settings=None
-    bbs.dump()
-
-    users = [{"name": "Moms pc", "level": "R", "mac": "010203040506"},
-             {"name": "Dads pc", "level": "R", "mac": "010203040507"},
-             {"name": "Jacks ipad", "level": "PG", "mac": "010203040508"},
-             {"name": "Jills iphone", "level": "G", "mac": "010203040509"}]
-
-    print "*** syncing mom-R, Dad-R, jack-PG, Jill-G, wholehome-PG-13 ***"
-
-    bbs.settings = None
-    bbs.sync("PG-13", users)
-
-    print "*** after sync ***"
-    bbs.settings=None
-    bbs.dump()
-    assert(bbs.get_whole_home_rating() == "PG-13")
-    assert(bbs.get_user("Moms pc")["settings"]["rating"] == "R")
-    assert(bbs.get_user("Dads pc")["settings"]["rating"] == "R")
-    assert(bbs.get_user("Jacks ipad")["settings"]["rating"] == "PG")
-    assert(bbs.get_user("Jills iphone")["settings"]["rating"] == "G")
-    assert(bbs.get_device("Moms pc")["mac_address"] == "010203040506")
-    assert(bbs.get_device("Dads pc")["mac_address"] == "010203040507")
-    assert(bbs.get_device("Jacks ipad")["mac_address"] == "010203040508")
-    assert(bbs.get_device("Jills iphone")["mac_address"] == "010203040509")
-
-    print "*** update whole home level ***"
-    bbs.settings=None
-    bbs.get_account()
-    bbs.settings["settings"]["rating"] = "PG"
-    bbs.post_account()
-
-    print "*** after sync ***"
-    bbs.settings=None
-    bbs.dump()
-    assert(bbs.get_whole_home_rating() == "PG")
-    assert(bbs.get_user("Moms pc")["settings"]["rating"] == "R")
-    assert(bbs.get_user("Dads pc")["settings"]["rating"] == "R")
-    assert(bbs.get_user("Jacks ipad")["settings"]["rating"] == "PG")
-    assert(bbs.get_user("Jills iphone")["settings"]["rating"] == "G")
-    assert(bbs.get_device("Moms pc")["mac_address"] == "010203040506")
-    assert(bbs.get_device("Dads pc")["mac_address"] == "010203040507")
-    assert(bbs.get_device("Jacks ipad")["mac_address"] == "010203040508")
-    assert(bbs.get_device("Jills iphone")["mac_address"] == "010203040509")
-
-    print "*** delete dad, change moms IP, change jills level to PG, change whole home to PG-13 ***"
-    users = [{"name": "Moms pc", "level": "R", "mac": "010203040511"},
-             {"name": "Jacks ipad", "level": "PG", "mac": "010203040508"},
-             {"name": "Jills iphone", "level": "PG", "mac": "010203040509"}]
-
-    bbs.settings = None
-    bbs.sync("PG-13", users)
-
-    print "*** after sync ***"
-    bbs.settings=None
-    bbs.dump()
-    assert(bbs.get_whole_home_rating() == "PG-13")
-    assert(bbs.get_user("Moms pc")["settings"]["rating"] == "R")
-    assert(bbs.get_user("Dads pc") == None)
-    assert(bbs.get_user("Jacks ipad")["settings"]["rating"] == "PG")
-    assert(bbs.get_user("Jills iphone")["settings"]["rating"] == "PG")
-    assert(bbs.get_device("Moms pc")["mac_address"] == "010203040511")
-    assert(bbs.get_device("Dads pc") == None)
-    assert(bbs.get_device("Jacks ipad")["mac_address"] == "010203040508")
-
-    print "add dad's laptop"
-    users = [{"name": "Moms pc", "level": "R", "mac": "010203040511"},
-             {"name": "Dads laptop", "level": "PG-13", "mac": "010203040512"},
-             {"name": "Jacks ipad", "level": "PG", "mac": "010203040508"},
-             {"name": "Jills iphone", "level": "PG", "mac": "010203040509"}]
-
-    bbs.settings = None
-    bbs.sync("PG-13", users)
-
-    print "*** after sync ***"
-    bbs.settings=None
-    bbs.dump()
-    assert(bbs.get_whole_home_rating() == "PG-13")
-    assert(bbs.get_user("Moms pc")["settings"]["rating"] == "R")
-    assert(bbs.get_user("Dads pc") == None)
-    assert(bbs.get_user("Dads laptop")["settings"]["rating"] == "PG-13")
-    assert(bbs.get_user("Jacks ipad")["settings"]["rating"] == "PG")
-    assert(bbs.get_user("Jills iphone")["settings"]["rating"] == "PG")
-    assert(bbs.get_device("Moms pc")["mac_address"] == "010203040511")
-    assert(bbs.get_device("Dads pc") == None)
-    assert(bbs.get_device("Dads laptop")["mac_address"] == "010203040512")
-    assert(bbs.get_device("Jacks ipad")["mac_address"] == "010203040508")
-
-    #bbs.add_user("tom", "G", [u'PORNOGRAPHY', u'ADULT', u'ILLEGAL', u'WEAPONS', u'DRUGS', u'GAMBLING', u'SOCIAL', u'CYBERBULLY', u'GAMES', u'ANONYMIZERS', u'SUICIDE', u'MALWARE'])
-    #bbs.add_device(name="tom's iphone", mac="010203040506", type="tablet", username="tom")
-
-def main():
-    if len(sys.argv)<4:
-        print "syntax: broadbandshield.py <operation> <email> <password>"
-    print "        operation = [dump | selftest | associate]"
-        sys.exit(-1)
-
-    operation = sys.argv[1]
-
-    if operation=="dump":
-        dump()
-    elif operation=="selftest":
-        self_test()
-    elif operation=="associate":
-        associate()
-
-if __name__ == "__main__":
-    main()
-
-
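Tying together the settings structure documented at the top of broadbandshield.py and the sync() entry point, driving the class end to end looks roughly like this (host, credentials, and the user record are illustrative):

    from broadbandshield import BBS

    bbs = BBS("parent@example.com", "secret",
              bbs_hostname="cordcompute01.onlab.us", bbs_port=8018)

    # One record per user; "level" uses the vCPE names, which sync() folds
    # through BBS.level_map ("PG_13" -> "PG13", "NONE" -> "OFF", ...) first.
    users = [{"name": "Moms pc", "level": "R", "mac": "010203040506"}]

    bbs.sync("PG_13", users)  # logs in lazily, then reconciles users and devices

    bbs.settings = None       # force a re-fetch before inspecting the result
    bbs.dump()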
diff --git a/xos/observers/vcpe/files/docker.list b/xos/observers/vcpe/files/docker.list
deleted file mode 100644
index 0ee9ae0..0000000
--- a/xos/observers/vcpe/files/docker.list
+++ /dev/null
@@ -1 +0,0 @@
-deb https://get.docker.com/ubuntu docker main
diff --git a/xos/observers/vcpe/files/etc/rc.local b/xos/observers/vcpe/files/etc/rc.local
deleted file mode 100755
index 24c8f8d..0000000
--- a/xos/observers/vcpe/files/etc/rc.local
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh -e
-#
-# rc.local
-#
-# This script is executed at the end of each multiuser runlevel.
-# Make sure that the script will "exit 0" on success or any other
-# value on error.
-#
-# In order to enable or disable this script just change the execution
-# bits.
-#
-# By default this script does nothing.
-
-ufw enable
-ufw allow bootps
-ufw allow from 192.168.0.0/24
-ufw route allow in on eth1 out on eth0
-ufw route allow in on eth1 out on eth2
-
-# service dnsmasq start
-
-exit 0
diff --git a/xos/observers/vcpe/files/etc/ufw/after.init b/xos/observers/vcpe/files/etc/ufw/after.init
deleted file mode 100644
index e89217d..0000000
--- a/xos/observers/vcpe/files/etc/ufw/after.init
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/sh
-#
-# after.init: if executable, called by ufw-init. See 'man ufw-framework' for
-#             details. Note that output from these scripts is not seen via the
-#             ufw command, but instead via ufw-init.
-#
-# Copyright 2013 Canonical Ltd.
-#
-#    This program is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License version 3,
-#    as published by the Free Software Foundation.
-#
-#    This program is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-set -e
-
-case "$1" in
-start)
-    # typically required
-    ;;
-stop)
-    # typically required
-    ;;
-status)
-    # optional
-    ;;
-flush-all)
-    # optional
-    ;;
-*)
-    echo "'$1' not supported"
-    echo "Usage: after.init {start|stop|flush-all|status}"
-    ;;
-esac
diff --git a/xos/observers/vcpe/files/etc/ufw/after.rules b/xos/observers/vcpe/files/etc/ufw/after.rules
deleted file mode 100644
index 0d6c646..0000000
--- a/xos/observers/vcpe/files/etc/ufw/after.rules
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# rules.input-after
-#
-# Rules that should be run after the ufw command line added rules. Custom
-# rules should be added to one of these chains:
-#   ufw-after-input
-#   ufw-after-output
-#   ufw-after-forward
-#
-
-# Don't delete these required lines, otherwise there will be errors
-*filter
-:ufw-after-input - [0:0]
-:ufw-after-output - [0:0]
-:ufw-after-forward - [0:0]
-# End required lines
-
-# don't log noisy services by default
--A ufw-after-input -p udp --dport 137 -j ufw-skip-to-policy-input
--A ufw-after-input -p udp --dport 138 -j ufw-skip-to-policy-input
--A ufw-after-input -p tcp --dport 139 -j ufw-skip-to-policy-input
--A ufw-after-input -p tcp --dport 445 -j ufw-skip-to-policy-input
--A ufw-after-input -p udp --dport 67 -j ufw-skip-to-policy-input
--A ufw-after-input -p udp --dport 68 -j ufw-skip-to-policy-input
-
-# don't log noisy broadcast
--A ufw-after-input -m addrtype --dst-type BROADCAST -j ufw-skip-to-policy-input
-
-# don't delete the 'COMMIT' line or these rules won't be processed
-COMMIT
diff --git a/xos/observers/vcpe/files/etc/ufw/after6.rules b/xos/observers/vcpe/files/etc/ufw/after6.rules
deleted file mode 100644
index 0d99672..0000000
--- a/xos/observers/vcpe/files/etc/ufw/after6.rules
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# rules.input-after
-#
-# Rules that should be run after the ufw command line added rules. Custom
-# rules should be added to one of these chains:
-#   ufw6-after-input
-#   ufw6-after-output
-#   ufw6-after-forward
-#
-
-# Don't delete these required lines, otherwise there will be errors
-*filter
-:ufw6-after-input - [0:0]
-:ufw6-after-output - [0:0]
-:ufw6-after-forward - [0:0]
-# End required lines
-
-# don't log noisy services by default
--A ufw6-after-input -p udp --dport 137 -j ufw6-skip-to-policy-input
--A ufw6-after-input -p udp --dport 138 -j ufw6-skip-to-policy-input
--A ufw6-after-input -p tcp --dport 139 -j ufw6-skip-to-policy-input
--A ufw6-after-input -p tcp --dport 445 -j ufw6-skip-to-policy-input
--A ufw6-after-input -p udp --dport 546 -j ufw6-skip-to-policy-input
--A ufw6-after-input -p udp --dport 547 -j ufw6-skip-to-policy-input
-
-# don't delete the 'COMMIT' line or these rules won't be processed
-COMMIT
diff --git a/xos/observers/vcpe/files/etc/ufw/applications.d/openssh-server b/xos/observers/vcpe/files/etc/ufw/applications.d/openssh-server
deleted file mode 100644
index 9bbe906..0000000
--- a/xos/observers/vcpe/files/etc/ufw/applications.d/openssh-server
+++ /dev/null
@@ -1,4 +0,0 @@
-[OpenSSH]
-title=Secure shell server, an rshd replacement
-description=OpenSSH is a free implementation of the Secure Shell protocol.
-ports=22/tcp
diff --git a/xos/observers/vcpe/files/etc/ufw/before.init b/xos/observers/vcpe/files/etc/ufw/before.init
deleted file mode 100644
index 1348cb1..0000000
--- a/xos/observers/vcpe/files/etc/ufw/before.init
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/sh
-#
-# before.init: if executable, called by ufw-init. See 'man ufw-framework' for
-#              details. Note that output from these scripts is not seen via the
-#              ufw command, but instead via ufw-init.
-#
-# Copyright 2013 Canonical Ltd.
-#
-#    This program is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License version 3,
-#    as published by the Free Software Foundation.
-#
-#    This program is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-set -e
-
-case "$1" in
-start)
-    # typically required
-    ;;
-stop)
-    # typically required
-    ;;
-status)
-    # optional
-    ;;
-flush-all)
-    # optional
-    ;;
-*)
-    echo "'$1' not supported"
-    echo "Usage: before.init {start|stop|flush-all|status}"
-    ;;
-esac
diff --git a/xos/observers/vcpe/files/etc/ufw/before6.rules b/xos/observers/vcpe/files/etc/ufw/before6.rules
deleted file mode 100644
index 0b26ed8..0000000
--- a/xos/observers/vcpe/files/etc/ufw/before6.rules
+++ /dev/null
@@ -1,73 +0,0 @@
-#
-# rules.before
-#
-# Rules that should be run before the ufw command line added rules. Custom
-# rules should be added to one of these chains:
-#   ufw6-before-input
-#   ufw6-before-output
-#   ufw6-before-forward
-#
-
-# Don't delete these required lines, otherwise there will be errors
-*filter
-:ufw6-before-input - [0:0]
-:ufw6-before-output - [0:0]
-:ufw6-before-forward - [0:0]
-# End required lines
-
-
-# allow all on loopback
--A ufw6-before-input -i lo -j ACCEPT
--A ufw6-before-output -o lo -j ACCEPT
-
-# drop packets with RH0 headers
--A ufw6-before-input -m rt --rt-type 0 -j DROP
--A ufw6-before-forward -m rt --rt-type 0 -j DROP
--A ufw6-before-output -m rt --rt-type 0 -j DROP
-
-# for stateless autoconfiguration (restrict NDP messages to hop limit of 255)
--A ufw6-before-input -p icmpv6 --icmpv6-type neighbor-solicitation -m hl --hl-eq 255 -j ACCEPT
--A ufw6-before-output -p icmpv6 --icmpv6-type neighbor-solicitation -m hl --hl-eq 255 -j ACCEPT
--A ufw6-before-input -p icmpv6 --icmpv6-type neighbor-advertisement -m hl --hl-eq 255 -j ACCEPT
--A ufw6-before-output -p icmpv6 --icmpv6-type neighbor-advertisement -m hl --hl-eq 255 -j ACCEPT
--A ufw6-before-input -p icmpv6 --icmpv6-type router-solicitation -m hl --hl-eq 255 -j ACCEPT
--A ufw6-before-input -p icmpv6 --icmpv6-type router-advertisement -m hl --hl-eq 255 -j ACCEPT
-
-# quickly process packets for which we already have a connection
--A ufw6-before-input -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
--A ufw6-before-output -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
--A ufw6-before-forward -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-
-# for multicast ping replies from link-local addresses (these don't have an
-# associated connection and would otherwise be marked INVALID)
--A ufw6-before-input -p icmpv6 --icmpv6-type echo-reply -s fe80::/10 -j ACCEPT
-
-# drop INVALID packets (logs these in loglevel medium and higher)
--A ufw6-before-input -m conntrack --ctstate INVALID -j ufw6-logging-deny
--A ufw6-before-input -m conntrack --ctstate INVALID -j DROP
-
-# ok icmp codes for INPUT
--A ufw6-before-input -p icmpv6 --icmpv6-type destination-unreachable -j ACCEPT
--A ufw6-before-input -p icmpv6 --icmpv6-type packet-too-big -j ACCEPT
--A ufw6-before-input -p icmpv6 --icmpv6-type time-exceeded -j ACCEPT
--A ufw6-before-input -p icmpv6 --icmpv6-type parameter-problem -j ACCEPT
--A ufw6-before-input -p icmpv6 --icmpv6-type echo-request -j ACCEPT
-
-# ok icmp code for FORWARD
--A ufw6-before-forward -p icmpv6 --icmpv6-type destination-unreachable -j ACCEPT
--A ufw6-before-forward -p icmpv6 --icmpv6-type packet-too-big -j ACCEPT
--A ufw6-before-forward -p icmpv6 --icmpv6-type time-exceeded -j ACCEPT
--A ufw6-before-forward -p icmpv6 --icmpv6-type parameter-problem -j ACCEPT
--A ufw6-before-forward -p icmpv6 --icmpv6-type echo-request -j ACCEPT
-
-# allow dhcp client to work
--A ufw6-before-input -p udp -s fe80::/10 --sport 547 -d fe80::/10 --dport 546 -j ACCEPT
-
-# allow MULTICAST mDNS for service discovery
--A ufw6-before-input -p udp -d ff02::fb --dport 5353 -j ACCEPT
-
-# allow MULTICAST UPnP for service discovery
--A ufw6-before-input -p udp -d ff02::f --dport 1900 -j ACCEPT
-
-# don't delete the 'COMMIT' line or these rules won't be processed
-COMMIT
diff --git a/xos/observers/vcpe/files/etc/ufw/sysctl.conf b/xos/observers/vcpe/files/etc/ufw/sysctl.conf
deleted file mode 100644
index 8707032..0000000
--- a/xos/observers/vcpe/files/etc/ufw/sysctl.conf
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-# Configuration file for setting network variables. Please note these settings
-# override /etc/sysctl.conf. If you prefer to use /etc/sysctl.conf, please
-# adjust IPT_SYSCTL in /etc/default/ufw.
-#
-
-# Uncomment this to allow this host to route packets between interfaces
-#net/ipv4/ip_forward=1
-#net/ipv6/conf/default/forwarding=1
-#net/ipv6/conf/all/forwarding=1
-
-# Turn on Source Address Verification in all interfaces to prevent some
-# spoofing attacks
-net/ipv4/conf/default/rp_filter=1
-net/ipv4/conf/all/rp_filter=1
-
-# Do not accept IP source route packets (we are not a router)
-net/ipv4/conf/default/accept_source_route=0
-net/ipv4/conf/all/accept_source_route=0
-net/ipv6/conf/default/accept_source_route=0
-net/ipv6/conf/all/accept_source_route=0
-
-# Disable ICMP redirects. ICMP redirects are rarely used but can be used in
-# MITM (man-in-the-middle) attacks. Disabling ICMP may disrupt legitimate
-# traffic to those sites.
-net/ipv4/conf/default/accept_redirects=0
-net/ipv4/conf/all/accept_redirects=0
-net/ipv6/conf/default/accept_redirects=0
-net/ipv6/conf/all/accept_redirects=0
-
-# Ignore bogus ICMP errors
-net/ipv4/icmp_echo_ignore_broadcasts=1
-net/ipv4/icmp_ignore_bogus_error_responses=1
-net/ipv4/icmp_echo_ignore_all=0
-
-# Don't log Martian Packets (impossible packets)
-net/ipv4/conf/default/log_martians=0
-net/ipv4/conf/all/log_martians=0
-
-# Change to '1' to enable TCP/IP SYN cookies This disables TCP Window Scaling
-# (http://lkml.org/lkml/2008/2/5/167)
-net/ipv4/tcp_syncookies=0
-
-#net/ipv4/tcp_fin_timeout=30
-#net/ipv4/tcp_keepalive_intvl=1800
-
-# normally allowing tcp_sack is ok, but if going through OpenBSD 3.8 RELEASE or
-# earlier pf firewall, should set this to 0
-net/ipv4/tcp_sack=1
-
-# Uncomment this to turn off ipv6 autoconfiguration
-#net/ipv6/conf/default/autoconf=0
-#net/ipv6/conf/all/autoconf=0
-
-# Uncomment this to enable ipv6 privacy addressing
-#net/ipv6/conf/default/use_tempaddr=2
-#net/ipv6/conf/all/use_tempaddr=2
diff --git a/xos/observers/vcpe/files/etc/ufw/ufw.conf b/xos/observers/vcpe/files/etc/ufw/ufw.conf
deleted file mode 100644
index 28fe534..0000000
--- a/xos/observers/vcpe/files/etc/ufw/ufw.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# /etc/ufw/ufw.conf
-#
-
-# Set to yes to start on boot. If setting this remotely, be sure to add a rule
-# to allow your remote connection before starting ufw. Eg: 'ufw allow 22/tcp'
-ENABLED=yes
-
-# Please use the 'ufw' command to set the loglevel. Eg: 'ufw logging medium'.
-# See 'man ufw' for details.
-LOGLEVEL=low
diff --git a/xos/observers/vcpe/files/vcpe.conf b/xos/observers/vcpe/files/vcpe.conf
deleted file mode 100644
index 752c57a..0000000
--- a/xos/observers/vcpe/files/vcpe.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# Upstart script for vCPE 
-description "vCPE container"
-author "andy@onlab.us"
-start on filesystem and started docker
-stop on runlevel [!2345]
-respawn
-
-script
-  /usr/local/sbin/start-vcpe.sh
-end script
diff --git a/xos/observers/vcpe/files/vcpe.dnsmasq b/xos/observers/vcpe/files/vcpe.dnsmasq
deleted file mode 100644
index 2b2687b..0000000
--- a/xos/observers/vcpe/files/vcpe.dnsmasq
+++ /dev/null
@@ -1,2 +0,0 @@
-listen-address=192.168.0.1
-dhcp-range=192.168.0.2,192.168.0.254,6
diff --git a/xos/observers/vcpe/files/vm-resolv.conf b/xos/observers/vcpe/files/vm-resolv.conf
deleted file mode 100644
index cae093a..0000000
--- a/xos/observers/vcpe/files/vm-resolv.conf
+++ /dev/null
@@ -1 +0,0 @@
-nameserver 8.8.8.8
diff --git a/xos/observers/vcpe/model-deps b/xos/observers/vcpe/model-deps
deleted file mode 100644
index 0967ef4..0000000
--- a/xos/observers/vcpe/model-deps
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/xos/observers/vcpe/observer_ansible_test.py b/xos/observers/vcpe/observer_ansible_test.py
deleted file mode 100644
index b28da63..0000000
--- a/xos/observers/vcpe/observer_ansible_test.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-import os
-import sys
-
-sys.path.append("../..")
-import synchronizers.base.ansible
-
-print sys.argv
-
-private_key="""-----BEGIN RSA PRIVATE KEY-----
-MIIEpQIBAAKCAQEAtJiuarud5S4Y2quDeWyaS0UCQGQtfuSzzNhplFwujYnJGL65
-e14REtv+UuHGymyr/SfkTrBd8vH5NI2UZ/4sZW13ieI/1d97OeVe2+ct0Y4BaFEI
-3Hja6DIpsY3Q2cBQsWUwcQzbMIF9jIq8SzwR1zk8UtZi09fNxqjCchRPlBvbiLKX
-g0/yirN237WbaKzK++8EPy3nuv83216MXHFFSjuxfICe/RhjaqMzVp7isSbv1suU
-geyvNNzU71c/K13gTggdcIXeRQBiJYio2Sn3h2nsV6AFqFH4fjERxWG55Q4e3jeE
-tWM/Dw+hqYKg/25UcmM862a6sUmuDCmM5S3VEQIDAQABAoIBACH88iZoNOki6c6N
-pOq/Q7KSxD+2GtHc3PHacNRZHPHKUqxziJjtNS1lddHHaBBEr4GvdkpQ6v2ECLCZ
-TKrdrSFRnsO2bukjbB+TSWz9byQXI7CsP4yuuhQlDK+7zuiMRyN7tcgw8TeJx0Uh
-/xnxrjHhNbcpXeQcoz+WFzI9HFT1MEGmMS4Lyp/zLB/pmfY9h7V9d+EeRZDi78jq
-Vir6MI6iCTa0T02dvHUFOg+wXLb0nb8V1xKDL+6cAJla7LzwoG8lTnvp5DSYCojI
-5JrILYafeO8RbBV2GWmaE5mkHgeBkFZ+qZQ7K0MjR30Yh6tajB7P3+F/Max8FUgW
-xLHr8AECgYEA2+o0ge3HtZcepEFBKKYnLTwoEpPCfLElWZHzUJYDz259s4JLsfak
-tROANFEdsJUjpmWG52MCL+bgKFFOedDkt4p1jgcIneaHk0jvoU11wG7W3jZZVy1q
-WjQNH5vDU+hg5tm/CREwm7lbUxR9Xuj9K63CNAAGp8KO7h2tOH8woIECgYEA0jrb
-LUg30RxO3+vrq9dUYohrDRisk5zKXuRLfxRA+E+ruvZ7CctG2OpM+658/qZM/w95
-7pOj6zz3//w7tAvH9erY+JOISnzaYKx04sYC1MfbFiFkq5j0gpuYm/MULDYNvFqr
-NU2Buj4dW+ZB+SeficsQOqm5QeNxh1kgiDCs7JECgYEAjSLGCAzeesA9vhTTCI95
-3SIaZbHGw9e8rLtqeHGOiHXU3nvksJYmJsAZK3pTn5xXgNbvuVhlcvCtM7LatntG
-DjUiNMB22z+0CuZoRBE+XP3FkF84/yX6d2Goenyw4wzkA8QDQoJxu789yRgBTgQh
-VwLw/AZ4PvoyWMdbAENApgECgYEAvFikosYP09XTyIPaKaOKY5iqqBoSC1GucSOB
-jAG+T3k5dxB6nQS0nYQUomvqak7drqnT6O33Lrr5ySrW5nCjnmvgJZwvv+Rp1bDM
-K5uRT8caPpJ+Wcp4TUdPi3BVA2MOHVDyEJg3AH/D1+DL/IgGQ/JcwOHsKt61iLhO
-EBXj5zECgYEAk+HuwksUPkSxg/AiJGbapGDK6XGymEUzo2duWlnofRqGcZ3NT3bB
-/kDI1KxQdlpODXSi4/BuTpbQiFOrzcEq5e5ytoMxlCHh3Fl3Jxl+JlgO21vAUvP6
-4SET7Q/6LxmfBlCVRg0dXDwcfJLgbnWxyvprIcz4e0FSFVZTBs/6tFk=
------END RSA PRIVATE KEY-----
-"""
-
-synchronizers.base.ansible.run_template_ssh("test.yaml",
-                                            {"instance_name": "onlab_test405-378",
-                                             "instance_id": "instance-0000004d",
-                                             "hostname": "node67.washington.vicci.org",
-                                             "private_key": private_key})
-
diff --git a/xos/observers/vcpe/run.sh b/xos/observers/vcpe/run.sh
deleted file mode 100755
index f180e66..0000000
--- a/xos/observers/vcpe/run.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#if [[ ! -e ./vcpe-observer.py ]]; then
-#    ln -s ../../xos-observer.py vcpe-observer.py
-#fi
-
-export XOS_DIR=/opt/xos
-python vcpe-observer.py  -C $XOS_DIR/observers/vcpe/vcpe_observer_config
diff --git a/xos/observers/vcpe/start-bbs.sh b/xos/observers/vcpe/start-bbs.sh
deleted file mode 100755
index c8ee147..0000000
--- a/xos/observers/vcpe/start-bbs.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#! /bin/bash
-
-# put this in /opt/xerocole/start-bbs.sh
-# make sure it's executable
-# set it up in crontab
-#   @reboot /opt/xerocole/start-bbs.sh
-
-ulimit -n 200000
-cd /opt/xerocole/answerx
-/opt/xerocole/answerx/startStop checkconfig answerx
-/opt/xerocole/answerx/startStop start answerx
-cd /opt/xerocole/namecontrols
-nohup /opt/xerocole/namecontrols/broadbandshield &
-nohup socat TCP-LISTEN:80,bind=0.0.0.0,fork TCP4:127.0.0.1:8018 &  
diff --git a/xos/observers/vcpe/start.sh b/xos/observers/vcpe/start.sh
deleted file mode 100755
index b402e5d..0000000
--- a/xos/observers/vcpe/start.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#if [[ ! -e ./vcpe-observer.py ]]; then
-#    ln -s ../../xos-observer.py vcpe-observer.py
-#fi
-
-export XOS_DIR=/opt/xos
-nohup python vcpe-observer.py  -C $XOS_DIR/observers/vcpe/vcpe_observer_config > /dev/null 2>&1 &
diff --git a/xos/observers/vcpe/steps/ansible_test/README b/xos/observers/vcpe/steps/ansible_test/README
deleted file mode 100644
index d3b2c54..0000000
--- a/xos/observers/vcpe/steps/ansible_test/README
+++ /dev/null
@@ -1,4 +0,0 @@
-Some scripts used while testing the Ansible instance configuration observer.
-
-xos.py was probably the prototype of an XOS SSH module for Ansible that understood how to SSH into the instances
-without needing config-file and environment tricks.
diff --git a/xos/observers/vcpe/steps/ansible_test/inventory.txt b/xos/observers/vcpe/steps/ansible_test/inventory.txt
deleted file mode 100644
index bd5b542..0000000
--- a/xos/observers/vcpe/steps/ansible_test/inventory.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-[onlab_hpc-355]
-node67.washington.vicci.org instance_id=instance-00000045 instance_name=onlab_hpc-355
-
-[onlab_test405-372]
-node67.washington.vicci.org instance_id=instance-0000004c instance_name=onlab_test405-372
-
-[onlab_test405-376]
-node1.cs.arizona.edu
-
-[onlab_test405-378]
-node67.washington.vicci.org ansible_ssh_private_key_file=/home/smbaker/.ssh/id_rsa
-#/home/smbaker/projects/vicci/keys/test_service_key_rsa
-
-[mysite_test2-48]
-cordcompute02.onlab.us ansible_ssh_private_key_file=/home/smbaker/projects/vicci/keys/demo_admin.rsa
-
diff --git a/xos/observers/vcpe/steps/ansible_test/test.sh b/xos/observers/vcpe/steps/ansible_test/test.sh
deleted file mode 100755
index 157ba9c..0000000
--- a/xos/observers/vcpe/steps/ansible_test/test.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#! /bin/bash
-ansible-playbook --private-key /home/smbaker/.ssh/id_rsa -i ./inventory.txt test.yaml
diff --git a/xos/observers/vcpe/steps/ansible_test/test.yaml b/xos/observers/vcpe/steps/ansible_test/test.yaml
deleted file mode 100644
index 6a29d56..0000000
--- a/xos/observers/vcpe/steps/ansible_test/test.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- hosts: onlab_test405-372
-  connection: xos
-  user: ubuntu
-  vars:
-     foo: 25
-#  instance_name: instance-00000045
-#  slice_name: onlab_hpc-355
-
-  tasks:
-    - name: foobar
-      shell: echo foo > /tmp/foobar
diff --git a/xos/observers/vcpe/steps/ansible_test/xos.py b/xos/observers/vcpe/steps/ansible_test/xos.py
deleted file mode 100755
index eb4f3eb..0000000
--- a/xos/observers/vcpe/steps/ansible_test/xos.py
+++ /dev/null
@@ -1,444 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
-#
-
-import os
-import re
-import subprocess
-import shlex
-import pipes
-import random
-import select
-import fcntl
-import hmac
-import pwd
-import gettext
-import pty
-from hashlib import sha1
-import ansible.constants as C
-from ansible.callbacks import vvv
-from ansible import errors
-from ansible import utils
-
-class Connection(object):
-    ''' ssh based connections '''
-
-    def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
-        self.runner = runner
-        self.host = host
-        self.ipv6 = ':' in self.host
-        self.port = port
-        self.user = str(user)
-        self.password = password
-        self.private_key_file = private_key_file
-        self.HASHED_KEY_MAGIC = "|1|"
-        self.has_pipelining = True
-        #self.instance_id = "instance-00000045" # C.get_config(C.p, "xos", "instance_id", "INSTANCE_ID", None)
-        #self.instance_name = "onlab_hpc-355" # C.get_config(C.p, "xos", "instance_name", "SLIVER_NAME", None)
-
-        inject={}
-        inject= utils.combine_vars(inject, self.runner.inventory.get_variables(self.host))
-
-        self.instance_id = inject["instance_id"]
-        self.instance_name = inject["instance_name"]
-
-        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
-        self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
-        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
-
-    def connect(self):
-        ''' connect to the remote host '''
-
-        vvv("ESTABLISH CONNECTION FOR USER: %s" % self.user, host=self.host)
-
-        self.common_args = []
-        extra_args = C.ANSIBLE_SSH_ARGS
-        if extra_args is not None:
-            # make sure there is no empty string added as this can produce weird errors
-            self.common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()]
-        else:
-            self.common_args += ["-o", "ControlMaster=auto",
-                                 "-o", "ControlPersist=60s",
-                                 "-o", "ControlPath=%s" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
-
-        self.common_args += ["-o", "ProxyCommand ssh -q -i %s %s@%s" % (self.private_key_file, self.instance_id, self.host)]
-
-        cp_in_use = False
-        cp_path_set = False
-        for arg in self.common_args:
-            if "ControlPersist" in arg:
-                cp_in_use = True
-            if "ControlPath" in arg:
-                cp_path_set = True
-
-        if cp_in_use and not cp_path_set:
-            self.common_args += ["-o", "ControlPath=%s" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
-
-        if not C.HOST_KEY_CHECKING:
-            self.common_args += ["-o", "StrictHostKeyChecking=no"]
-
-        if self.port is not None:
-            self.common_args += ["-o", "Port=%d" % (self.port)]
-        if self.private_key_file is not None:
-            self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.private_key_file)]
-        elif self.runner.private_key_file is not None:
-            self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.runner.private_key_file)]
-        if self.password:
-            self.common_args += ["-o", "GSSAPIAuthentication=no",
-                                 "-o", "PubkeyAuthentication=no"]
-        else:
-            self.common_args += ["-o", "KbdInteractiveAuthentication=no",
-                                 "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
-                                 "-o", "PasswordAuthentication=no"]
-        if self.user != pwd.getpwuid(os.geteuid())[0]:
-            self.common_args += ["-o", "User="+self.user]
-        self.common_args += ["-o", "ConnectTimeout=%d" % self.runner.timeout]
-
-        return self
-
-    def _run(self, cmd, indata):
-        if indata:
-            # do not use pseudo-pty
-            p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
-                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-            stdin = p.stdin
-        else:
-            # try to use a pseudo-pty
-            try:
-                # Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors
-                master, slave = pty.openpty()
-                p = subprocess.Popen(cmd, stdin=slave,
-                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-                stdin = os.fdopen(master, 'w', 0)
-                os.close(slave)
-            except:
-                p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
-                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-                stdin = p.stdin
-
-        return (p, stdin)
-
-    def _password_cmd(self):
-        if self.password:
-            try:
-                p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE,
-                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-                p.communicate()
-            except OSError:
-                raise errors.AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
-            (self.rfd, self.wfd) = os.pipe()
-            return ["sshpass", "-d%d" % self.rfd]
-        return []
-
-    def _send_password(self):
-        if self.password:
-            os.close(self.rfd)
-            os.write(self.wfd, "%s\n" % self.password)
-            os.close(self.wfd)
-
-    def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None):
-        fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
-        fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
-        # We can't use p.communicate here because the ControlMaster may have stdout open as well
-        stdout = ''
-        stderr = ''
-        rpipes = [p.stdout, p.stderr]
-        if indata:
-            try:
-                stdin.write(indata)
-                stdin.close()
-            except:
-                raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
-        # Read stdout/stderr from process
-        while True:
-            rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
-
-            # fail early if the sudo/su password is wrong
-            if self.runner.sudo and sudoable:
-                if self.runner.sudo_pass:
-                    incorrect_password = gettext.dgettext(
-                        "sudo", "Sorry, try again.")
-                    if stdout.endswith("%s\r\n%s" % (incorrect_password,
-                                                     prompt)):
-                        raise errors.AnsibleError('Incorrect sudo password')
-
-                if stdout.endswith(prompt):
-                    raise errors.AnsibleError('Missing sudo password')
-
-            if self.runner.su and su and self.runner.su_pass:
-                incorrect_password = gettext.dgettext(
-                    "su", "Sorry")
-                if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
-                    raise errors.AnsibleError('Incorrect su password')
-
-            if p.stdout in rfd:
-                dat = os.read(p.stdout.fileno(), 9000)
-                stdout += dat
-                if dat == '':
-                    rpipes.remove(p.stdout)
-            if p.stderr in rfd:
-                dat = os.read(p.stderr.fileno(), 9000)
-                stderr += dat
-                if dat == '':
-                    rpipes.remove(p.stderr)
-            # only break out if no pipes are left to read or
-            # the pipes are completely read and
-            # the process is terminated
-            if (not rpipes or not rfd) and p.poll() is not None:
-                break
-            # No pipes are left to read but process is not yet terminated
-            # Only then it is safe to wait for the process to be finished
-            # NOTE: Actually p.poll() is always None here if rpipes is empty
-            elif not rpipes and p.poll() == None:
-                p.wait()
-                # The process is terminated. Since no pipes to read from are
-                # left, there is no need to call select() again.
-                break
-        # close stdin after process is terminated and stdout/stderr are read
-        # completely (see also issue #848)
-        stdin.close()
-        return (p.returncode, stdout, stderr)
-
-    def not_in_host_file(self, host):
-        if 'USER' in os.environ:
-            user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
-        else:
-            user_host_file = "~/.ssh/known_hosts"
-        user_host_file = os.path.expanduser(user_host_file)
-        
-        host_file_list = []
-        host_file_list.append(user_host_file)
-        host_file_list.append("/etc/ssh/ssh_known_hosts")
-        host_file_list.append("/etc/ssh/ssh_known_hosts2")
-        
-        hfiles_not_found = 0
-        for hf in host_file_list:
-            if not os.path.exists(hf):
-                hfiles_not_found += 1
-                continue
-            try:
-                host_fh = open(hf)
-            except IOError, e:
-                hfiles_not_found += 1
-                continue
-            else:
-                data = host_fh.read()
-                host_fh.close()
-                
-            for line in data.split("\n"):
-                if line is None or " " not in line:
-                    continue
-                tokens = line.split()
-                if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
-                    # this is a hashed known host entry
-                    try:
-                        (kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
-                        hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
-                        hash.update(host)
-                        if hash.digest() == kn_host.decode('base64'):
-                            return False
-                    except:
-                        # invalid hashed host key, skip it
-                        continue
-                else:
-                    # standard host file entry
-                    if host in tokens[0]:
-                        return False
-
-        if (hfiles_not_found == len(host_file_list)):
-            vvv("EXEC previous known host file not found for %s" % host)
-        return True
-
-    def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su_user=None, su=False):
-        ''' run a command on the remote host '''
-
-        ssh_cmd = self._password_cmd()
-        ssh_cmd += ["ssh", "-C"]
-        if not in_data:
-            # we can only use tty when we are not pipelining the modules. piping data into /usr/bin/python
-            # inside a tty automatically invokes the python interactive-mode but the modules are not
-            # compatible with the interactive-mode ("unexpected indent" mainly because of empty lines)
-            ssh_cmd += ["-tt"]
-        if utils.VERBOSITY > 3:
-            ssh_cmd += ["-vvv"]
-        else:
-            ssh_cmd += ["-q"]
-        ssh_cmd += self.common_args
-
-        if self.ipv6:
-            ssh_cmd += ['-6']
-        #ssh_cmd += [self.host]
-        ssh_cmd += [self.instance_name]
-
-        if su and su_user:
-            sudocmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd)
-            prompt_re = re.compile(prompt)
-            ssh_cmd.append(sudocmd)
-        elif not self.runner.sudo or not sudoable:
-            prompt = None
-            if executable:
-                ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
-            else:
-                ssh_cmd.append(cmd)
-        else:
-            sudocmd, prompt, success_key = utils.make_sudo_cmd(sudo_user, executable, cmd)
-            ssh_cmd.append(sudocmd)
-
-        vvv("EXEC %s" % ssh_cmd, host=self.host)
-
-        not_in_host_file = self.not_in_host_file(self.host)
-
-        if C.HOST_KEY_CHECKING and not_in_host_file:
-            # lock around the initial SSH connectivity so the user prompt about whether to add 
-            # the host to known hosts is not intermingled with multiprocess output.
-            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
-            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
-
-        # create process
-        (p, stdin) = self._run(ssh_cmd, in_data)
-
-        self._send_password()
-
-        if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \
-                (self.runner.su and su and self.runner.su_pass):
-            # several cases are handled for sudo privileges with password
-            # * NOPASSWD (tty & no-tty): detect success_key on stdout
-            # * without NOPASSWD:
-            #   * detect prompt on stdout (tty)
-            #   * detect prompt on stderr (no-tty)
-            fcntl.fcntl(p.stdout, fcntl.F_SETFL,
-                        fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
-            fcntl.fcntl(p.stderr, fcntl.F_SETFL,
-                        fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
-            sudo_output = ''
-            sudo_errput = ''
-
-            while True:
-                if success_key in sudo_output or \
-                    (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
-                    (self.runner.su_pass and prompt_re.match(sudo_output)):
-                    break
-
-                rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
-                                              [p.stdout], self.runner.timeout)
-                if p.stderr in rfd:
-                    chunk = p.stderr.read()
-                    if not chunk:
-                        raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt')
-                    sudo_errput += chunk
-                    incorrect_password = gettext.dgettext(
-                        "sudo", "Sorry, try again.")
-                    if sudo_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
-                        raise errors.AnsibleError('Incorrect sudo password')
-                    elif sudo_errput.endswith(prompt):
-                        stdin.write(self.runner.sudo_pass + '\n')
-
-                if p.stdout in rfd:
-                    chunk = p.stdout.read()
-                    if not chunk:
-                        raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt')
-                    sudo_output += chunk
-
-                if not rfd:
-                    # timeout. wrap up process communication
-                    stdout = p.communicate()
-                    raise errors.AnsibleError('ssh connection error waiting for sudo or su password prompt')
-
-            if success_key not in sudo_output:
-                if sudoable:
-                    stdin.write(self.runner.sudo_pass + '\n')
-                elif su:
-                    stdin.write(self.runner.su_pass + '\n')
-
-        (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt)
-
-        if C.HOST_KEY_CHECKING and not_in_host_file:
-            # lock around the initial SSH connectivity so the user prompt about whether to add 
-            # the host to known hosts is not intermingled with multiprocess output.
-            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
-            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
-        controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or \
-                              'unknown configuration option: ControlPersist' in stderr
-
-        if C.HOST_KEY_CHECKING:
-            if ssh_cmd[0] == "sshpass" and p.returncode == 6:
-                raise errors.AnsibleError('Using an SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
-
-        if p.returncode != 0 and controlpersisterror:
-            raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
-        if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
-            raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
-
-        return (p.returncode, '', stdout, stderr)
-
-    def put_file(self, in_path, out_path):
-        ''' transfer a file from local to remote '''
-        vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
-        if not os.path.exists(in_path):
-            raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
-        cmd = self._password_cmd()
-
-        host = self.host
-        if self.ipv6:
-            host = '[%s]' % host
-
-        if C.DEFAULT_SCP_IF_SSH:
-            cmd += ["scp"] + self.common_args
-            cmd += [in_path,host + ":" + pipes.quote(out_path)]
-            indata = None
-        else:
-            cmd += ["sftp"] + self.common_args + [host]
-            indata = "put %s %s\n" % (pipes.quote(in_path), pipes.quote(out_path))
-
-        (p, stdin) = self._run(cmd, indata)
-
-        self._send_password()
-
-        (returncode, stdout, stderr) = self._communicate(p, stdin, indata)
-
-        if returncode != 0:
-            raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr))
-
-    def fetch_file(self, in_path, out_path):
-        ''' fetch a file from remote to local '''
-        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
-        cmd = self._password_cmd()
-
-        host = self.host
-        if self.ipv6:
-            host = '[%s]' % host
-
-        if C.DEFAULT_SCP_IF_SSH:
-            cmd += ["scp"] + self.common_args
-            cmd += [host + ":" + in_path, out_path]
-            indata = None
-        else:
-            cmd += ["sftp"] + self.common_args + [host]
-            indata = "get %s %s\n" % (in_path, out_path)
-
-        p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
-                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        self._send_password()
-        stdout, stderr = p.communicate(indata)
-
-        if p.returncode != 0:
-            raise errors.AnsibleError("failed to transfer file from %s:\n%s\n%s" % (in_path, stdout, stderr))
-
-    def close(self):
-        ''' not applicable since we're executing openssh binaries '''
-        pass
-
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.py b/xos/observers/vcpe/steps/sync_vcpetenant.py
deleted file mode 100644
index d35a628..0000000
--- a/xos/observers/vcpe/steps/sync_vcpetenant.py
+++ /dev/null
@@ -1,235 +0,0 @@
-import hashlib
-import os
-import socket
-import sys
-import base64
-import time
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from synchronizers.base.ansible import run_template_ssh
-from observers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-from core.models import Service, Slice
-from services.cord.models import VCPEService, VCPETenant, VOLTTenant
-from services.hpc.models import HpcService, CDNPrefix
-from util.logger import Logger, logging
-
-# broadbandshield library will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from broadbandshield import BBS
-
-logger = Logger(level=logging.INFO)
-
-PARENTAL_MECHANISM="dnsmasq"
-ENABLE_QUICK_UPDATE=False
-
-class SyncVCPETenant(SyncInstanceUsingAnsible):
-    provides=[VCPETenant]
-    observes=VCPETenant
-    requested_interval=0
-    template_name = "sync_vcpetenant.yaml"
-    service_key_name = "/opt/xos/observers/vcpe/vcpe_private_key"
-
-    def __init__(self, *args, **kwargs):
-        super(SyncVCPETenant, self).__init__(*args, **kwargs)
-
-    def fetch_pending(self, deleted):
-        if (not deleted):
-            objs = VCPETenant.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
-        else:
-            objs = VCPETenant.get_deleted_tenant_objects()
-
-        return objs
-
-    def get_vcpe_service(self, o):
-        if not o.provider_service:
-            return None
-
-        vcpes = VCPEService.get_service_objects().filter(id=o.provider_service.id)
-        if not vcpes:
-            return None
-
-        return vcpes[0]
-
-    def get_extra_attributes(self, o):
-        # This is a place to include extra attributes that aren't part of the
-        # object itself. In the case of vCPE, we need to know:
-        #   1) the addresses of dnsdemux, to setup dnsmasq in the vCPE
-        #   2) CDN prefixes, so we know what URLs to send to dnsdemux
-        #   3) BroadBandShield server addresses, for parental filtering
-        #   4) vlan_ids, for setting up networking in the vCPE VM
-
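-        # For illustration only (all values below are assumed, not taken from a
-        # real deployment), the fields dict assembled here might look like:
-        #   {"vlan_ids": [1001], "s_tags": [111], "c_tags": [222],
-        #    "dnsdemux_ip": "10.0.0.5", "cdn_prefixes": ["downloads.example.com"],
-        #    "bbs_addrs": ["10.0.0.7"], "full_setup": True, "isolation": "vm",
-        #    "safe_browsing_macs": ["02:42:ac:11:00:02"]}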
-        vcpe_service = self.get_vcpe_service(o)
-
-        dnsdemux_ip = None
-        if vcpe_service.backend_network_label:
-            # Connect to dnsdemux using the network specified by
-            #     vcpe_service.backend_network_label
-            for service in HpcService.objects.all():
-                for slice in service.slices.all():
-                    if "dnsdemux" in slice.name:
-                        for instance in slice.instances.all():
-                            for ns in instance.ports.all():
-                                if ns.ip and ns.network.labels and (vcpe_service.backend_network_label in ns.network.labels):
-                                    dnsdemux_ip = ns.ip
-            if not dnsdemux_ip:
-                logger.info("failed to find a dnsdemux on network %s" % vcpe_service.backend_network_label)
-        else:
-            # Connect to dnsdemux using the instance's public address
-            for service in HpcService.objects.all():
-                for slice in service.slices.all():
-                    if "dnsdemux" in slice.name:
-                        for instance in slice.instances.all():
-                            if dnsdemux_ip is None:
-                                try:
-                                    dnsdemux_ip = socket.gethostbyname(instance.node.name)
-                                except:
-                                    pass
-            if not dnsdemux_ip:
-                logger.info("failed to find a dnsdemux with a public address")
-
-        dnsdemux_ip = dnsdemux_ip or "none"
-
-        cdn_prefixes = []
-        for prefix in CDNPrefix.objects.all():
-            cdn_prefixes.append(prefix.prefix)
-
-        # Broadbandshield can either be set up internally, using vcpe_service.bbs_slice,
-        # or it can be setup externally using vcpe_service.bbs_server.
-
-        bbs_addrs = []
-        if vcpe_service.bbs_slice:
-            if vcpe_service.backend_network_label:
-                for bbs_instance in vcpe_service.bbs_slice.instances.all():
-                    for ns in bbs_instance.ports.all():
-                        if ns.ip and ns.network.labels and (vcpe_service.backend_network_label in ns.network.labels):
-                            bbs_addrs.append(ns.ip)
-            else:
-                logger.info("unsupported configuration -- bbs_slice is set, but backend_network_label is not")
-            if not bbs_addrs:
-                logger.info("failed to find any usable addresses on bbs_slice")
-        elif vcpe_service.bbs_server:
-            bbs_addrs.append(vcpe_service.bbs_server)
-        else:
-            logger.info("neither bbs_slice nor bbs_server is configured in the vCPE")
-
-        vlan_ids = []
-        s_tags = []
-        c_tags = []
-        if o.volt:
-            vlan_ids.append(o.volt.vlan_id)  # XXX remove this
-            s_tags.append(o.volt.s_tag)
-            c_tags.append(o.volt.c_tag)
-
-        try:
-            full_setup = Config().observer_full_setup
-        except:
-            full_setup = True
-
-        safe_macs=[]
-        if o.volt and o.volt.subscriber:
-            for user in o.volt.subscriber.users:
-                level = user.get("level",None)
-                mac = user.get("mac",None)
-                if level in ["G", "PG"]:
-                    if mac:
-                        safe_macs.append(mac)
-
-        fields = {"vlan_ids": vlan_ids,   # XXX remove this
-                "s_tags": s_tags,
-                "c_tags": c_tags,
-                "dnsdemux_ip": dnsdemux_ip,
-                "cdn_prefixes": cdn_prefixes,
-                "bbs_addrs": bbs_addrs,
-                "full_setup": full_setup,
-                "isolation": o.instance.isolation,
-                "safe_browsing_macs": safe_macs}
-
-        # add in the sync_attributes that come from the SubscriberRoot object
-
-        if o.volt and o.volt.subscriber and hasattr(o.volt.subscriber, "sync_attributes"):
-            for attribute_name in o.volt.subscriber.sync_attributes:
-                fields[attribute_name] = getattr(o.volt.subscriber, attribute_name)
-
-        return fields
-
-    def sync_fields(self, o, fields):
-        # the super causes the playbook to be run
-
-        super(SyncVCPETenant, self).sync_fields(o, fields)
-
-        # now do all of our broadbandshield stuff...
-
-        service = self.get_vcpe_service(o)
-        if not service:
-            # Ansible uses the service's keypair in order to SSH into the
-            # instance. It would be bad if the slice had no service.
-
-            raise Exception("Slice %s is not associated with a service" % instance.slice.name)
-
-        # Make sure the slice is configured properly
-        if (service != o.instance.slice.service):
-            raise Exception("Slice %s is associated with some service that is not %s" % (str(instance.slice), str(service)))
-
-        # only enable filtering if we have a subscriber object (see below)
-        url_filter_enable = False
-
-        # for attributes that come from CordSubscriberRoot
-        if o.volt and o.volt.subscriber:
-            url_filter_enable = o.volt.subscriber.url_filter_enable
-            url_filter_level = o.volt.subscriber.url_filter_level
-            url_filter_users = o.volt.subscriber.users
-
-        if PARENTAL_MECHANISM=="broadbandshield":
-            # disable url_filter if there are no bbs_addrs
-            if url_filter_enable and (not fields.get("bbs_addrs",[])):
-                logger.info("disabling url_filter because there are no bbs_addrs")
-                url_filter_enable = False
-
-            if url_filter_enable:
-                bbs_hostname = None
-                if service.bbs_api_hostname and service.bbs_api_port:
-                    bbs_hostname = service.bbs_api_hostname
-                else:
-                    # TODO: extract from slice
-                    bbs_hostname = "cordcompute01.onlab.us"
-
-                if service.bbs_api_port:
-                    bbs_port = service.bbs_api_port
-                else:
-                    bbs_port = 8018
-
-                if not bbs_hostname:
-                    logger.info("broadbandshield is not configured")
-                else:
-                    tStart = time.time()
-                    bbs = BBS(o.bbs_account, "123", bbs_hostname, bbs_port)
-                    bbs.sync(url_filter_level, url_filter_users)
-
-                    if o.hpc_client_ip:
-                        logger.info("associate account %s with ip %s" % (o.bbs_account, o.hpc_client_ip))
-                        bbs.associate(o.hpc_client_ip)
-                    else:
-                        logger.info("no hpc_client_ip to associate")
-
-                    logger.info("bbs update time %d" % int(time.time()-tStart))
-
-
-    def run_playbook(self, o, fields):
-        ansible_hash = hashlib.md5(repr(sorted(fields.items()))).hexdigest()
-        quick_update = (o.last_ansible_hash == ansible_hash)
-
-        if ENABLE_QUICK_UPDATE and quick_update:
-            logger.info("quick_update triggered; skipping ansible recipe")
-        else:
-            if o.instance.isolation in ["container", "container_vm"]:
-                super(SyncVCPETenant, self).run_playbook(o, fields, "sync_vcpetenant_new.yaml")
-            else:
-                super(SyncVCPETenant, self).run_playbook(o, fields)
-
-        o.last_ansible_hash = ansible_hash
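-        # Minimal sketch of the quick-update check above (field values assumed):
-        #   fields = {"c_tags": [222], "s_tags": [111]}
-        #   ansible_hash = hashlib.md5(repr(sorted(fields.items()))).hexdigest()
-        # Sorting the items makes the digest independent of dict ordering, so an
-        # unchanged object reproduces last_ansible_hash and, when
-        # ENABLE_QUICK_UPDATE is set, the ansible recipe is skipped.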
-
-    def delete_record(self, m):
-        pass
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.yaml b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
deleted file mode 100644
index fdc21da..0000000
--- a/xos/observers/vcpe/steps/sync_vcpetenant.yaml
+++ /dev/null
@@ -1,174 +0,0 @@
----
-- hosts: {{ instance_name }}
-  gather_facts: False
-  connection: ssh
-  user: ubuntu
-  sudo: yes
-  vars:
-      cdn_enable: {{ cdn_enable }}
-      dnsdemux_ip: {{ dnsdemux_ip }}
-      firewall_enable: {{ firewall_enable }}
-      url_filter_enable: {{ url_filter_enable }}
-      vlan_ids:
-        {% for vlan_id in vlan_ids %}
-        - {{ vlan_id }}
-        {% endfor %}
-      c_tags:
-        {% for c_tag in c_tags %}
-        - {{ c_tag }}
-        {% endfor %}
-      s_tags:
-        {% for s_tag in s_tags %}
-        - {{ s_tag }}
-        {% endfor %}
-      firewall_rules:
-        {% for firewall_rule in firewall_rules.split("\n") %}
-        - {{ firewall_rule }}
-        {% endfor %}
-      cdn_prefixes:
-        {% for prefix in cdn_prefixes %}
-        - {{ prefix }}
-        {% endfor %}
-      bbs_addrs:
-        {% for bbs_addr in bbs_addrs %}
-        - {{ bbs_addr }}
-        {% endfor %}
-      nat_ip: {{ nat_ip }}
-      nat_mac: {{ nat_mac }}
-      lan_ip: {{ lan_ip }}
-      lan_mac: {{ lan_mac }}
-      wan_ip: {{ wan_ip }}
-      wan_mac: {{ wan_mac }}
-      wan_container_mac: {{ wan_container_mac }}
-      wan_next_hop: 10.0.1.253   # FIX ME
-      private_ip: {{ private_ip }}
-      private_mac: {{ private_mac }}
-      hpc_client_ip: {{ hpc_client_ip }}
-      hpc_client_mac: {{ hpc_client_mac }}
-      keystone_tenant_id: {{ keystone_tenant_id }}
-      keystone_user_id: {{ keystone_user_id }}
-      rabbit_user: {{ rabbit_user }}
-      rabbit_password: {{ rabbit_password }}
-      rabbit_host: {{ rabbit_host }}
-      safe_browsing:
-        {% for mac in safe_browsing_macs %}
-        - {{ mac }}
-        {% endfor %}
-
-  tasks:
-{% if full_setup %}
-  - name: Docker repository
-    copy: src=/opt/xos/observers/vcpe/files/docker.list
-      dest=/etc/apt/sources.list.d/docker.list
-
-  - name: Import the repository key
-    apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
-
-  - name: install Docker
-    apt: name=lxc-docker state=present update_cache=yes
-
-  - name: install python-setuptools
-    apt: name=python-setuptools state=present
-
-  - name: install pip
-    easy_install: name=pip
-
-  - name: install docker-py
-    pip: name=docker-py version=0.5.3
-
-  - name: install Pipework
-    get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
-       dest=/usr/local/bin/pipework
-       mode=0755
-
-  - name: make sure /etc/dnsmasq.d exists
-    file: path=/etc/dnsmasq.d state=directory owner=root group=root
-
-  - name: Disable resolvconf service
-    shell: service resolvconf stop; echo manual > /etc/init/resolvconf.override; rm -f /etc/resolv.conf
-
-  - name: Install resolv.conf
-    copy: src=/opt/xos/observers/vcpe/files/vm-resolv.conf
-      dest=/etc/resolv.conf
-
-  - name: Check whether the vcpe_stats_notifier cron job is already running (the [v] in the pgrep pattern keeps pgrep from matching this shell)
-    shell: pgrep -f [v]cpe_stats_notifier | wc -l
-    register: cron_job_pids_count
-
-#  - name: DEBUG
-#    debug: var=cron_job_pids_count.stdout
-
-  - name: make sure ~/bin exists
-    file: path=~/bin state=directory owner=root group=root
-    when: cron_job_pids_count.stdout == "0"
-
-  - name: Copy cron job to destination
-    copy: src=/opt/xos/observers/vcpe/vcpe_stats_notifier.py
-      dest=~/bin/vcpe_stats_notifier.py
-    when: cron_job_pids_count.stdout == "0"
-
-  - name: install python-kombu
-    apt: name=python-kombu state=present
-    when: cron_job_pids_count.stdout == "0"
-
-  - name: Initiate vcpe_stats_notifier cron job
-    command: python ~/bin/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
-    async: 9999999999999999
-    poll: 0
-    when: cron_job_pids_count.stdout == "0"
-{% endif %}
-
-  - name: vCPE upstart
-    template: src=/opt/xos/observers/vcpe/templates/vcpe.conf.j2 dest=/etc/init/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.conf
-
-  - name: vCPE startup script
-    template: src=/opt/xos/observers/vcpe/templates/start-vcpe.sh.j2 dest=/usr/local/sbin/start-vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.sh mode=0755
-    notify:
-#    - restart vcpe
-     - stop vcpe
-     - remove container
-     - start vcpe
-
-  - name: create /etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d
-    file: path=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d state=directory owner=root group=root
-
-  - name: vCPE basic dnsmasq config
-    copy: src=/opt/xos/observers/vcpe/files/vcpe.dnsmasq dest=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/vcpe.conf owner=root group=root
-    notify:
-    - restart dnsmasq
-
-  - name: dnsmasq config
-    template: src=/opt/xos/observers/vcpe/templates/dnsmasq_servers.j2 dest=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/servers.conf owner=root group=root
-    notify:
-    - restart dnsmasq
-
-# These are samples, not necessary for correct function of demo
-
-#  - name: networking info
-#    template: src=/opt/xos/observers/vcpe/templates/vlan_sample.j2 dest=/etc/vlan_sample owner=root group=root
-
-#  - name: firewall info
-#    template: src=/opt/xos/observers/vcpe/templates/firewall_sample.j2 dest=/etc/firewall_sample owner=root group=root
-
-  - name: Make sure vCPE service is running
-    service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
-
-  handlers:
-  # Dnsmasq is automatically restarted in the container
-  - name: restart dnsmasq
-    shell: docker exec vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} killall dnsmasq
-
-  - name: restart vcpe
-    shell: service vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} stop; sleep 1; service vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} start
-
-  - name: stop vcpe
-    service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=stopped
-
-  - name: remove container
-    docker: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=absent image=docker-vcpe
-
-  - name: start vcpe
-    service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
-
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml b/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml
deleted file mode 100644
index 6c7166f..0000000
--- a/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml
+++ /dev/null
@@ -1,131 +0,0 @@
----
-- hosts: {{ instance_name }}
-  gather_facts: False
-  connection: ssh
-  user: {{ username }}
-  sudo: yes
-  vars:
-      container_name: {{ container_name }}
-      cdn_enable: {{ cdn_enable }}
-      dnsdemux_ip: {{ dnsdemux_ip }}
-      firewall_enable: {{ firewall_enable }}
-      url_filter_enable: {{ url_filter_enable }}
-      vlan_ids:
-        {% for vlan_id in vlan_ids %}
-        - {{ vlan_id }}
-        {% endfor %}
-      c_tags:
-        {% for c_tag in c_tags %}
-        - {{ c_tag }}
-        {% endfor %}
-      s_tags:
-        {% for s_tag in s_tags %}
-        - {{ s_tag }}
-        {% endfor %}
-      firewall_rules:
-        {% for firewall_rule in firewall_rules.split("\n") %}
-        - {{ firewall_rule }}
-        {% endfor %}
-      cdn_prefixes:
-        {% for prefix in cdn_prefixes %}
-        - {{ prefix }}
-        {% endfor %}
-      bbs_addrs:
-        {% for bbs_addr in bbs_addrs %}
-        - {{ bbs_addr }}
-        {% endfor %}
-      nat_ip: {{ nat_ip }}
-      nat_mac: {{ nat_mac }}
-      lan_ip: {{ lan_ip }}
-      lan_mac: {{ lan_mac }}
-      wan_ip: {{ wan_ip }}
-      wan_mac: {{ wan_mac }}
-      wan_container_mac: {{ wan_container_mac }}
-      wan_next_hop: 10.0.1.253   # FIX ME
-      private_ip: {{ private_ip }}
-      private_mac: {{ private_mac }}
-      hpc_client_ip: {{ hpc_client_ip }}
-      hpc_client_mac: {{ hpc_client_mac }}
-      keystone_tenant_id: {{ keystone_tenant_id }}
-      keystone_user_id: {{ keystone_user_id }}
-      rabbit_user: {{ rabbit_user }}
-      rabbit_password: {{ rabbit_password }}
-      rabbit_host: {{ rabbit_host }}
-      safe_browsing:
-        {% for mac in safe_browsing_macs %}
-        - {{ mac }}
-        {% endfor %}
-
-  tasks:
-  - name: Check whether the vcpe_stats_notifier cron job is already running (the [v] in the pgrep pattern keeps pgrep from matching this shell)
-    shell: pgrep -f [v]cpe_stats_notifier | wc -l
-    register: cron_job_pids_count
-
-#  - name: DEBUG
-#    debug: var=cron_job_pids_count.stdout
-
-  - name: make sure ~/bin exists
-    file: path=~/bin state=directory owner=root group=root
-    when: cron_job_pids_count.stdout == "0"
-
-  - name: Copy cron job to destination
-    copy: src=/opt/xos/observers/vcpe/vcpe_stats_notifier.py
-      dest=~/bin/vcpe_stats_notifier.py
-    when: cron_job_pids_count.stdout == "0"
-
-  - name: install python-kombu
-    apt: name=python-kombu state=present
-    when: cron_job_pids_count.stdout == "0"
-
-  - name: Initiate vcpe_stats_notifier cron job
-    command: python ~/bin/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
-    async: 9999999999999999
-    poll: 0
-    when: cron_job_pids_count.stdout == "0"
-
-  - name: vCPE basic dnsmasq config
-    copy: src=/opt/xos/observers/vcpe/files/vcpe.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/vcpe.conf owner=root group=root
-    notify:
-    - restart dnsmasq
-
-  - name: dnsmasq config
-    template: src=/opt/xos/observers/vcpe/templates/dnsmasq_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/servers.conf owner=root group=root
-    notify:
-    - restart dnsmasq
-
-  - name: create directory for "safe" config
-    file: path=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe state=directory
-
-  - name: dnsmasq "safe" config
-    template: src=/opt/xos/observers/vcpe/templates/dnsmasq_safe_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/servers.conf owner=root group=root
-    notify:
-    - restart dnsmasq
-
-  - name: copy base ufw files
-    synchronize: src=/opt/xos/observers/vcpe/files/etc/ufw/ dest=/var/container_volumes/{{ container_name }}/etc/ufw/
-    notify:
-    - reload ufw
-
-  - name: redirection rules for safe DNS
-    template: src=/opt/xos/observers/vcpe/templates/before.rules.j2 dest=/var/container_volumes/{{ container_name }}/etc/ufw/before.rules owner=root group=root
-    notify:
-    - reload ufw
-
-  - name: base ufw setup uses /etc/rc.local
-    copy: src=/opt/xos/observers/vcpe/files/etc/rc.local dest=/var/container_volumes/{{ container_name }}/etc/ owner=root group=root
-    notify:
-    - copy in /etc/rc.local
-
-  handlers:
-  # Dnsmasq is automatically restarted in the container
-  - name: restart dnsmasq
-    shell: docker exec {{ container_name }} /usr/bin/killall dnsmasq
-
-  - name: reload ufw
-    shell: docker exec {{ container_name }} bash -c "/sbin/iptables -t nat -F PREROUTING; /usr/sbin/ufw reload"
-
-  # Use docker cp instead of a single-file volume:
-  # changes to an externally-mounted file don't show up inside the container,
-  # probably because Ansible deletes and recreates the external file, leaving
-  # the container with a handle to the old version.
-  - name: copy in /etc/rc.local
-    shell: docker cp /var/container_volumes/{{ container_name }}/etc/rc.local {{ container_name }}:/etc/
diff --git a/xos/observers/vcpe/steps/test.yaml b/xos/observers/vcpe/steps/test.yaml
deleted file mode 100644
index fc8251d..0000000
--- a/xos/observers/vcpe/steps/test.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- hosts: {{ instance_name }}
-  connection: ssh
-  user: ubuntu
-  tasks:
-    - name: foobar
-      shell: echo foo > /tmp/foobar
diff --git a/xos/observers/vcpe/stop.sh b/xos/observers/vcpe/stop.sh
deleted file mode 100755
index e90e16c..0000000
--- a/xos/observers/vcpe/stop.sh
+++ /dev/null
@@ -1 +0,0 @@
-pkill -9 -f vcpe-observer.py
diff --git a/xos/observers/vcpe/supervisor/vcpe-observer.conf b/xos/observers/vcpe/supervisor/vcpe-observer.conf
deleted file mode 100644
index 27d2796..0000000
--- a/xos/observers/vcpe/supervisor/vcpe-observer.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[program:vcpe-observer]
-command=python /opt/xos/observers/vcpe/vcpe-observer.py -C /opt/xos/observers/vcpe/vcpe_observer_config
diff --git a/xos/observers/vcpe/templates/before.rules.j2 b/xos/observers/vcpe/templates/before.rules.j2
deleted file mode 100644
index e6f7d4a..0000000
--- a/xos/observers/vcpe/templates/before.rules.j2
+++ /dev/null
@@ -1,97 +0,0 @@
-#
-# rules.before
-#
-# Rules that should be run before the ufw command line added rules. Custom
-# rules should be added to one of these chains:
-#   ufw-before-input
-#   ufw-before-output
-#   ufw-before-forward
-#
-
-# nat Table rules
-*nat
-:POSTROUTING ACCEPT [0:0]
-
-# Forward traffic from eth1 through eth0.
--A POSTROUTING -o eth0 -j MASQUERADE
-
-# Set up NAT for CDN services
--A POSTROUTING -o eth2 -j MASQUERADE
-
-# DNS safe browsing
-{% if safe_browsing %}
-{% for mac in safe_browsing %}
--A PREROUTING -i eth1 -m mac --mac-source {{ mac }} -p udp --dport 53 -j REDIRECT --to-port 5353
--A PREROUTING -i eth1 -m mac --mac-source {{ mac }} -p tcp --dport 53 -j REDIRECT --to-port 5353
-{% endfor %}
-{% endif %}
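-# For a single subscriber MAC (illustrative value 02:42:ac:11:00:02), the loop
-# above renders, e.g.:
-#   -A PREROUTING -i eth1 -m mac --mac-source 02:42:ac:11:00:02 -p udp --dport 53 -j REDIRECT --to-port 5353
-#   -A PREROUTING -i eth1 -m mac --mac-source 02:42:ac:11:00:02 -p tcp --dport 53 -j REDIRECT --to-port 5353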
-
-# don't delete the 'COMMIT' line or these nat table rules won't be processed
-COMMIT
-
-# Don't delete these required lines, otherwise there will be errors
-*filter
-:ufw-before-input - [0:0]
-:ufw-before-output - [0:0]
-:ufw-before-forward - [0:0]
-:ufw-not-local - [0:0]
-# End required lines
-
-# allow all on loopback
--A ufw-before-input -i lo -j ACCEPT
--A ufw-before-output -o lo -j ACCEPT
-
-# quickly process packets for which we already have a connection
--A ufw-before-input -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
--A ufw-before-output -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
--A ufw-before-forward -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-
-# drop INVALID packets (logs these in loglevel medium and higher)
--A ufw-before-input -m conntrack --ctstate INVALID -j ufw-logging-deny
--A ufw-before-input -m conntrack --ctstate INVALID -j DROP
-
-# ok icmp codes for INPUT
--A ufw-before-input -p icmp --icmp-type destination-unreachable -j ACCEPT
--A ufw-before-input -p icmp --icmp-type source-quench -j ACCEPT
--A ufw-before-input -p icmp --icmp-type time-exceeded -j ACCEPT
--A ufw-before-input -p icmp --icmp-type parameter-problem -j ACCEPT
--A ufw-before-input -p icmp --icmp-type echo-request -j ACCEPT
-
-# ok icmp code for FORWARD
--A ufw-before-forward -p icmp --icmp-type destination-unreachable -j ACCEPT
--A ufw-before-forward -p icmp --icmp-type source-quench -j ACCEPT
--A ufw-before-forward -p icmp --icmp-type time-exceeded -j ACCEPT
--A ufw-before-forward -p icmp --icmp-type parameter-problem -j ACCEPT
--A ufw-before-forward -p icmp --icmp-type echo-request -j ACCEPT
-
-# allow dhcp client to work
--A ufw-before-input -p udp --sport 67 --dport 68 -j ACCEPT
-
-#
-# ufw-not-local
-#
--A ufw-before-input -j ufw-not-local
-
-# if LOCAL, RETURN
--A ufw-not-local -m addrtype --dst-type LOCAL -j RETURN
-
-# if MULTICAST, RETURN
--A ufw-not-local -m addrtype --dst-type MULTICAST -j RETURN
-
-# if BROADCAST, RETURN
--A ufw-not-local -m addrtype --dst-type BROADCAST -j RETURN
-
-# all other non-local packets are dropped
--A ufw-not-local -m limit --limit 3/min --limit-burst 10 -j ufw-logging-deny
--A ufw-not-local -j DROP
-
-# allow MULTICAST mDNS for service discovery (be sure the MULTICAST line above
-# is uncommented)
--A ufw-before-input -p udp -d 224.0.0.251 --dport 5353 -j ACCEPT
-
-# allow MULTICAST UPnP for service discovery (be sure the MULTICAST line above
-# is uncommented)
--A ufw-before-input -p udp -d 239.255.255.250 --dport 1900 -j ACCEPT
-
-# don't delete the 'COMMIT' line or these rules won't be processed
-COMMIT
diff --git a/xos/observers/vcpe/templates/dnsmasq_safe_servers.j2 b/xos/observers/vcpe/templates/dnsmasq_safe_servers.j2
deleted file mode 100644
index 2f93777..0000000
--- a/xos/observers/vcpe/templates/dnsmasq_safe_servers.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-# This file autogenerated by vCPE observer
-# It contains a list of DNS servers for dnsmasq to use.
-no-resolv
-
-{% if cdn_enable %}
-# CDN
-{% for prefix in cdn_prefixes %}
-server=/{{ prefix }}/{{ dnsdemux_ip }}
-{% endfor %}
-{% endif %}
-
-# use OpenDNS service
-server=208.67.222.123
-server=208.67.220.123
diff --git a/xos/observers/vcpe/templates/dnsmasq_servers.j2 b/xos/observers/vcpe/templates/dnsmasq_servers.j2
deleted file mode 100644
index c89c762..0000000
--- a/xos/observers/vcpe/templates/dnsmasq_servers.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-# This file autogenerated by vCPE observer
-# It contains a list of DNS servers for dnsmasq to use.
-no-resolv
-
-{% if cdn_enable %}
-# CDN
-{% for prefix in cdn_prefixes %}
-server=/{{ prefix }}/{{ dnsdemux_ip }}
-{% endfor %}
-{% endif %}
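-
-# Rendered example (prefix and dnsdemux address are assumed values):
-#   server=/downloads.example.com/10.0.0.5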
-
-# use google's DNS service
-server=8.8.8.8
-server=8.8.4.4
diff --git a/xos/observers/vcpe/templates/firewall_sample.j2 b/xos/observers/vcpe/templates/firewall_sample.j2
deleted file mode 100644
index ce85e68..0000000
--- a/xos/observers/vcpe/templates/firewall_sample.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-firewall_enable = {{ firewall_enable }}
-
-{% for firewall_rule in firewall_rules %}
-{{ firewall_rule }}
-{% endfor %}
diff --git a/xos/observers/vcpe/templates/start-vcpe.sh.j2 b/xos/observers/vcpe/templates/start-vcpe.sh.j2
deleted file mode 100755
index c4128f3..0000000
--- a/xos/observers/vcpe/templates/start-vcpe.sh.j2
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-
-function mac_to_iface {
-    MAC=$1
-    ifconfig|grep $MAC| awk '{print $1}'|grep -v '\.'
-}
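-# Usage (illustrative): "mac_to_iface 02:42:ac:11:00:02" prints the matching
-# interface name, e.g. eth1; the trailing grep -v '\.' drops VLAN sub-interfaces.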
-
-iptables -L > /dev/null
-ip6tables -L > /dev/null
-
-STAG={{ s_tags[0] }}
-CTAG={{ c_tags[0] }}
-VCPE=vcpe-$STAG-$CTAG
-
-docker inspect $VCPE > /dev/null 2>&1
-if [ "$?" == 1 ]
-then
-    docker pull andybavier/docker-vcpe
-    docker run -d --name=$VCPE --privileged=true --net=none -v /etc/$VCPE/dnsmasq.d:/etc/dnsmasq.d andybavier/docker-vcpe
-else
-    docker start $VCPE
-fi
-
-# Set up networking via pipework
-WAN_IFACE=$( mac_to_iface {{ wan_mac }} )
-docker exec $VCPE ifconfig eth0 >> /dev/null || pipework $WAN_IFACE -i eth0 $VCPE {{ wan_ip }}/24@{{ wan_next_hop }} {{ wan_container_mac }}
-
-# LAN_IFACE=$( mac_to_iface {{ lan_mac }} )
-# Need to encapsulate VLAN traffic so that Neutron doesn't eat it
-# Assumes that br-lan has been set up appropriately by a previous step
-LAN_IFACE=br-lan
-ifconfig $LAN_IFACE >> /dev/null
-if [ "$?" == 0 ]
-then
-    ifconfig $LAN_IFACE.$STAG >> /dev/null || ip link add link $LAN_IFACE name $LAN_IFACE.$STAG type vlan id $STAG
-    ifconfig $LAN_IFACE.$STAG up
-    docker exec $VCPE ifconfig eth1 >> /dev/null || pipework $LAN_IFACE.$STAG -i eth1 $VCPE 192.168.0.1/24 @$CTAG
-fi
-
-#HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
-#docker exec $VCPE ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VCPE {{ hpc_client_ip }}/24
-
-# Make sure VM's eth0 (hpc_client) has no IP address
-#ifconfig $HPC_IFACE 0.0.0.0
-
-# Now can start up dnsmasq
-docker exec $VCPE service dnsmasq start
-
-# Attach to container
-docker start -a $VCPE
diff --git a/xos/observers/vcpe/templates/vcpe.conf.j2 b/xos/observers/vcpe/templates/vcpe.conf.j2
deleted file mode 100644
index fa7885e..0000000
--- a/xos/observers/vcpe/templates/vcpe.conf.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-# Upstart script for vCPE
-description "vCPE container"
-author "andy@onlab.us"
-start on filesystem and started docker
-stop on runlevel [!2345]
-respawn
-
-script
-  /usr/local/sbin/start-vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.sh
-end script
diff --git a/xos/observers/vcpe/templates/vlan_sample.j2 b/xos/observers/vcpe/templates/vlan_sample.j2
deleted file mode 100644
index a26c840..0000000
--- a/xos/observers/vcpe/templates/vlan_sample.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-# below is a list of all vlan_ids associated with this vcpe
-
-{% for vlan_id in vlan_ids %}
-{{ vlan_id }}
-{% endfor %}
diff --git a/xos/observers/vcpe/vcpe-observer.py b/xos/observers/vcpe/vcpe-observer.py
deleted file mode 100755
index d6a71ff..0000000
--- a/xos/observers/vcpe/vcpe-observer.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-
-# This imports and runs ../../xos-observer.py
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../..")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-observer")
-mod.main()
diff --git a/xos/observers/vcpe/vcpe_observer_config b/xos/observers/vcpe/vcpe_observer_config
deleted file mode 100644
index d2c9239..0000000
--- a/xos/observers/vcpe/vcpe_observer_config
+++ /dev/null
@@ -1,42 +0,0 @@
-
-[plc]
-name=plc
-deployment=VICCI
-
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-[api]
-host=128.112.171.237
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-name=vcpe
-dependency_graph=/opt/xos/observers/vcpe/model-deps
-steps_dir=/opt/xos/observers/vcpe/steps
-sys_dir=/opt/xos/observers/vcpe/sys
-deleters_dir=/opt/xos/observers/vcpe/deleters
-log_file=console
-#/var/log/hpc.log
-driver=None
-pretend=False
-backoff_disabled=True
-save_ansible_output=True
-# set proxy_ssh to false on cloudlab
-proxy_ssh=False
-full_setup=True
-
-[feefie]
-client_id='vicci_dev_central'
-user_id='pl'
diff --git a/xos/observers/vcpe/vcpe_stats_notifier.py b/xos/observers/vcpe/vcpe_stats_notifier.py
deleted file mode 100644
index d726e3c..0000000
--- a/xos/observers/vcpe/vcpe_stats_notifier.py
+++ /dev/null
@@ -1,268 +0,0 @@
-import six
-import uuid
-import datetime
-from kombu.connection import BrokerConnection
-from kombu.messaging import Exchange, Queue, Consumer, Producer
-import subprocess
-import re
-import time, threading
-import sys, getopt
-import logging
-import logging.handlers
-
-
-logfile = "vcpe_stats_notifier.log"
-level=logging.INFO
-logger=logging.getLogger('vcpe_stats_notifier')
-logger.setLevel(level)
-# create formatter
-formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s")
-handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=1)
-# add formatter to handler
-handler.setFormatter(formatter)
-logger.addHandler(handler)
-
-def extract_dns_stats_from_all_vcpes():
-    p = subprocess.Popen('docker ps', shell=True, stdout=subprocess.PIPE) 
-    firstline = True
-    dockercontainers = {}
-    while True:
-        out = p.stdout.readline()
-        if out == '' and p.poll() is not None:
-            break
-        if out != '':
-            if firstline is True:
-                firstline = False
-            else:
-                fields = out.split()
-                container_fields = {}
-                container_fields['id'] = fields[0]
-                dockercontainers[fields[-1]] = container_fields
-    for k,v in dockercontainers.iteritems():
-         cmd = 'docker exec ' + v['id'] + ' killall -10 dnsmasq'
-         p = subprocess.Popen (cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
-         (output, error) = p.communicate()
-         if error:
-             logger.error("killall dnsmasq command failed with error = %s",error)
-             continue
-         cmd = 'docker exec ' + v['id'] + ' tail -7 /var/log/syslog'
-         p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
-         (output, error) = p.communicate()
-         if error:
-             logger.error("tail on dnsmasq log command failed with error = %s",error)
-             continue
-         log_list = output.splitlines()
-         i = 0
-         while i < len(log_list):
-             m = re.search('(?<=:\scache size\s)(\S*)(?=,\s),\s(\S*)(?=/)/(\S*)(?=\scache insertions re-used unexpired cache entries)', log_list[i])
-             if m is None:
-                 i = i+1
-                 continue
-             v['cache_size'] = m.group(1)
-             v['replaced_unexpired_entries'] = m.group(2)
-             v['total_inserted_entries'] = m.group(3)
-             i = i+1
-             m = re.search('(?<=:\squeries forwarded\s)(\S*)(?=,),\squeries answered locally\s(\S*)(?=$)', log_list[i])
-             v['queries_forwarded'] = m.group(1)
-             v['queries_answered_locally'] = m.group(2)
-             break
-         i = i+2
-         v['server_stats'] = []
-         while i < len(log_list):
-             m = re.search('(?<=:\sserver\s)(\S*)(?=#)#\d*:\squeries sent\s(\S*)(?=,),\sretried or failed\s(\S*)(?=$)', log_list[i])
-             if m is None:
-                 i = i+1
-                 continue
-             dns_server = {}
-             dns_server['id'] = m.group(1)
-             dns_server['queries_sent'] = m.group(2)
-             dns_server['queries_failed'] = m.group(3)
-             v['server_stats'].append(dns_server)
-             i = i+1
-    return dockercontainers
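-
-# Shape of the returned dict, with assumed example values:
-#   {"vcpe-111-222": {"id": "d1e4c9a0b3f2",
-#                     "cache_size": "150",
-#                     "replaced_unexpired_entries": "0",
-#                     "total_inserted_entries": "400",
-#                     "queries_forwarded": "1200",
-#                     "queries_answered_locally": "300",
-#                     "server_stats": [{"id": "8.8.8.8",
-#                                       "queries_sent": "600",
-#                                       "queries_failed": "2"}]}}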
-
-
-keystone_tenant_id='3a397e70f64e4e40b69b6266c634d9d0'
-keystone_user_id='1e3ce043029547f1a61c1996d1a531a2'
-rabbit_user='openstack'
-rabbit_password='80608318c273f348a7c3'
-rabbit_host='10.11.10.1'
-vcpeservice_rabbit_exchange='vcpeservice'
-cpe_publisher_id='vcpe_publisher'
-
-producer = None
-
-def setup_rabbit_mq_channel():
-     global producer
-     global rabbit_user, rabbit_password, rabbit_host, vcpeservice_rabbit_exchange,cpe_publisher_id
-     vcpeservice_exchange = Exchange(vcpeservice_rabbit_exchange, "topic", durable=False)
-     # connections/channels
-     connection = BrokerConnection(rabbit_host, rabbit_user, rabbit_password)
-     logger.info('Connection to RabbitMQ server successful')
-     channel = connection.channel()
-     # produce
-     producer = Producer(channel, exchange=vcpeservice_exchange, routing_key='notifications.info')
-     p = subprocess.Popen('hostname', shell=True, stdout=subprocess.PIPE)
-     (hostname, error) = p.communicate()
-     cpe_publisher_id = cpe_publisher_id + '_on_' + hostname.strip()
-     logger.info('cpe_publisher_id=%s',cpe_publisher_id)
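-     # After this, cpe_publisher_id reads e.g. 'vcpe_publisher_on_node1'
-     # ('node1' is an assumed hostname).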
-
-def publish_cpe_stats():
-     global producer
-     global keystone_tenant_id, keystone_user_id, cpe_publisher_id
-
-     logger.debug('publish_cpe_stats invoked')
-
-     cpe_container_stats = extract_dns_stats_from_all_vcpes()
-
-     for k,v in cpe_container_stats.iteritems():
-          msg = {'event_type': 'vcpe', 
-                 'message_id':six.text_type(uuid.uuid4()),
-                 'publisher_id': cpe_publisher_id,
-                 'timestamp':datetime.datetime.now().isoformat(),
-                 'priority':'INFO',
-                 'payload': {'vcpe_id':k, 
-                             'user_id':keystone_user_id, 
-                             'tenant_id':keystone_tenant_id 
-                            }
-                }
-          producer.publish(msg)
-          logger.debug('Publishing vcpe event: %s', msg)
-
-          if 'cache_size' in v:
-               msg = {'event_type': 'vcpe.dns.cache.size', 
-                      'message_id':six.text_type(uuid.uuid4()),
-                      'publisher_id': cpe_publisher_id,
-                      'timestamp':datetime.datetime.now().isoformat(),
-                      'priority':'INFO',
-                      'payload': {'vcpe_id':k, 
-                                  'user_id':keystone_user_id,
-                                  'tenant_id':keystone_tenant_id, 
-                                  'cache_size':v['cache_size'] 
-                                 }
-                     }
-               producer.publish(msg)
-               logger.debug('Publishing vcpe.dns.cache.size event: %s', msg)
-
-          if 'total_inserted_entries' in v:
-               msg = {'event_type': 'vcpe.dns.total_inserted_entries', 
-                      'message_id':six.text_type(uuid.uuid4()),
-                      'publisher_id': cpe_publisher_id,
-                      'timestamp':datetime.datetime.now().isoformat(),
-                      'priority':'INFO',
-                      'payload': {'vcpe_id':k, 
-                                  'user_id':keystone_user_id,
-                                  'tenant_id':keystone_tenant_id, 
-                                  'total_inserted_entries':v['total_inserted_entries'] 
-                                 }
-                     }
-               producer.publish(msg)
-               logger.debug('Publishing vcpe.dns.total_inserted_entries event: %s', msg)
-
-          if 'replaced_unexpired_entries' in v:
-               msg = {'event_type': 'vcpe.dns.replaced_unexpired_entries', 
-                      'message_id':six.text_type(uuid.uuid4()),
-                      'publisher_id': cpe_publisher_id,
-                      'timestamp':datetime.datetime.now().isoformat(),
-                      'priority':'INFO',
-                      'payload': {'vcpe_id':k, 
-                                  'user_id':keystone_user_id,
-                                  'tenant_id':keystone_tenant_id, 
-                                  'replaced_unexpired_entries':v['replaced_unexpired_entries'] 
-                                 }
-                     }
-               producer.publish(msg)
-               logger.debug('Publishing vcpe.dns.replaced_unexpired_entries event: %s', msg)
-
-          if 'queries_forwarded' in v:
-               msg = {'event_type': 'vcpe.dns.queries_forwarded', 
-                      'message_id':six.text_type(uuid.uuid4()),
-                      'publisher_id': cpe_publisher_id,
-                      'timestamp':datetime.datetime.now().isoformat(),
-                      'priority':'INFO',
-                      'payload': {'vcpe_id':k, 
-                                  'user_id':keystone_user_id,
-                                  'tenant_id':keystone_tenant_id, 
-                                  'queries_forwarded':v['queries_forwarded'] 
-                                 }
-                     }
-               producer.publish(msg)
-               logger.debug('Publishing vcpe.dns.queries_forwarded event: %s', msg)
-
-          if 'queries_answered_locally' in v:
-               msg = {'event_type': 'vcpe.dns.queries_answered_locally', 
-                      'message_id':six.text_type(uuid.uuid4()),
-                      'publisher_id': cpe_publisher_id,
-                      'timestamp':datetime.datetime.now().isoformat(),
-                      'priority':'INFO',
-                      'payload': {'vcpe_id':k, 
-                                  'user_id':keystone_user_id,
-                                  'tenant_id':keystone_tenant_id, 
-                                  'queries_answered_locally':v['queries_answered_locally'] 
-                                 }
-                     }
-               producer.publish(msg)
-               logger.debug('Publishing vcpe.dns.queries_answered_locally event: %s', msg)
-
-          if 'server_stats' in v:
-               for server in v['server_stats']:
-                   msg = {'event_type': 'vcpe.dns.server.queries_sent', 
-                          'message_id':six.text_type(uuid.uuid4()),
-                          'publisher_id': cpe_publisher_id,
-                          'timestamp':datetime.datetime.now().isoformat(),
-                          'priority':'INFO',
-                          'payload': {'vcpe_id':k, 
-                                      'user_id':keystone_user_id,
-                                      'tenant_id':keystone_tenant_id, 
-                                      'upstream_server':server['id'],
-                                      'queries_sent':server['queries_sent'] 
-                                     }
-                         }
-                   producer.publish(msg)
-                   logger.debug('Publishing vcpe.dns.server.queries_sent event: %s', msg)
-
-                   msg = {'event_type': 'vcpe.dns.server.queries_failed', 
-                          'message_id':six.text_type(uuid.uuid4()),
-                          'publisher_id': cpe_publisher_id,
-                          'timestamp':datetime.datetime.now().isoformat(),
-                          'priority':'INFO',
-                          'payload': {'vcpe_id':k, 
-                                      'user_id':keystone_user_id,
-                                      'tenant_id':keystone_tenant_id, 
-                                      'upstream_server':server['id'],
-                                      'queries_failed':server['queries_failed'] 
-                                     }
-                         }
-                   producer.publish(msg)
-                   logger.debug('Publishing vcpe.dns.server.queries_failed event: %s', msg)
-
-def periodic_publish():
-     publish_cpe_stats()
-     # Publish every 5 minutes
-     threading.Timer(300, periodic_publish).start()
-
-def main(argv):
-   global keystone_tenant_id, keystone_user_id, rabbit_user, rabbit_password, rabbit_host, vcpeservice_rabbit_exchange
-   try:
-      opts, args = getopt.getopt(argv,"",["keystone_tenant_id=","keystone_user_id=","rabbit_host=","rabbit_user=","rabbit_password=","vcpeservice_rabbit_exchange="])
-   except getopt.GetoptError:
-      print 'vcpe_stats_notifier.py --keystone_tenant_id=<keystone_tenant_id> --keystone_user_id=<keystone_user_id> --rabbit_host=<IP addr> --rabbit_user=<user> --rabbit_password=<password> --vcpeservice_rabbit_exchange=<exchange name>'
-      sys.exit(2)
-   for opt, arg in opts:
-      if opt in ("--keystone_tenant_id"):
-         keystone_tenant_id = arg
-      elif opt in ("--keystone_user_id"):
-         keystone_user_id = arg
-      elif opt in ("--rabbit_user"):
-         rabbit_user = arg
-      elif opt in ("--rabbit_password"):
-         rabbit_password = arg
-      elif opt in ("--rabbit_host"):
-         rabbit_host = arg
-      elif opt in ("--vcpeservice_rabbit_exchange"):
-         vcpeservice_rabbit_exchange = arg
-   logger.info("vcpe_stats_notifier args:keystone_tenant_id=%s keystone_user_id=%s rabbit_user=%s rabbit_host=%s vcpeservice_rabbit_exchange=%s",keystone_tenant_id,keystone_user_id,rabbit_user,rabbit_host,vcpeservice_rabbit_exchange)
-   setup_rabbit_mq_channel()
-   periodic_publish()
-
-if __name__ == "__main__":
-   main(sys.argv[1:])
diff --git a/xos/observers/vtn/model-deps b/xos/observers/vtn/model-deps
deleted file mode 100644
index 0967ef4..0000000
--- a/xos/observers/vtn/model-deps
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/xos/observers/vtn/run.sh b/xos/observers/vtn/run.sh
deleted file mode 100755
index d7e5025..0000000
--- a/xos/observers/vtn/run.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-export XOS_DIR=/opt/xos
-python vtn-observer.py  -C $XOS_DIR/observers/vtn/vtn_observer_config
diff --git a/xos/observers/vtn/start.sh b/xos/observers/vtn/start.sh
deleted file mode 100755
index 168074d..0000000
--- a/xos/observers/vtn/start.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-export XOS_DIR=/opt/xos
-nohup python vtn-observer.py  -C $XOS_DIR/observers/vtn/vtn_observer_config > /dev/null 2>&1 &
diff --git a/xos/observers/vtn/steps/sync_tenant.py b/xos/observers/vtn/steps/sync_tenant.py
deleted file mode 100644
index 77e6a93..0000000
--- a/xos/observers/vtn/steps/sync_tenant.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import os
-import requests
-import socket
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from core.models import Service
-from core.models.service import COARSE_KIND
-from services.cord.models import Tenant
-from util.logger import Logger, logging
-from requests.auth import HTTPBasicAuth
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-# XXX should save and load this
-glo_saved_vtn_maps = []
-
-class SyncTenant(SyncStep):
-    provides=[Tenant]
-    observes=Tenant
-    requested_interval=0
-
-    def __init__(self, **args):
-        SyncStep.__init__(self, **args)
-
-    def get_vtn_onos_service(self):
-#        vtn_tenant = Tenant.objects.filter(name="VTN_ONOS_app")   # XXX fixme - hardcoded
-#        if not vtn_tenant:
-#            raise "No VTN Onos App found"
-#        vtn_tenant = vtn_tenant[0]
-#
-#        vtn_service = vtn_tenant.provider_service
-        vtn_service = Service.objects.filter(name="service_ONOS_VTN")  # XXX fixme - hardcoded
-        if not vtn_service:
-            raise "No VTN Onos Service"
-
-        return vtn_service[0]
-
-    def get_vtn_addr(self):
-        vtn_service = self.get_vtn_onos_service()
-
-        if not vtn_service.slices.exists():
-            raise "VTN Service has no slices"
-
-        vtn_slice = vtn_service.slices.all()[0]
-
-        if not vtn_slice.instances.exists():
-            raise "VTN Slice has no instances"
-
-        vtn_instance = vtn_slice.instances.all()[0]
-
-        return vtn_instance.node.name
-
-    def call(self, **args):
-        global glo_saved_vtn_maps
-
-        logger.info("sync'ing vtn services")
-
-        vtn_maps = []
-        for service in Service.objects.all():
-           for id in service.get_vtn_src_ids():
-               dependencies = service.get_vtn_dependencies_ids()
-               if dependencies:
-                   for dependency in dependencies:
-                       vtn_maps.append( (id, dependency) )
-
-        for vtn_map in vtn_maps:
-            if vtn_map not in glo_saved_vtn_maps:
-                # call vtn rest api to add map
-                url = "http://" + self.get_vtn_addr() + ":8181/onos/cordvtn/service-dependency/%s/%s" % (vtn_map[0], vtn_map[1])
-
-                print "POST %s" % url
-                r = requests.post(url, auth=HTTPBasicAuth('karaf', 'karaf') )    # XXX fixme - hardcoded auth
-                if (r.status_code != 200):
-                    raise Exception("Received error from vtn service (%d)" % r.status_code)
-
-        for vtn_map in glo_saved_vtn_maps:
-            if vtn_map not in vtn_maps:
-                # call vtn rest api to delete map
-                url = "http://" + self.get_vtn_addr() + ":8181/onos/cordvtn/service-dependency/%s/%s" % (vtn_map[0],vtn_map[1])
-
-                print "DELETE %s" % url
-                r = requests.delete(url, auth=HTTPBasicAuth('karaf', 'karaf') )    # XXX fixme - hardcoded auth
-                if (r.status_code != 200):
-                    raise Exception("Received error from vtn service (%d)" % r.status_code)
-
-        glo_saved_vtn_maps = vtn_maps
-        # TODO: save this
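-        # Each vtn_map is a (src_id, dependency_id) pair; with assumed ids 5 and 7,
-        # the REST calls above hit:
-        #   http://<vtn_addr>:8181/onos/cordvtn/service-dependency/5/7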
-
diff --git a/xos/observers/vtn/stop.sh b/xos/observers/vtn/stop.sh
deleted file mode 100755
index 261f029..0000000
--- a/xos/observers/vtn/stop.sh
+++ /dev/null
@@ -1 +0,0 @@
-pkill -9 -f vtn-observer.py
diff --git a/xos/observers/vtn/supervisor/vtn-observer.conf b/xos/observers/vtn/supervisor/vtn-observer.conf
deleted file mode 100644
index 714afa7..0000000
--- a/xos/observers/vtn/supervisor/vtn-observer.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[program:vtn-observer]
-command=python /opt/xos/observers/vtn/vtn-observer.py -C /opt/xos/observers/vtn/vtn_observer_config
diff --git a/xos/observers/vtn/vtn-observer.py b/xos/observers/vtn/vtn-observer.py
deleted file mode 100755
index d6a71ff..0000000
--- a/xos/observers/vtn/vtn-observer.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-
-# This imports and runs ../../xos-observer.py
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../..")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-observer")
-mod.main()
diff --git a/xos/observers/vtn/vtn_observer_config b/xos/observers/vtn/vtn_observer_config
deleted file mode 100644
index 19e9a39..0000000
--- a/xos/observers/vtn/vtn_observer_config
+++ /dev/null
@@ -1,38 +0,0 @@
-
-[plc]
-name=plc
-deployment=VICCI
-
-[db]
-name=xos
-user=postgres
-password=password
-host=localhost
-port=5432
-
-[api]
-host=128.112.171.237
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-name=vtn
-dependency_graph=/opt/xos/observers/vtn/model-deps
-steps_dir=/opt/xos/observers/vtn/steps
-sys_dir=/opt/xos/observers/vtn/sys
-deleters_dir=/opt/xos/observers/vtn/deleters
-log_file=console
-#/var/log/hpc.log
-driver=None
-pretend=False
-backoff_disabled=True
-
-[feefie]
-client_id='vicci_dev_central'
-user_id='pl'
diff --git a/xos/synchronizers/base/SyncInstanceUsingAnsible.py b/xos/synchronizers/base/SyncInstanceUsingAnsible.py
index 302c1cc..aafbd85 100644
--- a/xos/synchronizers/base/SyncInstanceUsingAnsible.py
+++ b/xos/synchronizers/base/SyncInstanceUsingAnsible.py
@@ -6,8 +6,8 @@
 import time
 from django.db.models import F, Q
 from xos.config import Config
-from synchronizer.base.syncstep import SyncStep
-from synchronizer.base.ansible import run_template_ssh
+from synchronizers.base.syncstep import SyncStep
+from synchronizers.base.ansible import run_template_ssh
 from core.models import Service, Slice, ControllerSlice, ControllerUser
 from util.logger import Logger, logging
 
diff --git a/xos/synchronizers/base/SyncSliverUsingAnsible.py b/xos/synchronizers/base/SyncSliverUsingAnsible.py
index a99eb1e..a76b300 100644
--- a/xos/synchronizers/base/SyncSliverUsingAnsible.py
+++ b/xos/synchronizers/base/SyncSliverUsingAnsible.py
@@ -6,8 +6,8 @@
 import time
 from django.db.models import F, Q
 from xos.config import Config
-from synchronizer.base.syncstep import SyncStep
-from synchronizer.base.ansible import run_template_ssh
+from synchronizers.base.syncstep import SyncStep
+from synchronizers.base.ansible import run_template_ssh
 from core.models import Service, Slice
 from util.logger import Logger, logging
 
diff --git a/xos/synchronizers/base/backend.py b/xos/synchronizers/base/backend.py
index ec22f79..107ba2c 100644
--- a/xos/synchronizers/base/backend.py
+++ b/xos/synchronizers/base/backend.py
@@ -5,7 +5,7 @@
 from synchronizers.base.event_loop import XOSObserver
 from synchronizers.base.event_manager import EventListener
 from util.logger import Logger, logging
-from synchronizers.model_policy import run_policy
+from model_policy import run_policy
 from xos.config import Config
 
 logger = Logger(level=logging.INFO)