Replicated the OpenStack synchronizer into a base synchronizer, to be
trimmed and cleaned up later
diff --git a/xos/synchronizers/base/SyncInstanceUsingAnsible.py b/xos/synchronizers/base/SyncInstanceUsingAnsible.py
new file mode 100644
index 0000000..302c1cc
--- /dev/null
+++ b/xos/synchronizers/base/SyncInstanceUsingAnsible.py
@@ -0,0 +1,221 @@
+import hashlib
+import json
+import os
+import socket
+import sys
+import base64
+import time
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.syncstep import SyncStep, InnocuousException
+from synchronizers.base.ansible import run_template_ssh
+from core.models import Service, Slice, ControllerSlice, ControllerUser
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncInstanceUsingAnsible(SyncStep):
+    # All of the following should be defined for classes derived from this
+    # base class. Examples below use VCPETenant.
+
+    # provides=[VCPETenant]
+    # observes=VCPETenant
+    # requested_interval=0
+    # template_name = "sync_vcpetenant.yaml"
+    # service_key_name = "/opt/xos/observers/vcpe/vcpe_private_key"
+
+    def __init__(self, **args):
+        SyncStep.__init__(self, **args)
+
+    def defer_sync(self, o, reason):
+        logger.info("defer object %s due to %s" % (str(o), reason))
+        raise Exception("defer object %s due to %s" % (str(o), reason))
+
+    def get_extra_attributes(self, o):
+        # This is a place to include extra attributes that aren't part of the
+        # object itself.
+
+        return {}
+
+    def get_instance(self, o):
+        # We need to know what instance is associated with the object. Let's
+        # assume 'o' has a field called 'instance'. If the field is called
+        # something else, or if custom logic is needed, then override this
+        # method.
+
+        return o.instance
+
+    def run_playbook(self, o, fields, template_name=None):
+        if not template_name:
+            template_name = self.template_name
+        tStart = time.time()
+        res = run_template_ssh(template_name, fields)
+        logger.info("playbook execution time %d" % int(time.time()-tStart))
+        return res
+
+    def pre_sync_hook(self, o, fields):
+        pass
+
+    def post_sync_hook(self, o, fields):
+        pass
+
+    def sync_fields(self, o, fields):
+        self.run_playbook(o, fields)
+
+    def prepare_record(self, o):
+        pass
+
+    def get_node(self,o):
+        return o.node
+
+    def get_node_key(self, node):
+        return "/root/setup/node_key"
+
+    def get_ansible_fields(self, instance):
+        # return all of the fields that tell Ansible how to talk to the context
+        # that's setting up the container.
+
+        if (instance.isolation == "vm"):
+            # legacy where container was configured by sync_vcpetenant.py
+
+            fields = { "instance_name": instance.name,
+                       "hostname": instance.node.name,
+                       "instance_id": instance.instance_id,
+                       "username": "ubuntu",
+                     }
+            key_name = self.service_key_name
+        elif (instance.isolation == "container"):
+            # container on bare metal
+            node = self.get_node(instance)
+            hostname = node.name
+            fields = { "hostname": hostname,
+                       "baremetal_ssh": True,
+                       "instance_name": "rootcontext",
+                       "username": "root",
+                       "container_name": "%s-%s" % (instance.slice.name, str(instance.id))
+                     }
+            key_name = self.get_node_key(node)
+        else:
+            # container in a VM
+            if not instance.parent:
+                raise Exception("Container-in-VM has no parent")
+            if not instance.parent.instance_id:
+                raise Exception("Container-in-VM parent is not yet instantiated")
+            if not instance.parent.slice.service:
+                raise Exception("Container-in-VM parent has no service")
+            if not instance.parent.slice.service.private_key_fn:
+                raise Exception("Container-in-VM parent service has no private_key_fn")
+            fields = { "hostname": instance.parent.node.name,
+                       "instance_name": instance.parent.name,
+                       "instance_id": instance.parent.instance_id,
+                       "username": "ubuntu",
+                       "nat_ip": instance.parent.get_ssh_ip(),
+                       "container_name": "%s-%s" % (instance.slice.name, str(instance.id))
+                         }
+            key_name = instance.parent.slice.service.private_key_fn
+
+        if not os.path.exists(key_name):
+            raise Exception("Node key %s does not exist" % key_name)
+
+        key = open(key_name).read()
+
+        fields["private_key"] = key
+
+        # now the ceilometer stuff
+
+        # note: objects.get() raises DoesNotExist rather than returning None,
+        # so use filter().first() to keep the existence checks meaningful
+        cslice = ControllerSlice.objects.filter(slice=instance.slice).first()
+        if not cslice:
+            raise Exception("Controller slice object for %s does not exist" % instance.slice.name)
+
+        cuser = ControllerUser.objects.filter(user=instance.creator).first()
+        if not cuser:
+            raise Exception("Controller user object for %s does not exist" % instance.creator)
+
+        fields.update({"keystone_tenant_id": cslice.tenant_id,
+                       "keystone_user_id": cuser.kuser_id,
+                       "rabbit_user": instance.controller.rabbit_user,
+                       "rabbit_password": instance.controller.rabbit_password,
+                       "rabbit_host": instance.controller.rabbit_host})
+
+        return fields
+
+    def sync_record(self, o):
+        logger.info("sync'ing object %s" % str(o))
+
+        self.prepare_record(o)
+
+        instance = self.get_instance(o)
+
+        if isinstance(instance, basestring):
+            # sync to some external host
+
+            # XXX - this probably needs more work... note that 'service' is
+            # not defined in this scope, so this branch will fail until the
+            # key is derived from the object being synced.
+
+            fields = { "hostname": instance,
+                       "instance_id": "ubuntu",     # this is the username to log into
+                       "private_key": service.key,
+                     }
+        else:
+            # sync to an XOS instance
+            if not instance:
+                self.defer_sync(o, "waiting on instance")
+                return
+
+            if not instance.instance_name:
+                self.defer_sync(o, "waiting on instance.instance_name")
+                return
+
+            fields = self.get_ansible_fields(instance)
+
+            fields["ansible_tag"] =  o.__class__.__name__ + "_" + str(o.id)
+
+        # If 'o' defines a 'sync_attributes' list, then we'll copy those
+        # attributes into the Ansible recipe's field list automatically.
+        if hasattr(o, "sync_attributes"):
+            for attribute_name in o.sync_attributes:
+                fields[attribute_name] = getattr(o, attribute_name)
+
+        fields.update(self.get_extra_attributes(o))
+
+        self.sync_fields(o, fields)
+
+        o.save()
+
+    def delete_record(self, o):
+        try:
+            controller = o.get_controller()
+            controller_register = json.loads(o.node.site_deployment.controller.backend_register)
+
+            if (controller_register.get('disabled',False)):
+                raise InnocuousException('Controller %s is disabled'%o.node.site_deployment.controller.name)
+        except AttributeError:
+            pass
+
+        instance = self.get_instance(o)
+        if isinstance(instance, basestring):
+            # sync to some external host
+
+            # XXX - this probably needs more work... as above, 'service' is
+            # not defined in this scope.
+
+            fields = { "hostname": instance,
+                       "instance_id": "ubuntu",     # this is the username to log into
+                       "private_key": service.key,
+                     }
+        else:
+            # sync to an XOS instance
+            fields = self.get_ansible_fields(instance)
+
+            fields["ansible_tag"] =  o.__class__.__name__ + "_" + str(o.id)
+
+        # If 'o' defines a 'sync_attributes' list, then we'll copy those
+        # attributes into the Ansible recipe's field list automatically.
+        if hasattr(o, "sync_attributes"):
+            for attribute_name in o.sync_attributes:
+                fields[attribute_name] = getattr(o, attribute_name)
+
+        fields.update(self.map_delete_inputs(o))
+
+        fields['delete']=True
+        res = self.run_playbook(o,fields)
+        try:
+            self.map_delete_outputs(o,res)
+        except AttributeError:
+            pass
diff --git a/xos/synchronizers/base/SyncSliverUsingAnsible.py b/xos/synchronizers/base/SyncSliverUsingAnsible.py
new file mode 100644
index 0000000..a99eb1e
--- /dev/null
+++ b/xos/synchronizers/base/SyncSliverUsingAnsible.py
@@ -0,0 +1,95 @@
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.syncstep import SyncStep
+from synchronizers.base.ansible import run_template_ssh
+from core.models import Service, Slice
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncInstanceUsingAnsible(SyncStep):
+    # All of the following should be defined for classes derived from this
+    # base class. Examples below use VCPETenant.
+
+    # provides=[VCPETenant]
+    # observes=VCPETenant
+    # requested_interval=0
+    # template_name = "sync_vcpetenant.yaml"
+    # service_key_name = "/opt/xos/observers/vcpe/vcpe_private_key"
+
+    def __init__(self, **args):
+        SyncStep.__init__(self, **args)
+
+    def defer_sync(self, o, reason):
+        logger.info("defer object %s due to %s" % (str(o), reason))
+        raise Exception("defer object %s due to %s" % (str(o), reason))
+
+    def get_extra_attributes(self, o):
+        # This is a place to include extra attributes that aren't part of the
+        # object itself.
+
+        return {}
+
+    def get_instance(self, o):
+        # We need to know what instance is associated with the object. Let's
+        # assume 'o' has a field called 'instance'. If the field is called
+        # something else, or if custom logic is needed, then override this
+        # method.
+
+        return o.instance
+
+    def run_playbook(self, o, fields):
+        tStart = time.time()
+        run_template_ssh(self.template_name, fields)
+        logger.info("playbook execution time %d" % int(time.time()-tStart))
+
+    def pre_sync_hook(self, o, fields):
+        pass
+
+    def post_sync_hook(self, o, fields):
+        pass
+
+    def sync_fields(self, o, fields):
+        self.run_playbook(o, fields)
+
+    def sync_record(self, o):
+        logger.info("sync'ing object %s" % str(o))
+
+        instance = self.get_instance(o)
+        if not instance:
+            self.defer_sync(o, "waiting on instance")
+            return
+
+        if not os.path.exists(self.service_key_name):
+            raise Exception("Service key %s does not exist" % self.service_key_name)
+
+        service_key = open(self.service_key_name).read()
+
+        fields = { "instance_name": instance.name,
+                   "hostname": instance.node.name,
+                   "instance_id": instance.instance_id,
+                   "private_key": service_key,
+                   "ansible_tag": "vcpe_tenant_" + str(o.id)
+                 }
+
+        # If 'o' defines a 'sync_attributes' list, then we'll copy those
+        # attributes into the Ansible recipe's field list automatically.
+        if hasattr(o, "sync_attributes"):
+            for attribute_name in o.sync_attributes:
+                fields[attribute_name] = getattr(o, attribute_name)
+
+        fields.update(self.get_extra_attributes(o))
+
+        self.sync_fields(o, fields)
+
+        o.save()
+
+    def delete_record(self, m):
+        pass
+
diff --git a/xos/synchronizers/base/__init__.py b/xos/synchronizers/base/__init__.py
new file mode 100644
index 0000000..e56cd39
--- /dev/null
+++ b/xos/synchronizers/base/__init__.py
@@ -0,0 +1,36 @@
+from xos.config import Config
+
+try:
+    observer_disabled = Config().observer_disabled
+except:
+    observer_disabled = False
+
+def EnableObserver(x):
+    """ used for manage.py --noobserver """
+    global observer_disabled
+    observer_disabled = not x
+
+print_once = True
+
+def notify_observer(model=None, delete=False, pk=None, model_dict={}):
+    if (observer_disabled):
+        global print_once
+        if (print_once):
+            print "The observer is disabled"
+            print_once = False
+        return
+
+    try:
+        from .event_manager import EventSender
+        if (model and delete):
+            if hasattr(model,"__name__"):
+                modelName = model.__name__
+            else:
+                modelName = model.__class__.__name__
+            EventSender().fire(delete_flag = delete, model = modelName, pk = pk, model_dict=model_dict)
+        else:
+            EventSender().fire()
+    except Exception,e:
+        print "Exception in Observer. This should not disrupt the front end. %s"%str(e)
+
+
diff --git a/xos/synchronizers/base/ansible.py b/xos/synchronizers/base/ansible.py
new file mode 100644
index 0000000..94b09bd
--- /dev/null
+++ b/xos/synchronizers/base/ansible.py
@@ -0,0 +1,211 @@
+#!/usr/bin/env python
+import jinja2
+import tempfile
+import os
+import json
+import pdb
+import string
+import random
+import re
+import traceback
+import subprocess
+from xos.config import Config, XOS_DIR
+from util.logger import observer_logger
+
+try:
+    step_dir = Config().observer_steps_dir
+    sys_dir = Config().observer_sys_dir
+except:
+    step_dir = XOS_DIR + '/observer/steps'
+    sys_dir = '/opt/opencloud'
+
+os_template_loader = jinja2.FileSystemLoader( searchpath=step_dir)
+os_template_env = jinja2.Environment(loader=os_template_loader)
+
+def parse_output(msg):
+    lines = msg.splitlines()
+    results = []
+
+    observer_logger.info(msg)
+
+    for l in lines:
+        # each ansible task result is a json dict printed after one of these
+        # prefixes
+        for magic_str in ('ok: [127.0.0.1] => ', 'changed: [127.0.0.1] => '):
+            if l.startswith(magic_str):
+                d = json.loads(l[len(magic_str):])
+                results.append(d)
+                break
+
+    return results
+
+def parse_unreachable(msg):
+    total_unreachable=0
+    total_failed=0
+    for l in msg.splitlines():
+        x = re.findall('ok=([0-9]+).*changed=([0-9]+).*unreachable=([0-9]+).*failed=([0-9]+)', l)
+        if x:
+            (ok, changed, unreachable, failed) = x[0]
+            ok=int(ok)
+            changed=int(changed)
+            unreachable=int(unreachable)
+            failed=int(failed)
+
+            total_unreachable += unreachable
+            total_failed += failed
+    return {'unreachable':total_unreachable,'failed':total_failed}
+
+def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
+    return ''.join(random.choice(chars) for _ in range(size))
+
+def shellquote(s):
+    return "'" + s.replace("'", "'\\''") + "'"
+
+def get_playbook_fn(opts, path):
+    if not opts.get("ansible_tag", None):
+        # if no ansible_tag is in the options, then generate a unique one
+        objname= id_generator()
+        opts = opts.copy()
+        opts["ansible_tag"] = objname
+
+    objname = opts["ansible_tag"]
+
+    os.system('mkdir -p %s' % os.path.join(sys_dir, path))
+    return (opts, os.path.join(sys_dir,path,objname))
+
+def run_template(name, opts, path='', expected_num=None, ansible_config=None, ansible_hosts=None, run_ansible_script=None):
+    template = os_template_env.get_template(name)
+    buffer = template.render(opts)
+
+    (opts, fqp) = get_playbook_fn(opts, path)
+
+    f = open(fqp,'w')
+    f.write(buffer)
+    f.flush()
+    f.close()
+
+    # This is messy -- there's no way to specify ansible config file from
+    # the command line, but we can specify it using the environment.
+    env = os.environ.copy()
+    if ansible_config:
+       env["ANSIBLE_CONFIG"] = ansible_config
+    if ansible_hosts:
+       env["ANSIBLE_HOSTS"] = ansible_hosts
+
+    if (not Config().observer_pretend):
+        if not run_ansible_script:
+            run_ansible_script = os.path.join(XOS_DIR, "observer/run_ansible")
+
+        process = subprocess.Popen("%s %s" % (run_ansible_script, shellquote(fqp)), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
+        # use communicate() so neither pipe can fill up and deadlock the child
+        (msg, err_msg) = process.communicate()
+
+        if getattr(Config(), "observer_save_ansible_output", False):
+            try:
+                open(fqp+".out","w").write(msg)
+                open(fqp+".err","w").write(err_msg)
+            except:
+                # fail silently
+                pass
+
+    else:
+        msg = open(fqp+'.out').read()
+
+    try:
+        ok_results = parse_output(msg)
+        if (expected_num is not None) and (len(ok_results) != expected_num):
+            raise ValueError('Unexpected num %s!=%d' % (str(expected_num), len(ok_results)))
+
+        parsed = parse_unreachable(msg)
+        total_unreachable = parsed['unreachable']
+        failed = parsed['failed']
+        if (failed):
+            raise ValueError('Ansible playbook failed.')
+
+        if (total_unreachable > 0):
+            raise ValueError("Unreachable results in ansible recipe")
+    except ValueError,e:
+        all_fatal = [e.message] + re.findall(r'^msg: (.*)',msg,re.MULTILINE)
+        all_fatal.extend(re.findall(r'^ERROR: (.*)',msg,re.MULTILINE))
+        error = ' // '.join(all_fatal)
+        raise Exception(error)
+
+    return ok_results
+
+def run_template_ssh(name, opts, path='', expected_num=None):
+    instance_name = opts["instance_name"]
+    hostname = opts["hostname"]
+    private_key = opts["private_key"]
+    baremetal_ssh = opts.get("baremetal_ssh",False)
+    if baremetal_ssh:
+        # no instance_id or nat_ip for baremetal
+        # we never proxy to baremetal
+        proxy_ssh = False
+    else:
+        instance_id = opts["instance_id"]
+        nat_ip = opts["nat_ip"]
+        try:
+            proxy_ssh = Config().observer_proxy_ssh
+        except:
+            proxy_ssh = True
+
+    (opts, fqp) = get_playbook_fn(opts, path)
+    private_key_pathname = fqp + ".key"
+    config_pathname = fqp + ".config"
+    hosts_pathname = fqp + ".hosts"
+
+    f = open(private_key_pathname, "w")
+    f.write(private_key)
+    f.close()
+
+    f = open(config_pathname, "w")
+    f.write("[ssh_connection]\n")
+    if proxy_ssh:
+        proxy_command = "ProxyCommand ssh -q -i %s -o StrictHostKeyChecking=no %s@%s" % (private_key_pathname, instance_id, hostname)
+        f.write('ssh_args = -o "%s"\n' % proxy_command)
+    f.write('scp_if_ssh = True\n')
+    f.write('pipelining = True\n')
+    f.write('\n[defaults]\n')
+    f.write('host_key_checking = False\n')
+    f.close()
+
+    f = open(hosts_pathname, "w")
+    f.write("[%s]\n" % instance_name)
+    if proxy_ssh or baremetal_ssh:
+        f.write("%s ansible_ssh_private_key_file=%s\n" % (hostname, private_key_pathname))
+    else:
+        # acb: Login user is hardcoded, this is not great
+        f.write("%s ansible_ssh_private_key_file=%s ansible_ssh_user=ubuntu\n" % (nat_ip, private_key_pathname))
+    f.close()
+
+    # SSH will complain if private key is world or group readable
+    os.chmod(private_key_pathname, 0600)
+
+    print "ANSIBLE_CONFIG=%s" % config_pathname
+    print "ANSIBLE_HOSTS=%s" % hosts_pathname
+
+    return run_template(name, opts, path, ansible_config = config_pathname, ansible_hosts = hosts_pathname, run_ansible_script="/opt/xos/observer/run_ansible_verbose")
+
+
+
+def main():
+    run_template('ansible/sync_user_deployments.yaml',{ "endpoint" : "http://172.31.38.128:5000/v2.0/",
+             "name" : "Sapan Bhatia",
+             "email": "gwsapan@gmail.com",
+             "password": "foobar",
+             "admin_user":"admin",
+             "admin_password":"6a789bf69dd647e2",
+             "admin_tenant":"admin",
+             "tenant":"demo",
+             "roles":['user','admin'] })
diff --git a/xos/synchronizers/base/backend.py b/xos/synchronizers/base/backend.py
new file mode 100644
index 0000000..107ba2c
--- /dev/null
+++ b/xos/synchronizers/base/backend.py
@@ -0,0 +1,46 @@
+import os
+import sys
+import threading
+import time
+from synchronizers.base.event_loop import XOSObserver
+from synchronizers.base.event_manager import EventListener
+from util.logger import Logger, logging
+from model_policy import run_policy
+from xos.config import Config
+
+logger = Logger(level=logging.INFO)
+
+class Backend:
+
+    def run(self):
+        # start the openstack observer
+        observer = XOSObserver()
+        observer_thread = threading.Thread(target=observer.run)
+        observer_thread.start()
+
+        # start model policies thread
+        observer_name = getattr(Config(), "observer_name", "")
+        if (not observer_name):
+            model_policy_thread = threading.Thread(target=run_policy)
+            model_policy_thread.start()
+        else:
+            model_policy_thread = None
+            print "Skipping model policies thread for service observer."
+
+
+        # start event listener
+        #event_manager = EventListener(wake_up=observer.wake_up)
+        #event_manager_thread = threading.Thread(target=event_manager.run)
+        #event_manager_thread.start()
+
+        while True:
+            try:
+                time.sleep(1000)
+            except KeyboardInterrupt:
+                print "exiting due to keyboard interrupt"
+                # TODO: See about setting the threads as daemons
+                observer_thread._Thread__stop()
+                if model_policy_thread:
+                    model_policy_thread._Thread__stop()
+                sys.exit(1)
+
diff --git a/xos/synchronizers/base/controller.diff b/xos/synchronizers/base/controller.diff
new file mode 100644
index 0000000..865b83e
--- /dev/null
+++ b/xos/synchronizers/base/controller.diff
@@ -0,0 +1,37 @@
+diff -up xos/model_policies/model_policy_Controller.py.orig xos/model_policies/model_policy_Controller.py
+--- xos/model_policies/model_policy_Controller.py.orig	2015-01-19 20:09:13.000000000 +0000
++++ xos/model_policies/model_policy_Controller.py	2015-04-07 21:48:51.462215000 +0000
+@@ -1,6 +1,6 @@
+ 
+ def handle(controller):
+-    from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser
++    from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Network, Image
+     from collections import defaultdict
+ 
+     # relations for all sites
+@@ -36,3 +36,25 @@ def handle(controller):
+             controller not in ctrls_by_user[user]:
+             controller_user = ControllerUser(controller=controller, user=user)
+             controller_user.save()
++    # relations for all networks
++    ctrls_by_network = defaultdict(list)
++    ctrl_networks = ControllerNetwork.objects.all()
++    for ctrl_network in ctrl_networks:
++        ctrls_by_network[ctrl_network.network].append(ctrl_network.controller)
++    networks = Network.objects.all()
++    for network in networks:
++        if network not in ctrls_by_network or \
++            controller not in ctrls_by_network[network]:
++            controller_network = ControllerNetwork(controller=controller, network=network)
++            controller_network.save()
++    # relations for all images
++    ctrls_by_image = defaultdict(list)
++    ctrl_images = ControllerImages.objects.all()
++    for ctrl_image in ctrl_images:
++        ctrls_by_image[ctrl_image.image].append(ctrl_image.controller)
++    images = Image.objects.all()
++    for image in images:
++        if image not in ctrls_by_image or \
++            controller not in ctrls_by_image[image]:
++            controller_image = ControllerImages(controller=controller, image=image)
++            controller_image.save()
diff --git a/xos/synchronizers/base/deleter.py b/xos/synchronizers/base/deleter.py
new file mode 100644
index 0000000..93fa572
--- /dev/null
+++ b/xos/synchronizers/base/deleter.py
@@ -0,0 +1,16 @@
+import os
+import base64
+from xos.config import Config
+
+class Deleter:
+    model=None # Must be overridden
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def call(self, pk, model_dict):
+        # Fetch object from XOS db and delete it
+        pass
+
+    def __call__(self, *args, **kwargs):
+        return self.call(*args, **kwargs)
diff --git a/xos/synchronizers/base/error_mapper.py b/xos/synchronizers/base/error_mapper.py
new file mode 100644
index 0000000..3039a56
--- /dev/null
+++ b/xos/synchronizers/base/error_mapper.py
@@ -0,0 +1,25 @@
+from xos.config import Config
+from util.logger import Logger, logging, logger
+
+class ErrorMapper:
+    def __init__(self, error_map_file):
+        self.error_map = {}
+        try:
+            error_map_lines = open(error_map_file).read().splitlines()
+            for l in error_map_lines:
+                if (not l.startswith('#')):
+                    splits = l.split('->')
+                    k,v = map(lambda i:i.rstrip(),splits)
+                    self.error_map[k]=v
+        except:
+            logger.info('Could not read error map')
+
+    def map(self, error):
+        return self.error_map[error]
diff --git a/xos/synchronizers/base/event_loop.py b/xos/synchronizers/base/event_loop.py
new file mode 100644
index 0000000..85c7257
--- /dev/null
+++ b/xos/synchronizers/base/event_loop.py
@@ -0,0 +1,527 @@
+import os
+import imp
+import inspect
+import time
+import sys
+import traceback
+import commands
+import threading
+import json
+import pdb
+import pprint
+
+
+from datetime import datetime
+from collections import defaultdict
+from core.models import *
+from django.db.models import F, Q
+from django.db import connection
+from django.db import reset_queries
+#from openstack.manager import OpenStackManager
+from openstack.driver import OpenStackDriver
+from util.logger import Logger, logging, logger
+#from timeout import timeout
+from xos.config import Config, XOS_DIR
+from synchronizers.base.steps import *
+from syncstep import SyncStep
+from toposort import toposort
+from synchronizers.base.error_mapper import *
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from synchronizers.base.steps.sync_object import SyncObject
+
+# Load app models
+
+try:
+    app_module_names = Config().observer_applist.split(',')
+except AttributeError:
+    app_module_names = []
+
+if (type(app_module_names)!=list):
+    app_module_names=[app_module_names]
+
+app_modules = []
+
+for m in app_module_names:
+    model_path = m+'.models'
+    module = __import__(model_path,fromlist=[m])
+    app_modules.append(module)
+
+
+debug_mode = False
+
+class bcolors:
+    HEADER = '\033[95m'
+    OKBLUE = '\033[94m'
+    OKGREEN = '\033[92m'
+    WARNING = '\033[93m'
+    FAIL = '\033[91m'
+    ENDC = '\033[0m'
+    BOLD = '\033[1m'
+    UNDERLINE = '\033[4m'
+
+logger = Logger(level=logging.INFO)
+
+class StepNotReady(Exception):
+	pass
+
+class NoOpDriver:
+	def __init__(self):
+		self.enabled = True
+		self.dependency_graph = None
+
+STEP_STATUS_WORKING=1
+STEP_STATUS_OK=2
+STEP_STATUS_KO=3
+
+def invert_graph(g):
+	ig = {}
+	for k,v in g.items():
+		for v0 in v:
+			try:
+				ig[v0].append(k)
+			except:
+				ig[v0]=[k]
+	return ig
+
+class XOSObserver:
+	sync_steps = []
+
+
+	def __init__(self):
+		# The Condition object that gets signalled by Feefie events
+		self.step_lookup = {}
+		self.load_sync_step_modules()
+		self.load_sync_steps()
+		self.event_cond = threading.Condition()
+
+		self.driver_kind = getattr(Config(), "observer_driver", "openstack")
+		self.observer_name = getattr(Config(), "observer_name", "")
+		if self.driver_kind=="openstack":
+			self.driver = OpenStackDriver()
+		else:
+			self.driver = NoOpDriver()
+
+	def consolePrint(self, what):
+		if getattr(Config(), "observer_console_print", True):
+			print what
+
+	def wait_for_event(self, timeout):
+		self.event_cond.acquire()
+		self.event_cond.wait(timeout)
+		self.event_cond.release()
+
+	def wake_up(self):
+		logger.info('Wake up routine called. Event cond %r'%self.event_cond)
+		self.event_cond.acquire()
+		self.event_cond.notify()
+		self.event_cond.release()
+
+	def load_sync_step_modules(self, step_dir=None):
+		if step_dir is None:
+			if hasattr(Config(), "observer_steps_dir"):
+				step_dir = Config().observer_steps_dir
+			else:
+				step_dir = XOS_DIR + "/observer/steps"
+
+		for fn in os.listdir(step_dir):
+			pathname = os.path.join(step_dir,fn)
+			if os.path.isfile(pathname) and fn.endswith(".py") and (fn!="__init__.py"):
+				module = imp.load_source(fn[:-3],pathname)
+				for classname in dir(module):
+					c = getattr(module, classname, None)
+
+					# make sure 'c' is a descendent of SyncStep and has a
+					# provides field (this eliminates the abstract base classes
+					# since they don't have a provides)
+
+					if inspect.isclass(c) and (issubclass(c, SyncStep) or issubclass(c,OpenStackSyncStep)) and hasattr(c,"provides") and (c not in self.sync_steps):
+						self.sync_steps.append(c)
+		logger.info('loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps]))
+
+	def load_sync_steps(self):
+		dep_path = Config().observer_dependency_graph
+		logger.info('Loading model dependency graph from %s' % dep_path)
+		try:
+			# This contains dependencies between records, not sync steps
+			self.model_dependency_graph = json.loads(open(dep_path).read())
+			for left,lst in self.model_dependency_graph.items():
+				new_lst = []
+				for k in lst:
+					try:
+						tup = (k,k.lower())
+						new_lst.append(tup)
+						deps = self.model_dependency_graph[k]
+					except:
+						self.model_dependency_graph[k] = []
+				self.model_dependency_graph[left] = new_lst
+		except Exception,e:
+			raise e
+
+		try:
+			backend_path = Config().observer_pl_dependency_graph
+			logger.info('Loading backend dependency graph from %s' % backend_path)
+			# This contains dependencies between backend records
+			self.backend_dependency_graph = json.loads(open(backend_path).read())
+			for k,v in self.backend_dependency_graph.items():
+				try:
+					self.model_dependency_graph[k].extend(v)
+				except KeyError:
+					self.model_dependency_graph[k] = v
+
+		except Exception,e:
+			logger.info('Backend dependency graph not loaded')
+			# We can work without a backend graph
+			self.backend_dependency_graph = {}
+
+		provides_dict = {}
+		for s in self.sync_steps:
+			self.step_lookup[s.__name__] = s 
+			for m in s.provides:
+				try:
+					provides_dict[m.__name__].append(s.__name__)
+				except KeyError:
+					provides_dict[m.__name__]=[s.__name__]
+
+		step_graph = {}
+		phantom_steps = []
+		for k,v in self.model_dependency_graph.items():
+			try:
+				for source in provides_dict[k]:
+					if (not v):
+						step_graph[source] = []
+
+					for m,_ in v:
+						try:
+							for dest in provides_dict[m]:
+								# no deps, pass
+								try:
+									if (dest not in step_graph[source]):
+										step_graph[source].append(dest)
+								except:
+									step_graph[source]=[dest]
+						except KeyError:
+							if (not provides_dict.has_key(m)):
+								try:
+									step_graph[source]+=['#%s'%m]
+								except:
+									step_graph[source]=['#%s'%m]
+								phantom_steps+=['#%s'%m]
+							pass
+
+			except KeyError:
+				pass
+				# no dependencies, pass
+
+
+		self.dependency_graph = step_graph
+		self.deletion_dependency_graph = invert_graph(step_graph)
+
+		pp = pprint.PrettyPrinter(indent=4)
+		logger.info(pp.pformat(step_graph))
+		self.ordered_steps = toposort(self.dependency_graph, phantom_steps+map(lambda s:s.__name__,self.sync_steps))
+		self.ordered_steps = [i for i in self.ordered_steps if i!='SyncObject']
+
+		logger.info("Order of steps=%s" % self.ordered_steps)
+
+		self.load_run_times()
+		
+
+	def check_duration(self, step, duration):
+		try:
+			if (duration > step.deadline):
+				logger.info('Sync step %s missed deadline, took %.2f seconds'%(step.name,duration))
+		except AttributeError:
+			# S doesn't have a deadline
+			pass
+
+	def update_run_time(self, step, deletion):
+		if (not deletion):
+			self.last_run_times[step.__name__]=time.time()
+		else:
+			self.last_deletion_run_times[step.__name__]=time.time()
+
+
+	def check_schedule(self, step, deletion):
+		last_run_times = self.last_run_times if not deletion else self.last_deletion_run_times
+
+		time_since_last_run = time.time() - last_run_times.get(step.__name__, 0)
+		try:
+			if (time_since_last_run < step.requested_interval):
+				raise StepNotReady
+		except AttributeError:
+			logger.info('Step %s does not have requested_interval set'%step.__name__)
+			raise StepNotReady
+	
+	def load_run_times(self):
+		try:
+			jrun_times = open('/tmp/%sobserver_run_times'%self.observer_name).read()
+			self.last_run_times = json.loads(jrun_times)
+		except:
+			self.last_run_times={}
+			for e in self.ordered_steps:
+				self.last_run_times[e]=0
+		try:
+			jrun_times = open('/tmp/%sobserver_deletion_run_times'%self.observer_name).read()
+			self.last_deletion_run_times = json.loads(jrun_times)
+		except:
+			self.last_deletion_run_times={}
+			for e in self.ordered_steps:
+				self.last_deletion_run_times[e]=0
+
+	def lookup_step_class(self,s):
+		if ('#' in s):
+			return SyncObject
+		else:
+			step = self.step_lookup[s]
+		return step
+
+	def lookup_step(self,s):
+		if ('#' in s):
+			objname = s[1:]
+			so = SyncObject()
+
+			try:
+				obj = globals()[objname]
+			except:
+				for m in app_modules:
+					if (hasattr(m,objname)):
+						obj = getattr(m,objname)
+
+			so.provides=[obj]
+			so.observes=[obj]
+			step = so
+		else:
+			step_class = self.step_lookup[s]
+			step = step_class(driver=self.driver,error_map=self.error_mapper)
+		return step
+
+	def save_run_times(self):
+		run_times = json.dumps(self.last_run_times)
+		open('/tmp/%sobserver_run_times'%self.observer_name,'w').write(run_times)
+
+		deletion_run_times = json.dumps(self.last_deletion_run_times)
+		open('/tmp/%sobserver_deletion_run_times'%self.observer_name,'w').write(deletion_run_times)
+
+	def check_class_dependency(self, step, failed_steps):
+		step.dependencies = []
+		for obj in step.provides:
+			lst = self.model_dependency_graph.get(obj.__name__, [])
+			nlst = map(lambda(a,b):b,lst)
+			step.dependencies.extend(nlst)
+		for failed_step in failed_steps:
+			if (failed_step in step.dependencies):
+				raise StepNotReady
+
+	def sync(self, S, deletion):
+		try:
+			step = self.lookup_step_class(S)
+			start_time = time.time()
+
+			logger.info("Starting to work on step %s, deletion=%s" % (step.__name__, str(deletion)))
+
+			dependency_graph = self.dependency_graph if not deletion else self.deletion_dependency_graph
+			step_conditions = self.step_conditions # if not deletion else self.deletion_step_conditions
+			step_status = self.step_status # if not deletion else self.deletion_step_status
+
+			# Wait for step dependencies to be met
+			try:
+				deps = dependency_graph[S]
+				has_deps = True
+			except KeyError:
+				has_deps = False
+
+			go = True
+
+			failed_dep = None
+			if (has_deps):
+				for d in deps:
+					if d==step.__name__:
+						logger.info("   step %s self-wait skipped" % step.__name__)
+						go = True
+						continue
+
+					cond = step_conditions[d]
+					cond.acquire()
+					if (step_status[d] is STEP_STATUS_WORKING):
+						logger.info("  step %s wait on dep %s" % (step.__name__, d))
+						cond.wait()
+					elif step_status[d] == STEP_STATUS_OK:
+						go = True
+					else:
+						go = False
+						failed_dep = d
+					cond.release()
+					if (not go):
+						break
+			else:
+				go = True
+
+			if (not go):
+				self.consolePrint(bcolors.FAIL + "Step %r skipped on %r" % (step,failed_dep) + bcolors.ENDC)
+				# SMBAKER: sync_step was not defined here, so I changed
+				#    this from 'sync_step' to 'step'. Verify.
+				self.failed_steps.append(step)
+				my_status = STEP_STATUS_KO
+			else:
+				sync_step = self.lookup_step(S)
+				sync_step.__name__ = step.__name__
+				sync_step.dependencies = []
+				try:
+					mlist = sync_step.provides
+
+					for m in mlist:
+						lst = self.model_dependency_graph[m.__name__]
+						nlst = map(lambda(a,b):b,lst)
+						sync_step.dependencies.extend(nlst)
+				except KeyError:
+					pass
+				sync_step.debug_mode = debug_mode
+
+				should_run = False
+				try:
+					# Various checks that decide whether
+					# this step runs or not
+					self.check_class_dependency(sync_step, self.failed_steps) # dont run Slices if Sites failed
+					self.check_schedule(sync_step, deletion) # dont run sync_network_routes if time since last run < 1 hour
+					should_run = True
+				except StepNotReady:
+					logger.info('Step not ready: %s'%sync_step.__name__)
+					self.failed_steps.append(sync_step)
+					my_status = STEP_STATUS_KO
+				except Exception,e:
+					logger.error('%r' % e)
+					logger.log_exc("sync step failed: %r. Deletion: %r"%(sync_step,deletion))
+					self.failed_steps.append(sync_step)
+					my_status = STEP_STATUS_KO
+
+				if (should_run):
+					try:
+						duration = time.time() - start_time
+
+						logger.info('Executing step %s, deletion=%s' % (sync_step.__name__, deletion))
+
+						self.consolePrint(bcolors.OKBLUE + "Executing step %s" % sync_step.__name__ + bcolors.ENDC)
+						failed_objects = sync_step(failed=list(self.failed_step_objects), deletion=deletion)
+
+						self.check_duration(sync_step, duration)
+
+						if failed_objects:
+							self.failed_step_objects.update(failed_objects)
+
+						logger.info("Step %r succeeded" % sync_step.__name__)
+						self.consolePrint(bcolors.OKGREEN + "Step %r succeeded" % sync_step.__name__ + bcolors.ENDC)
+						my_status = STEP_STATUS_OK
+						self.update_run_time(sync_step,deletion)
+					except Exception,e:
+						self.consolePrint(bcolors.FAIL + "Model step %r failed" % (sync_step.__name__) + bcolors.ENDC)
+						logger.error('Model step %r failed. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!' % (sync_step.__name__, e))
+						logger.log_exc(e)
+						self.failed_steps.append(S)
+						my_status = STEP_STATUS_KO
+				else:
+					logger.info("Step %r succeeded due to non-run" % step)
+					my_status = STEP_STATUS_OK
+
+			try:
+				my_cond = step_conditions[S]
+				my_cond.acquire()
+				step_status[S] = my_status
+				my_cond.notify_all()
+				my_cond.release()
+			except KeyError,e:
+				logger.info('Step %r is a leaf' % step)
+				pass
+		finally:
+			try:
+				reset_queries()
+			except:
+				# this shouldn't happen, but in case it does, catch it...
+				logger.log_exc("exception in reset_queries")
+
+			connection.close()
+
+	def run(self):
+		if not self.driver.enabled:
+			return
+
+		if (self.driver_kind=="openstack") and (not self.driver.has_openstack):
+			return
+
+		while True:
+                    logger.info('Waiting for event')
+                    self.wait_for_event(timeout=5)
+                    logger.info('Observer woke up')
+
+                    self.run_once()
+
+        def run_once(self):
+                try:
+                        loop_start = time.time()
+                        error_map_file = getattr(Config(), "error_map_path", XOS_DIR + "/error_map.txt")
+                        self.error_mapper = ErrorMapper(error_map_file)
+
+                        # Two passes. One for sync, the other for deletion.
+                        for deletion in [False,True]:
+                                # Set of individual objects within steps that failed
+                                self.failed_step_objects = set()
+
+                                # Set up conditions and step status
+                                # This is needed for steps to run in parallel
+                                # while obeying dependencies.
+
+                                providers = set()
+                                dependency_graph = self.dependency_graph if not deletion else self.deletion_dependency_graph
+
+                                for v in dependency_graph.values():
+                                        if (v):
+                                                providers.update(v)
+
+                                self.step_conditions = {}
+                                self.step_status = {}
+
+                                for p in list(providers):
+                                        self.step_conditions[p] = threading.Condition()
+
+                                        self.step_status[p] = STEP_STATUS_WORKING
+
+                                self.failed_steps = []
+
+                                threads = []
+                                logger.info('Deletion=%r...'%deletion)
+                                schedule = self.ordered_steps if not deletion else reversed(self.ordered_steps)
+
+                                for S in schedule:
+                                        thread = threading.Thread(target=self.sync, args=(S, deletion))
+
+                                        logger.info('Deletion=%r...'%deletion)
+                                        threads.append(thread)
+
+                                # Start threads
+                                for t in threads:
+                                        t.start()
+
+                                # another spot to clean up debug state
+                                try:
+                                    reset_queries()
+                                except:
+                                    # this shouldn't happen, but in case it does, catch it...
+                                    logger.log_exc("exception in reset_queries")
+
+                                # Wait for all threads to finish before continuing with the run loop
+                                for t in threads:
+                                        t.join()
+
+                        self.save_run_times()
+
+                        loop_end = time.time()
+                        open('/tmp/%sobserver_last_run'%self.observer_name,'w').write(json.dumps({'last_run': loop_end, 'last_duration':loop_end - loop_start}))
+                except Exception, e:
+                        logger.error('Core error. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!' % e)
+                        logger.log_exc("Exception in observer run loop")
+                        traceback.print_exc()
diff --git a/xos/synchronizers/base/event_manager.py b/xos/synchronizers/base/event_manager.py
new file mode 100644
index 0000000..fce2b68
--- /dev/null
+++ b/xos/synchronizers/base/event_manager.py
@@ -0,0 +1,120 @@
+import threading
+import requests, json
+
+from xos.config import Config, XOS_DIR
+
+import uuid
+import os
+import imp
+import inspect
+import base64
+import json
+import traceback
+
+if getattr(Config(),"observer_fofum_disabled", False) != True:
+    from fofum import Fofum
+    fofum_enabled = True
+else:
+    fofum_enabled = False
+
+random_client_id=None
+def get_random_client_id():
+    global random_client_id
+
+    if (random_client_id is None) and os.path.exists(XOS_DIR + "/random_client_id"):
+        # try to use the last one we used, if we saved it
+        try:
+            random_client_id = open(XOS_DIR+"/random_client_id","r").readline().strip()
+            print "get_random_client_id: loaded %s" % random_client_id
+        except:
+            print "get_random_client_id: failed to read " + XOS_DIR + "/random_client_id"
+
+    if random_client_id is None:
+        random_client_id = base64.urlsafe_b64encode(os.urandom(12))
+        print "get_random_client_id: generated new id %s" % random_client_id
+
+        # try to save it for later (XXX: could race with another client here)
+        try:
+            open(XOS_DIR + "/random_client_id","w").write("%s\n" % random_client_id)
+        except:
+            print "get_random_client_id: failed to write " + XOS_DIR + "/random_client_id"
+
+    return random_client_id
+
+# decorator that marks dispatchable event methods
+def event(func):
+    setattr(func, 'event', func.__name__)
+    return func
+
+class EventHandler:
+    # This code is currently not in use.
+    def __init__(self):
+        pass
+
+    @staticmethod
+    def get_events():
+        events = []
+        for name in dir(EventHandler):
+            attribute = getattr(EventHandler, name)
+            if hasattr(attribute, 'event'):
+                events.append(getattr(attribute, 'event'))
+        return events
+
+    def dispatch(self, event, *args, **kwds):
+        if hasattr(self, event):
+            return getattr(self, event)(*args, **kwds)
+            
+
+class EventSender:
+    def __init__(self,user=None,clientid=None):
+        try:
+            user = Config().feefie_client_user
+        except:
+            user = 'pl'
+
+        try:
+            clid = Config().feefie_client_id
+        except:
+            clid = get_random_client_id()
+            print "EventSender: no feefie_client_id configured. Using random id %s" % clid
+
+        if fofum_enabled:
+            self.fofum = Fofum(user=user)
+            self.fofum.make(clid)
+
+    def fire(self,**kwargs):
+        kwargs["uuid"] = str(uuid.uuid1())
+        if fofum_enabled:
+            self.fofum.fire(json.dumps(kwargs))
+
+class EventListener:
+    def __init__(self,wake_up=None):
+        self.handler = EventHandler()
+        self.wake_up = wake_up
+
+    def handle_event(self, payload):
+        payload_dict = json.loads(payload)
+
+        if (self.wake_up):
+            self.wake_up()
+
+    def run(self):
+        # This is our unique client id, to be used when firing and receiving events
+        # It needs to be generated once and placed in the config file
+
+        try:
+            user = Config().feefie_client_user
+        except:
+            user = 'pl'
+
+        try:
+            clid = Config().feefie_client_id
+        except:
+            clid = get_random_client_id()
+            print "EventListener: no feefie_client_id configured. Using random id %s" % clid
+
+        if fofum_enabled:
+            f = Fofum(user=user)
+
+            listener_thread = threading.Thread(target=f.listen_for_event,args=(clid,self.handle_event))
+            listener_thread.start()
diff --git a/xos/synchronizers/base/observer b/xos/synchronizers/base/observer
new file mode 120000
index 0000000..ae75af5
--- /dev/null
+++ b/xos/synchronizers/base/observer
@@ -0,0 +1 @@
+openstack_observer
\ No newline at end of file
diff --git a/xos/synchronizers/base/openstacksyncstep.py b/xos/synchronizers/base/openstacksyncstep.py
new file mode 100644
index 0000000..cc568f8
--- /dev/null
+++ b/xos/synchronizers/base/openstacksyncstep.py
@@ -0,0 +1,14 @@
+import os
+import base64
+from syncstep import SyncStep
+
+class OpenStackSyncStep(SyncStep):
+    """ XOS Sync step for copying data to OpenStack 
+    """ 
+    
+    def __init__(self, **args):
+        SyncStep.__init__(self, **args)
+        return
+
+    def __call__(self, **args):
+        return self.call(**args)
diff --git a/xos/synchronizers/base/run_ansible b/xos/synchronizers/base/run_ansible
new file mode 100755
index 0000000..a504ec3
--- /dev/null
+++ b/xos/synchronizers/base/run_ansible
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+source /opt/ansible/hacking/env-setup >> /dev/null
+ansible-playbook -v "$@"
diff --git a/xos/synchronizers/base/run_ansible_verbose b/xos/synchronizers/base/run_ansible_verbose
new file mode 100755
index 0000000..d17cad7
--- /dev/null
+++ b/xos/synchronizers/base/run_ansible_verbose
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+source /opt/ansible/hacking/env-setup >> /dev/null
+ansible-playbook -vvv "$@"
diff --git a/xos/synchronizers/base/steps/__init__.py b/xos/synchronizers/base/steps/__init__.py
new file mode 100644
index 0000000..c70b0c0
--- /dev/null
+++ b/xos/synchronizers/base/steps/__init__.py
@@ -0,0 +1,6 @@
+#from .sync_controller_sites import SyncControllerSites
+#from .sync_controller_slices import SyncControllerSlices
+#from .sync_controller_users import SyncControllerUsers
+#from .sync_controller_site_privileges import SyncControllerSitePrivileges
+#from .sync_controller_slice_privileges import SyncControllerSlicePrivileges
+#from .sync_controller_networks import SyncControllerNetworks
diff --git a/xos/synchronizers/base/steps/delete_slivers.yaml b/xos/synchronizers/base/steps/delete_slivers.yaml
new file mode 100644
index 0000000..fa6b879
--- /dev/null
+++ b/xos/synchronizers/base/steps/delete_slivers.yaml
@@ -0,0 +1,8 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+
+  - nova_compute:
+      state: absent
+      name: "{{ name }}"
diff --git a/xos/synchronizers/base/steps/purge_disabled_users.py b/xos/synchronizers/base/steps/purge_disabled_users.py
new file mode 100644
index 0000000..80690b0
--- /dev/null
+++ b/xos/synchronizers/base/steps/purge_disabled_users.py
@@ -0,0 +1,25 @@
+import os
+import base64
+import datetime
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models.user import User
+from util.logger import observer_logger as logger
+
+class SyncRoles(OpenStackSyncStep):
+    provides=[User]
+    requested_interval=0
+    observes=User
+
+    def fetch_pending(self, deleted):
+        if (deleted):
+            # users marked as deleted
+            return User.deleted_objects.all()
+        else:
+            # disabled users that haven't been updated in over a week 
+            one_week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
+            return User.objects.filter(is_active=False, updated__gt=one_week_ago)             
+
+    def sync_record(self, user):
+        user.delete() 
diff --git a/xos/synchronizers/base/steps/sliver.yaml b/xos/synchronizers/base/steps/sliver.yaml
new file mode 100644
index 0000000..e630415
--- /dev/null
+++ b/xos/synchronizers/base/steps/sliver.yaml
@@ -0,0 +1,17 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - nova_compute:
+       state: present
+       auth_url: http://172.31.38.128:5000/v2.0/
+       login_username: admin
+       login_password: 6a789bf69dd647e2
+       login_tenant_name: admin
+       name: gloopy
+       image_id: 3ee851df-b35a-41c5-8551-f681e7209095
+       key_name: boo
+       wait_for: 200
+       flavor_id: 3
+       nics:
+         - net-id: d1de537b-80dc-4c1b-aa5f-4a197b33b5f6
diff --git a/xos/synchronizers/base/steps/sync_container.py b/xos/synchronizers/base/steps/sync_container.py
new file mode 100644
index 0000000..29fe3e0
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_container.py
@@ -0,0 +1,163 @@
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from synchronizers.base.syncstep import SyncStep, DeferredException
+from synchronizers.base.ansible import run_template_ssh
+from core.models import Service, Slice, Instance
+from util.logger import Logger, logging
+
+# hpclibrary will be in steps/..
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncContainer(SyncInstanceUsingAnsible):
+    provides=[Instance]
+    observes=Instance
+    requested_interval=0
+    template_name = "sync_container.yaml"
+
+    def __init__(self, *args, **kwargs):
+        super(SyncContainer, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deletion=False):
+        objs = super(SyncContainer, self).fetch_pending(deletion)
+        objs = [x for x in objs if x.isolation in ["container", "container_vm"]]
+        return objs
+
+    def get_instance_port(self, container_port):
+        for p in container_port.network.links.all():
+            if (p.instance) and (p.instance.isolation=="vm") and (p.instance.node == container_port.instance.node) and (p.mac):
+                return p
+        return None
+
+    def get_parent_port_mac(self, instance, port):
+        if not instance.parent:
+            raise Exception("instance has no parent")
+        for parent_port in instance.parent.ports.all():
+            if parent_port.network == port.network:
+                if not parent_port.mac:
+                     raise DeferredException("parent port on network %s does not have mac yet" % parent_port.network.name)
+                return parent_port.mac
+        raise Exception("failed to find corresponding parent port for network %s" % port.network.name)
+
+    def get_ports(self, o):
+        ports = []
+        if (o.slice.network in ["host", "bridged"]):
+            pass # no ports in host or bridged mode
+        else:
+            for port in o.ports.all():
+                if (not port.ip):
+                    # 'unmanaged' ports may have an ip, but no mac
+                    # XXX: are there any ports that have a mac but no ip?
+                    raise DeferredException("Port on network %s is not yet ready" % port.network.name)
+
+                pd={}
+                pd["mac"] = port.mac or ""
+                pd["ip"] = port.ip or ""
+                pd["xos_network_id"] = port.network.id
+
+                if port.network.name == "wan_network":
+                    if port.ip:
+                        (a, b, c, d) = port.ip.split('.')
+                        pd["mac"] = "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))
+
+
+                if o.isolation == "container":
+                    # container on bare metal
+                    instance_port = self.get_instance_port(port)
+                    if not instance_port:
+                        raise DeferredException("No instance on slice for port on network %s" % port.network.name)
+
+                    pd["snoop_instance_mac"] = instance_port.mac
+                    pd["snoop_instance_id"] = instance_port.instance.instance_id
+                    pd["src_device"] = ""
+                    pd["bridge"] = "br-int"
+                else:
+                    # container in VM
+                    pd["snoop_instance_mac"] = ""
+                    pd["snoop_instance_id"] = ""
+                    pd["parent_mac"] = self.get_parent_port_mac(o, port)
+                    pd["bridge"] = ""
+
+                for (k,v) in port.get_parameters().items():
+                    pd[k] = v
+
+                ports.append(pd)
+
+            # for any ports that don't have a device, assign one
+            used_ports = [x["device"] for x in ports if ("device" in x)]
+            avail_ports = ["eth%d"%i for i in range(0,64) if ("eth%d"%i not in used_ports)]
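+            # e.g. (illustrative) if "eth0" is already taken, the first port
+            # lacking a device gets "eth1"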
+            for port in ports:
+                if not port.get("device",None):
+                    port["device"] = avail_ports.pop(0)
+
+        return ports
+
+    def get_extra_attributes(self, o):
+        fields={}
+        fields["ansible_tag"] = "container-%s" % str(o.id)
+        if o.image.tag:
+            fields["docker_image"] = o.image.path + ":" + o.image.tag
+        else:
+            fields["docker_image"] = o.image.path
+        fields["ports"] = self.get_ports(o)
+        if o.volumes:
+            fields["volumes"] = [x.strip() for x in o.volumes.split(",")]
+        else:
+            fields["volumes"] = ""
+        fields["network_method"] = o.slice.network or "default"
+        return fields
+
+    def sync_record(self, o):
+        logger.info("sync'ing object %s" % str(o))
+
+        fields = self.get_ansible_fields(o)
+
+        # If 'o' defines a 'sync_attributes' list, then we'll copy those
+        # attributes into the Ansible recipe's field list automatically.
+        if hasattr(o, "sync_attributes"):
+            for attribute_name in o.sync_attributes:
+                fields[attribute_name] = getattr(o, attribute_name)
+
+        fields.update(self.get_extra_attributes(o))
+
+        self.run_playbook(o, fields)
+
+        o.instance_id = fields["container_name"]
+        o.instance_name = fields["container_name"]
+
+        o.save()
+
+    def delete_record(self, o):
+        logger.info("deleting object %s" % str(o))
+
+        fields = self.get_ansible_fields(o)
+
+        # If 'o' defines a 'sync_attributes' list, then we'll copy those
+        # attributes into the Ansible recipe's field list automatically.
+        if hasattr(o, "sync_attributes"):
+            for attribute_name in o.sync_attributes:
+                fields[attribute_name] = getattr(o, attribute_name)
+
+        fields.update(self.get_extra_attributes(o))
+
+        self.run_playbook(o, fields, "teardown_container.yaml")
+
+    def run_playbook(self, o, fields, template_name=None):
+        if not template_name:
+            template_name = self.template_name
+        tStart = time.time()
+        run_template_ssh(template_name, fields, path="container")
+        logger.info("playbook execution time %d" % int(time.time()-tStart))
+
+
diff --git a/xos/synchronizers/base/steps/sync_container.yaml b/xos/synchronizers/base/steps/sync_container.yaml
new file mode 100644
index 0000000..77e57cd
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_container.yaml
@@ -0,0 +1,116 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: {{ username }}
+  sudo: yes
+
+  vars:
+    container_name: {{ container_name }}
+    docker_image: {{ docker_image }}
+    network_method: {{ network_method }}
+    ports:
+    {% for port in ports %}
+       - device: {{ port.device }}
+         xos_network_id: {{ port.xos_network_id }}
+         mac: {{ port.mac|default("") }}
+         ip: {{ port.ip }}
+         snoop_instance_mac: {{ port.snoop_instance_mac }}
+         snoop_instance_id: {{ port.snoop_instance_id }}
+         parent_mac: {{ port.parent_mac|default("") }}
+         s_tag: {{ port.s_tag|default("")  }}
+         c_tag: {{ port.c_tag|default("") }}
+         next_hop: {{ port.next_hop|default("") }}
+         bridge: {{ port.bridge }}
+    {% endfor %}
+    volumes:
+    {% for volume in volumes %}
+       - {{ volume }}
+    {% endfor %}
+
+  tasks:
+
+#  - name: Fix /etc/hosts
+#    lineinfile:
+#      dest=/etc/hosts
+#      regexp="127.0.0.1 localhost"
+#      line="127.0.0.1 localhost {{ instance_hostname }}"
+
+  - name: Add repo key
+    apt_key:
+      keyserver=hkp://pgp.mit.edu:80
+      id=58118E89F3A912897C070ADBF76221572C52609D
+
+  - name: Install Docker repo
+    apt_repository:
+      repo="deb https://apt.dockerproject.org/repo ubuntu-trusty main"
+      state=present
+
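+  # This playbook is itself passed through the synchronizer's Jinja pass, so
+  # Ansible's loop variable is written as {{ '{{' }} item {{ '}}' }} to reach
+  # Ansible unexpanded.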
+  - name: Install Docker
+    apt:
+      name={{ '{{' }} item {{ '}}' }}
+      state=latest
+      update_cache=yes
+    with_items:
+    - docker-engine
+    - python-pip
+    - python-httplib2
+
+  # Something is installing a requests library that is incompatible with pip,
+  # and will cause this recipe to fail the next time it tries to run pip. Only
+  # the one in /usr/local/lib is bad. There's still a good one in /usr/lib.
+  - name: check if bad requests library installed
+    stat: path=/usr/local/lib/python2.7/dist-packages/requests
+    register: bad_requests
+
+  - name: remove bad request library
+    shell: mv /usr/local/lib/python2.7/dist-packages/requests /usr/local/lib/python2.7/dist-packages/requests-bad
+    when: bad_requests.stat.exists == True
+
+  - name: Install docker-py
+    pip:
+      name=docker-py
+      state=latest
+
+  - name: install Pipework
+    get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
+       dest=/usr/local/bin/pipework
+       mode=0755
+
+#  - name: Start Container
+#    docker:
+#      docker_api_version: "1.18"
+#      name: {{ container_name }}
+#      # was: reloaded
+#      state: running
+#      image: {{ docker_image }}
+
+  - name: check if systemd is installed
+    stat: path=/usr/bin/systemctl
+    register: systemctl
+
+  - name: container upstart
+    template: src=/opt/xos/openstack_observer/templates/container.conf.j2 dest=/etc/init/container-{{ container_name }}.conf
+
+  - name: container systemd
+    template: src=/opt/xos/openstack_observer/templates/container.service.j2 dest=/lib/systemd/system/container-{{ container_name }}.service
+
+  - name: container startup script
+    template: src=/opt/xos/openstack_observer/templates/start-container.sh.j2 dest=/usr/local/sbin/start-container-{{ container_name }}.sh mode=0755
+
+  - name: container teardown script
+    template: src=/opt/xos/openstack_observer/templates/stop-container.sh.j2 dest=/usr/local/sbin/stop-container-{{ container_name }}.sh mode=0755
+
+  - name: restart systemd
+    shell: systemctl daemon-reload
+    when: systemctl.stat.exists == True
+
+{% if ports %}
+  - name: make sure bridges are setup
+    shell: ifconfig {{ '{{' }} item.bridge {{ '}}' }}
+    with_items: "ports"
+{% endif %}
+
+  - name: Make sure container is running
+    service: name=container-{{ container_name }} state=started
+
diff --git a/xos/synchronizers/base/steps/sync_controller_images.py b/xos/synchronizers/base/steps/sync_controller_images.py
new file mode 100644
index 0000000..04908dc
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_controller_images.py
@@ -0,0 +1,44 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from observer.syncstep import *
+from core.models import Controller
+from core.models import Image, ControllerImages
+from util.logger import observer_logger as logger 
+from observer.ansible import *
+import json
+
+class SyncControllerImages(OpenStackSyncStep):
+    provides=[ControllerImages]
+    observes = ControllerImages
+    requested_interval=0
+    playbook='sync_controller_images.yaml'
+
+    def fetch_pending(self, deleted):
+        if (deleted):
+            return []
+
+        # now we return all images that need to be enacted
+        return ControllerImages.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def map_sync_inputs(self, controller_image):
+        image_fields = {'endpoint':controller_image.controller.auth_url,
+                        'endpoint_v3': controller_image.controller.auth_url_v3,
+                        'admin_user':controller_image.controller.admin_user,
+                        'admin_password':controller_image.controller.admin_password,
+                        'domain': controller_image.controller.domain,
+                        'name':controller_image.image.name,
+                        'filepath':controller_image.image.path,
+                        'ansible_tag': '%s@%s'%(controller_image.image.name,controller_image.controller.name), # name of ansible playbook
+                        }
+
+        return image_fields
+
+    def map_sync_outputs(self, controller_image, res):
+        image_id = res[0]['id']
+        controller_image.glance_image_id = image_id
+        controller_image.backend_status = '1 - OK'
+        controller_image.save()
diff --git a/xos/synchronizers/base/steps/sync_controller_images.yaml b/xos/synchronizers/base/steps/sync_controller_images.yaml
new file mode 100644
index 0000000..6247a30
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_controller_images.yaml
@@ -0,0 +1,13 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - glance_image:
+        auth_url={{ endpoint }}
+        login_username="{{ admin_user }}"
+        login_tenant_name="admin"
+        login_password="{{ admin_password }}"
+        name="{{ name }}"
+        file="{{ filepath }}"
+        disk_format='raw'
+        is_public=true
diff --git a/xos/synchronizers/base/steps/sync_controller_networks.py b/xos/synchronizers/base/steps/sync_controller_networks.py
new file mode 100644
index 0000000..5b1ca20
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_controller_networks.py
@@ -0,0 +1,136 @@
+import os
+import base64
+from collections import defaultdict
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from observer.syncstep import *
+from core.models.network import *
+from core.models.slice import *
+from core.models.instance import Instance
+from util.logger import observer_logger as logger
+from observer.ansible import *
+from openstack.driver import OpenStackDriver
+import json
+
+
+class SyncControllerNetworks(OpenStackSyncStep):
+    requested_interval = 0
+    provides=[Network]
+    observes=ControllerNetwork
+    playbook='sync_controller_networks.yaml'
+
+    def alloc_subnet(self, uuid):
+        # 16 bits only
+        uuid_masked = uuid & 0xffff
+        a = 10
+        b = uuid_masked >> 8
+        c = uuid_masked & 0xff
+        d = 0
+
+        cidr = '%d.%d.%d.%d/24'%(a,b,c,d)
+        return cidr
+
+    def alloc_gateway(self, uuid):
+        # 16 bits only
+        uuid_masked = uuid & 0xffff
+        a = 10
+        b = uuid_masked >> 8
+        c = uuid_masked & 0xff
+        d = 1
+
+        gateway = '%d.%d.%d.%d'%(a,b,c,d)
+        return gateway
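+    # e.g. (illustrative) pk 4660 (0x1234): alloc_subnet -> '10.18.52.0/24',
+    # alloc_gateway -> '10.18.52.1'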
+
+
+    def save_controller_network(self, controller_network):
+        network_name = controller_network.network.name
+        subnet_name = '%s-%d'%(network_name,controller_network.pk)
+        cidr = self.alloc_subnet(controller_network.pk)
+        self.cidr=cidr
+        slice = controller_network.network.owner
+
+        network_fields = {'endpoint':controller_network.controller.auth_url,
+                    'endpoint_v3': controller_network.controller.auth_url_v3,
+                    'admin_user':slice.creator.email,
+                    'tenant_name':slice.name,
+                    'admin_password':slice.creator.remote_password,
+                    'domain': controller_network.controller.domain,
+                    'name':network_name,
+                    'subnet_name':subnet_name,
+                    'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
+                    'cidr':cidr,
+                    'gateway':self.alloc_gateway(controller_network.pk),
+                    'use_vtn':getattr(Config(), "networking_use_vtn", False),
+                    'delete':False
+                    }
+        return network_fields
+
+    def map_sync_outputs(self, controller_network,res):
+        network_id = res[0]['id']
+        subnet_id = res[1]['id']
+        controller_network.net_id = network_id
+        controller_network.subnet = self.cidr
+        controller_network.subnet_id = subnet_id
+        controller_network.backend_status = '1 - OK'
+        controller_network.save()
+
+
+    def map_sync_inputs(self, controller_network):
+        # XXX This check should really be made from booleans, rather than using hardcoded network names
+        if (controller_network.network.template.name not in ['Private', 'Private-Indirect', 'Private-Direct']):
+            logger.info("skipping network controller %s because it is not private" % controller_network)
+            # We only sync private networks
+            return SyncStep.SYNC_WITHOUT_RUNNING
+        
+        if not controller_network.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_network.controller)
+            return
+
+        if controller_network.network.owner and controller_network.network.owner.creator:
+            return self.save_controller_network(controller_network)
+        else:
+            raise Exception('Could not save network controller %s'%controller_network)
+
+    def map_delete_inputs(self, controller_network):
+        # XXX This check should really be made from booleans, rather than using hardcoded network names
+        if (controller_network.network.template.name not in ['Private', 'Private-Indirect', 'Private-Direct']):
+            # We only sync private networks
+            return
+
+        try:
+            slice = controller_network.network.owner # XXX: FIXME!!
+        except:
+            raise Exception('Could not get slice for Network %s'%controller_network.network.name)
+
+        network_name = controller_network.network.name
+        subnet_name = '%s-%d'%(network_name,controller_network.pk)
+        cidr = controller_network.subnet
+        network_fields = {'endpoint':controller_network.controller.auth_url,
+                          'admin_user':slice.creator.email, # XXX: FIXME
+                          'tenant_name':slice.name, # XXX: FIXME
+                          'admin_password':slice.creator.remote_password,
+                          'name':network_name,
+                          'subnet_name':subnet_name,
+                          'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
+                          'cidr':cidr,
+                          'delete':True
+                          }
+
+        return network_fields
+
+        """
+        driver = OpenStackDriver().client_driver(caller=controller_network.network.owner.creator,
+                                                 tenant=controller_network.network.owner.name,
+                                                 controller=controller_network.controller.name)
+        if (controller_network.router_id) and (controller_network.subnet_id):
+            driver.delete_router_interface(controller_network.router_id, controller_network.subnet_id)
+        if controller_network.subnet_id:
+            driver.delete_subnet(controller_network.subnet_id)
+        if controller_network.router_id:
+            driver.delete_router(controller_network.router_id)
+        if controller_network.net_id:
+            driver.delete_network(controller_network.net_id)
+        """
diff --git a/xos/synchronizers/base/steps/sync_controller_networks.yaml b/xos/synchronizers/base/steps/sync_controller_networks.yaml
new file mode 100644
index 0000000..b885516
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_controller_networks.yaml
@@ -0,0 +1,39 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - quantum_network:
+        auth_url={{ endpoint }}
+        login_username={{ admin_user }}
+        login_tenant_name={{ tenant_name }}
+        login_password={{ admin_password }}
+        tenant_name={{ tenant_name }}
+        name={{ name }}
+        {% if delete %}
+        state=absent
+        {% else %}
+        state=present
+        {% endif %}
+        shared=true
+  {% if not delete %}
+  - quantum_subnet:
+        auth_url={{ endpoint }}
+        login_username={{ admin_user }}
+        login_tenant_name={{ tenant_name }}
+        login_password={{ admin_password }}
+        tenant_name={{ tenant_name }}
+        name={{ subnet_name }}
+        network_name={{ name }}
+        {% if delete %}
+        state=absent
+        {% else %}
+        state=present
+        {% if use_vtn %}
+        gateway_ip={{ gateway }}
+        {% else %}
+        no_gateway=true
+        {% endif %}
+        dns_nameservers=8.8.8.8
+        cidr={{ cidr }}
+        {% endif %}
+  {% endif %}
diff --git a/xos/synchronizers/base/steps/sync_controller_site_privileges.py b/xos/synchronizers/base/steps/sync_controller_site_privileges.py
new file mode 100644
index 0000000..5688932
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_controller_site_privileges.py
@@ -0,0 +1,84 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from observer.syncstep import *
+from core.models.site import Controller, SitePrivilege, ControllerSite
+from core.models.user import User
+from core.models.controlleruser import ControllerUser, ControllerSitePrivilege
+from util.logger import observer_logger as logger
+from observer.ansible import *
+import json
+
+class SyncControllerSitePrivileges(OpenStackSyncStep):
+    provides=[SitePrivilege]
+    requested_interval=0
+    observes=ControllerSitePrivilege
+    playbook='sync_controller_users.yaml'
+
+    def map_sync_inputs(self, controller_site_privilege):
+        controller_register = json.loads(controller_site_privilege.controller.backend_register)
+        if not controller_site_privilege.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_site_privilege.controller)
+            return
+
+        roles = [controller_site_privilege.site_privilege.role.role]
+        # setup user home site roles at controller
+        if not controller_site_privilege.site_privilege.user.site:
+            raise Exception('Siteless user %s'%controller_site_privilege.site_privilege.user.email)
+        else:
+            # look up tenant id for the user's site at the controller
+            #ctrl_site_deployments = SiteDeployment.objects.filter(
+            #  site_deployment__site=controller_site_privilege.user.site,
+            #  controller=controller_site_privilege.controller)
+
+            #if ctrl_site_deployments:
+            #    # need the correct tenant id for site at the controller
+            #    tenant_id = ctrl_site_deployments[0].tenant_id
+            #    tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
+            user_fields = {
+                'endpoint': controller_site_privilege.controller.auth_url,
+                'endpoint_v3': controller_site_privilege.controller.auth_url_v3,
+                'domain': controller_site_privilege.controller.domain,
+                'name': controller_site_privilege.site_privilege.user.email,
+                'email': controller_site_privilege.site_privilege.user.email,
+                'password': controller_site_privilege.site_privilege.user.remote_password,
+                'admin_user': controller_site_privilege.controller.admin_user,
+                'admin_password': controller_site_privilege.controller.admin_password,
+                'ansible_tag': '%s@%s'%(controller_site_privilege.site_privilege.user.email.replace('@','-at-'),controller_site_privilege.controller.name),
+                'admin_tenant': controller_site_privilege.controller.admin_tenant,
+                'roles': roles,
+                'tenant': controller_site_privilege.site_privilege.site.login_base}
+
+            return user_fields
+
+    def map_sync_outputs(self, controller_site_privilege, res):
+        # results is an array in which each element corresponds to an
+        # "ok" string received per operation. If we get as many oks as
+        # the number of operations we issued, that means a grand success.
+        # Otherwise, the number of oks tells us which operation failed.
+        controller_site_privilege.role_id = res[0]['id']
+        controller_site_privilege.save()
+
+    def delete_record(self, controller_site_privilege):
+        controller_register = json.loads(controller_site_privilege.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+            raise InnocuousException('Controller %s is disabled'%controller_site_privilege.controller.name)
+
+        if controller_site_privilege.role_id:
+            driver = self.driver.admin_driver(controller=controller_site_privilege.controller)
+            user = ControllerUser.objects.get(
+                controller=controller_site_privilege.controller,
+                user=controller_site_privilege.site_privilege.user
+            )
+            site = ControllerSite.objects.get(
+                controller=controller_site_privilege.controller,
+                site=controller_site_privilege.site_privilege.site
+            )
+            driver.delete_user_role(
+                user.kuser_id,
+                site.tenant_id,
+                controller_site_privilege.site_privilege.role.role
+            )
diff --git a/xos/synchronizers/base/steps/sync_controller_sites.py b/xos/synchronizers/base/steps/sync_controller_sites.py
new file mode 100644
index 0000000..614d435
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_controller_sites.py
@@ -0,0 +1,67 @@
+import os
+import base64
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models.site import *
+from observer.syncstep import *
+from observer.ansible import *
+from util.logger import observer_logger as logger
+import json
+
+class SyncControllerSites(OpenStackSyncStep):
+    requested_interval=0
+    provides=[Site]
+    observes=ControllerSite
+    playbook = 'sync_controller_sites.yaml'
+
+    def fetch_pending(self, deleted=False):
+        lobjs = ControllerSite.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False),Q(controller__isnull=False))
+        return lobjs
+
+    def map_sync_inputs(self, controller_site):
+        tenant_fields = {'endpoint': controller_site.controller.auth_url,
+                         'endpoint_v3': controller_site.controller.auth_url_v3,
+                         'domain': controller_site.controller.domain,
+                         'admin_user': controller_site.controller.admin_user,
+                         'admin_password': controller_site.controller.admin_password,
+                         'admin_tenant': controller_site.controller.admin_tenant,
+                         'ansible_tag': '%s@%s'%(controller_site.site.login_base,controller_site.controller.name), # name of ansible playbook
+                         'tenant': controller_site.site.login_base,
+                         'tenant_description': controller_site.site.name}
+        return tenant_fields
+
+    def map_sync_outputs(self, controller_site, res):
+        controller_site.tenant_id = res[0]['id']
+        controller_site.backend_status = '1 - OK'
+        controller_site.save()
+
+    def delete_record(self, controller_site):
+        controller_register = json.loads(controller_site.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+            raise InnocuousException('Controller %s is disabled'%controller_site.controller.name)
+
+        if controller_site.tenant_id:
+            driver = self.driver.admin_driver(controller=controller_site.controller)
+            driver.delete_tenant(controller_site.tenant_id)
+
+        """
+        Ansible does not support tenant deletion yet
+
+	import pdb
+	pdb.set_trace()
+        template = os_template_env.get_template('delete_controller_sites.yaml')
+	tenant_fields = {'endpoint':controller_site.controller.auth_url,
+		         'admin_user': controller_site.controller.admin_user,
+		         'admin_password': controller_site.controller.admin_password,
+		         'admin_tenant': 'admin',
+	                 'ansible_tag': 'controller_sites/%s@%s'%(controller_site.controller_site.site.login_base,controller_site.controller_site.deployment.name), # name of ansible playbook
+		         'tenant': controller_site.controller_site.site.login_base,
+		         'delete': True}
+
+	rendered = template.render(tenant_fields)
+	res = run_template('sync_controller_sites.yaml', tenant_fields)
+
+	if (len(res)!=1):
+		raise Exception('Could not assign roles for user %s'%tenant_fields['tenant'])
+        """
diff --git a/xos/synchronizers/base/steps/sync_controller_sites.yaml b/xos/synchronizers/base/steps/sync_controller_sites.yaml
new file mode 100644
index 0000000..4129802
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_controller_sites.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}"
diff --git a/xos/synchronizers/base/steps/sync_controller_slice_privileges.py b/xos/synchronizers/base/steps/sync_controller_slice_privileges.py
new file mode 100644
index 0000000..c3d9ce4
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_controller_slice_privileges.py
@@ -0,0 +1,79 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from observer.syncstep import *
+from core.models.slice import Controller, SlicePrivilege, ControllerSlice
+from core.models.user import User
+from core.models.controlleruser import ControllerUser, ControllerSlicePrivilege
+from observer.ansible import *
+from util.logger import observer_logger as logger
+import json
+
+class SyncControllerSlicePrivileges(OpenStackSyncStep):
+    provides=[SlicePrivilege]
+    requested_interval=0
+    observes=ControllerSlicePrivilege
+    playbook = 'sync_controller_users.yaml'
+
+    def map_sync_inputs(self, controller_slice_privilege):
+        if not controller_slice_privilege.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_slice_privilege.controller)
+            return
+
+        roles = [controller_slice_privilege.slice_privilege.role.role]
+        # setup user home slice roles at controller
+        if not controller_slice_privilege.slice_privilege.user.site:
+            raise Exception('Siteless user %s'%controller_slice_privilege.slice_privilege.user.email)
+        else:
+            # look up tenant id for the user's slice at the controller
+            #ctrl_slice_deployments = SliceDeployment.objects.filter(
+            #  slice_deployment__slice=controller_slice_privilege.user.slice,
+            #  controller=controller_slice_privilege.controller)
+
+            #if ctrl_slice_deployments:
+            #    # need the correct tenant id for slice at the controller
+            #    tenant_id = ctrl_slice_deployments[0].tenant_id
+            #    tenant_name = ctrl_slice_deployments[0].slice_deployment.slice.login_base
+            user_fields = {
+                'endpoint': controller_slice_privilege.controller.auth_url,
+                'endpoint_v3': controller_slice_privilege.controller.auth_url_v3,
+                'domain': controller_slice_privilege.controller.domain,
+                'name': controller_slice_privilege.slice_privilege.user.email,
+                'email': controller_slice_privilege.slice_privilege.user.email,
+                'password': controller_slice_privilege.slice_privilege.user.remote_password,
+                'admin_user': controller_slice_privilege.controller.admin_user,
+                'admin_password': controller_slice_privilege.controller.admin_password,
+                'ansible_tag': '%s@%s@%s'%(controller_slice_privilege.slice_privilege.user.email.replace('@','-at-'),controller_slice_privilege.slice_privilege.slice.name,controller_slice_privilege.controller.name),
+                'admin_tenant': controller_slice_privilege.controller.admin_tenant,
+                'roles': roles,
+                'tenant': controller_slice_privilege.slice_privilege.slice.name}
+            return user_fields
+
+    def map_sync_outputs(self, controller_slice_privilege, res):
+        controller_slice_privilege.role_id = res[0]['id']
+        controller_slice_privilege.save()
+
+    def delete_record(self, controller_slice_privilege):
+        controller_register = json.loads(controller_slice_privilege.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+            raise InnocuousException('Controller %s is disabled'%controller_slice_privilege.controller.name)
+
+        if controller_slice_privilege.role_id:
+            driver = self.driver.admin_driver(controller=controller_slice_privilege.controller)
+            user = ControllerUser.objects.get(
+                controller=controller_slice_privilege.controller,
+                user=controller_slice_privilege.slice_privilege.user
+            )
+            slice = ControllerSlice.objects.get(
+                controller=controller_slice_privilege.controller,
+                slice=controller_slice_privilege.slice_privilege.slice
+            )
+            driver.delete_user_role(
+                user.kuser_id,
+                slice.tenant_id,
+                controller_slice_privilege.slice_privilege.role.role
+            )
diff --git a/xos/synchronizers/base/steps/sync_controller_slices.py b/xos/synchronizers/base/steps/sync_controller_slices.py
new file mode 100644
index 0000000..aee2a0a
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_controller_slices.py
@@ -0,0 +1,84 @@
+import os
+import base64
+from collections import defaultdict
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from observer.syncstep import *
+from core.models import *
+from observer.ansible import *
+from openstack.driver import OpenStackDriver
+from util.logger import observer_logger as logger
+import json
+
+class SyncControllerSlices(OpenStackSyncStep):
+    provides=[Slice]
+    requested_interval=0
+    observes=ControllerSlice
+    playbook='sync_controller_slices.yaml'
+
+    def map_sync_inputs(self, controller_slice):
+        logger.info("sync'ing slice controller %s" % controller_slice)
+
+        if not controller_slice.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_slice.controller)
+            return
+
+        controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
+                                                             controller=controller_slice.controller)
+        if not controller_users:
+            raise Exception("slice creator %s has no account at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
+        else:
+            controller_user = controller_users[0]
+            roles = ['admin']
+
+        max_instances=int(controller_slice.slice.max_instances)
+        tenant_fields = {'endpoint':controller_slice.controller.auth_url,
+                         'endpoint_v3': controller_slice.controller.auth_url_v3,
+                         'domain': controller_slice.controller.domain,
+                         'admin_user': controller_slice.controller.admin_user,
+                         'admin_password': controller_slice.controller.admin_password,
+                         'admin_tenant': 'admin',
+                         'tenant': controller_slice.slice.name,
+                         'tenant_description': controller_slice.slice.description,
+                         'roles':roles,
+                         'name':controller_user.user.email,
+                         'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
+                         'max_instances':max_instances}
+
+        return tenant_fields
+
+    def map_sync_outputs(self, controller_slice, res):
+        tenant_id = res[0]['id']
+        if (not controller_slice.tenant_id):
+            try:
+                driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
+                driver.shell.nova.quotas.update(tenant_id=tenant_id, instances=int(controller_slice.slice.max_instances))
+            except:
+                logger.log_exc('Could not update quota for %s'%controller_slice.slice.name)
+                raise Exception('Could not update quota for %s'%controller_slice.slice.name)
+
+            controller_slice.tenant_id = tenant_id
+            controller_slice.backend_status = '1 - OK'
+            controller_slice.save()
+
+
+    def map_delete_inputs(self, controller_slice):
+        controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
+                                                              controller=controller_slice.controller)
+        if not controller_users:
+            raise Exception("slice creator %s has no account at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
+        else:
+            controller_user = controller_users[0]
+
+        tenant_fields = {'endpoint':controller_slice.controller.auth_url,
+                          'admin_user': controller_slice.controller.admin_user,
+                          'admin_password': controller_slice.controller.admin_password,
+                          'admin_tenant': 'admin',
+                          'tenant': controller_slice.slice.name,
+                          'tenant_description': controller_slice.slice.description,
+                          'name':controller_user.user.email,
+                          'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
+                          'delete': True}
+        return tenant_fields
diff --git a/xos/synchronizers/base/steps/sync_controller_slices.py.bak b/xos/synchronizers/base/steps/sync_controller_slices.py.bak
new file mode 100644
index 0000000..e04da8e
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_controller_slices.py.bak
@@ -0,0 +1,95 @@
+import os
+import base64
+from collections import defaultdict
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from xos.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models import *
+from observer.ansible import *
+from openstack.driver import OpenStackDriver
+from util.logger import observer_logger as logger
+import json
+
+class SyncControllerSlices(OpenStackSyncStep):
+    provides=[Slice]
+    requested_interval=0
+    observes=ControllerSlice
+
+    def fetch_pending(self, deleted):
+        if (deleted):
+            return ControllerSlice.deleted_objects.all()
+        else:
+            return ControllerSlice.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def sync_record(self, controller_slice):
+        logger.info("sync'ing slice controller %s" % controller_slice)
+
+        controller_register = json.loads(controller_slice.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+            raise Exception('Controller %s is disabled'%controller_slice.controller.name)
+
+        if not controller_slice.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_slice.controller)
+            return
+
+        controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
+                                                             controller=controller_slice.controller)
+        if not controller_users:
+            raise Exception("slice creator %s has no account at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
+        else:
+            controller_user = controller_users[0]
+            roles = ['Admin']
+
+        max_instances=int(controller_slice.slice.max_slivers)
+        tenant_fields = {'endpoint':controller_slice.controller.auth_url,
+                         'admin_user': controller_slice.controller.admin_user,
+                         'admin_password': controller_slice.controller.admin_password,
+                         'admin_tenant': 'admin',
+                         'tenant': controller_slice.slice.name,
+                         'tenant_description': controller_slice.slice.description,
+                         'roles':roles,
+                         'name':controller_user.user.email,
+                         'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
+                         'max_instances':max_instances}
+
+        expected_num = len(roles)+1
+        res = run_template('sync_controller_slices.yaml', tenant_fields, path='controller_slices', expected_num=expected_num)
+        tenant_id = res[0]['id']
+        if (not controller_slice.tenant_id):
+            try:
+                    driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
+                    driver.shell.nova.quotas.update(tenant_id=controller_slice.tenant_id, instances=int(controller_slice.slice.max_slivers))
+            except:
+                    logger.log_exc('Could not update quota for %s'%controller_slice.slice.name)
+                    raise Exception('Could not update quota for %s'%controller_slice.slice.name)
+                
+            controller_slice.tenant_id = tenant_id
+            controller_slice.backend_status = '1 - OK'
+            controller_slice.save()
+
+
+    def delete_record(self, controller_slice):
+        controller_register = json.loads(controller_slice.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+            raise Exception('Controller %s is disabled'%controller_slice.controller.name)
+
+        controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
+                                                         controller=controller_slice.controller)
+        if not controller_users:
+            raise Exception("slice creator %s has no account at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
+        else:
+            controller_user = controller_users[0]
+
+        tenant_fields = {'endpoint':controller_slice.controller.auth_url,
+                         'admin_user': controller_slice.controller.admin_user,
+                         'admin_password': controller_slice.controller.admin_password,
+                         'admin_tenant': 'admin',
+                         'tenant': controller_slice.slice.name,
+                         'tenant_description': controller_slice.slice.description,
+                         'name':controller_user.user.email,
+                         'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
+                         'delete': True}
+
+        expected_num = 1
+        res = run_template('sync_controller_slices.yaml', tenant_fields, path='controller_slices', expected_num=expected_num)
diff --git a/xos/synchronizers/base/steps/sync_controller_slices.yaml b/xos/synchronizers/base/steps/sync_controller_slices.yaml
new file mode 100644
index 0000000..61470ce
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_controller_slices.yaml
@@ -0,0 +1,12 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  {% if delete -%}
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}" state=absent
+  {% else -%}
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}"
+  {% for role in roles %}
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} user="{{ name }}" role={{ role }} tenant={{ tenant }}
+  {% endfor %}
+  {% endif %}
diff --git a/xos/synchronizers/base/steps/sync_controller_users.py b/xos/synchronizers/base/steps/sync_controller_users.py
new file mode 100644
index 0000000..68faec9
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_controller_users.py
@@ -0,0 +1,69 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from observer.syncstep import *
+from core.models.site import Controller, SiteDeployment
+from core.models.user import User
+from core.models.controlleruser import ControllerUser
+from observer.ansible import *
+from util.logger import observer_logger as logger
+import json
+
+class SyncControllerUsers(OpenStackSyncStep):
+    provides=[User]
+    requested_interval=0
+    observes=ControllerUser
+    playbook='sync_controller_users.yaml'
+
+    def map_sync_inputs(self, controller_user):
+        if not controller_user.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_user.controller)
+            return
+
+        # All users will have at least the 'user' role at their home site/tenant.
+        # We must also check if the user should have the admin role
+        roles = ['user']
+        if controller_user.user.is_admin:
+            roles.append('admin')
+
+        # setup user home site roles at controller
+        if not controller_user.user.site:
+            raise Exception('Siteless user %s'%controller_user.user.email)
+        else:
+            # look up tenant id for the user's site at the controller
+            #ctrl_site_deployments = SiteDeployment.objects.filter(
+            #  site_deployment__site=controller_user.user.site,
+            #  controller=controller_user.controller)
+
+            #if ctrl_site_deployments:
+            #    # need the correct tenant id for site at the controller
+            #    tenant_id = ctrl_site_deployments[0].tenant_id
+            #    tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
+            user_fields = {
+                'endpoint':controller_user.controller.auth_url,
+                'endpoint_v3': controller_user.controller.auth_url_v3,
+                'domain': controller_user.controller.domain,
+                'name': controller_user.user.email,
+                'email': controller_user.user.email,
+                'password': controller_user.user.remote_password,
+                'admin_user': controller_user.controller.admin_user,
+                'admin_password': controller_user.controller.admin_password,
+                'ansible_tag':'%s@%s'%(controller_user.user.email.replace('@','-at-'),controller_user.controller.name),
+                'admin_tenant': controller_user.controller.admin_tenant,
+                'roles':roles,
+                'tenant':controller_user.user.site.login_base
+                }
+            return user_fields
+
+    def map_sync_outputs(self, controller_user, res):
+        controller_user.kuser_id = res[0]['id']
+        controller_user.backend_status = '1 - OK'
+        controller_user.save()
+
+    def delete_record(self, controller_user):
+        if controller_user.kuser_id:
+            driver = self.driver.admin_driver(controller=controller_user.controller)
+            driver.delete_user(controller_user.kuser_id)
diff --git a/xos/synchronizers/base/steps/sync_controller_users.yaml b/xos/synchronizers/base/steps/sync_controller_users.yaml
new file mode 100644
index 0000000..4f2db5e
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_controller_users.yaml
@@ -0,0 +1,16 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - keystone_user:
+       endpoint={{ endpoint }}
+       login_user={{ admin_user }} 
+       login_password={{ admin_password }} 
+       login_tenant_name={{ admin_tenant }} 
+       user="{{ name }}"
+       email={{ email }}
+       password={{ password }}
+       tenant={{ tenant }}
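+  # The loop below grants each requested role to the user on the tenant,
+  # one keystone_user task per role.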
+  {% for role in roles %}
+  - keystone_user: endpoint={{ endpoint}}  login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} user="{{ name }}" role={{ role }} tenant={{ tenant }}
+  {% endfor %}
diff --git a/xos/synchronizers/base/steps/sync_images.py b/xos/synchronizers/base/steps/sync_images.py
new file mode 100644
index 0000000..40c6447
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_images.py
@@ -0,0 +1,52 @@
+import os
+import base64
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models.image import Image
+from util.logger import observer_logger as logger
+
+class SyncImages(OpenStackSyncStep):
+    provides=[Image]
+    requested_interval=0
+    observes=Image
+
+    def fetch_pending(self, deleted):
+        # Images come from the back end
+        # You can't delete them
+        if (deleted):
+            logger.info("SyncImages: returning because deleted=True")
+            return []
+
+        # get list of images on disk
+        images_path = Config().observer_images_directory
+
+        logger.info("SyncImages: deleted=False, images_path=%s" % images_path)
+
+        available_images = {}
+        if os.path.exists(images_path):
+            for f in os.listdir(images_path):
+                filename = os.path.join(images_path, f)
+                if os.path.isfile(filename):
+                    available_images[f] = filename
+
+        logger.info("SyncImages: available_images = %s" % str(available_images))
+
+        images = Image.objects.all()
+        image_names = [image.name for image in images]
+
+        for image_name in available_images:
+            #remove file extension
+            clean_name = ".".join(image_name.split('.')[:-1])
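+            # e.g. (illustrative) "trusty-server-cloudimg.img" -> "trusty-server-cloudimg"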
+            if clean_name not in image_names:
+                logger.info("SyncImages: adding %s" % clean_name)
+                image = Image(name=clean_name,
+                              disk_format='raw',
+                              container_format='bare', 
+                              path = available_images[image_name])
+                image.save()
+
+        return Image.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None)) 
+
+    def sync_record(self, image):
+        image.save()
diff --git a/xos/synchronizers/base/steps/sync_instances.py b/xos/synchronizers/base/steps/sync_instances.py
new file mode 100644
index 0000000..e336279
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_instances.py
@@ -0,0 +1,174 @@
+import os
+import base64
+import socket
+import json
+from django.db.models import F, Q
+from xos.config import Config
+from xos.settings import RESTAPI_HOSTNAME, RESTAPI_PORT
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models.instance import Instance
+from core.models.slice import Slice, SlicePrivilege, ControllerSlice
+from core.models.network import Network, NetworkSlice, ControllerNetwork
+from observer.ansible import *
+from observer.syncstep import *
+from util.logger import observer_logger as logger
+
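+# escape() backslash-escapes newlines and double quotes so user_data can be
+# embedded safely in the rendered playbook; e.g. (illustrative) the input
+# 'say "hi"' followed by a newline becomes the literal text: say \"hi\"\n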
+def escape(s):
+    s = s.replace('\n',r'\n').replace('"',r'\"')
+    return s
+
+class SyncInstances(OpenStackSyncStep):
+    provides=[Instance]
+    requested_interval=0
+    observes=Instance
+    playbook='sync_instances.yaml'
+
+    def fetch_pending(self, deletion=False):
+        objs = super(SyncInstances, self).fetch_pending(deletion)
+        objs = [x for x in objs if x.isolation=="vm"]
+        return objs
+
+    def get_userdata(self, instance, pubkeys):
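+        # Builds the #cloud-config user-data passed to the instance, shaped
+        # like (values illustrative):
+        #   opencloud:
+        #      slicename: "mysite_myslice"
+        #      hostname: "node1.example.org"
+        #   ssh_authorized_keys:
+        #      - ssh-rsa AAAA... user@host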
+        userdata = '#cloud-config\n\nopencloud:\n   slicename: "%s"\n   hostname: "%s"\n   restapi_hostname: "%s"\n   restapi_port: "%s"\n' % (instance.slice.name, instance.node.name, RESTAPI_HOSTNAME, str(RESTAPI_PORT))
+        userdata += 'ssh_authorized_keys:\n'
+        for key in pubkeys:
+            userdata += '  - %s\n' % key
+        return userdata
+
+    def map_sync_inputs(self, instance):
+        inputs = {}
+        metadata_update = {}
+        if (instance.numberCores):
+            metadata_update["cpu_cores"] = str(instance.numberCores)
+
+        for tag in instance.slice.tags.all():
+            if tag.name.startswith("sysctl-"):
+                metadata_update[tag.name] = tag.value
+
+        slice_memberships = SlicePrivilege.objects.filter(slice=instance.slice)
+        pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
+        if instance.creator.public_key:
+            pubkeys.add(instance.creator.public_key)
+
+        if instance.slice.creator.public_key:
+            pubkeys.add(instance.slice.creator.public_key)
+
+        if instance.slice.service and instance.slice.service.public_key:
+            pubkeys.add(instance.slice.service.public_key)
+
+        nics = []
+        networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice)]
+        controller_networks = ControllerNetwork.objects.filter(network__in=networks,
+                                                                controller=instance.node.site_deployment.controller)
+
+        for controller_network in controller_networks:
+
+            # Lenient exception - causes slow backoff
+            if controller_network.network.template.visibility == 'private' and \
+               controller_network.network.template.translation == 'none':
+                if not controller_network.net_id:
+                    raise DeferredException("Instance %s Private Network %s has no id; Try again later" % (instance, controller_network.network.name))
+                nics.append(controller_network.net_id)
+
+        # now include network template
+        network_templates = [network.template.shared_network_name for network in networks \
+                             if network.template.shared_network_name]
+
+        #driver = self.driver.client_driver(caller=instance.creator, tenant=instance.slice.name, controller=instance.controllerNetwork)
+        driver = self.driver.admin_driver(tenant='admin', controller=instance.node.site_deployment.controller)
+        nets = driver.shell.quantum.list_networks()['networks']
+        for net in nets:
+            if net['name'] in network_templates:
+                nics.append(net['id'])
+
+        if (not nics):
+            for net in nets:
+                if net['name']=='public':
+                    nics.append(net['id'])
+
+        image_name = None
+        controller_images = instance.image.controllerimages.filter(controller=instance.node.site_deployment.controller)
+        if controller_images:
+            image_name = controller_images[0].image.name
+            logger.info("using image from ControllerImage object: " + str(image_name))
+
+        if image_name is None:
+            controller_driver = self.driver.admin_driver(controller=instance.node.site_deployment.controller)
+            images = controller_driver.shell.glanceclient.images.list()
+            for image in images:
+                if image.name == instance.image.name or not image_name:
+                    image_name = image.name
+                    logger.info("using image from glance: " + str(image_name))
+
+        try:
+            legacy = Config().observer_legacy
+        except:
+            legacy = False
+
+        if (legacy):
+            host_filter = instance.node.name.split('.',1)[0]
+        else:
+            host_filter = instance.node.name.strip()
+
+        availability_zone_filter = 'nova:%s'%host_filter
+        instance_name = '%s-%d'%(instance.slice.name,instance.id)
+        self.instance_name = instance_name
+
+        userData = self.get_userdata(instance, pubkeys)
+        if instance.userData:
+            userData += instance.userData
+
+        controller = instance.node.site_deployment.controller
+        fields = {'endpoint':controller.auth_url,
+                     'endpoint_v3': controller.auth_url_v3,
+                     'domain': controller.domain,
+                     'admin_user': instance.creator.email,
+                     'admin_password': instance.creator.remote_password,
+                     'admin_tenant': instance.slice.name,
+                     'tenant': instance.slice.name,
+                     'tenant_description': instance.slice.description,
+                     'name':instance_name,
+                     'ansible_tag':instance_name,
+                     'availability_zone': availability_zone_filter,
+                     'image_name':image_name,
+                     'flavor_name':instance.flavor.name,
+                     'nics':nics,
+                     'meta':metadata_update,
+                     'user_data':r'%s'%escape(userData)}
+        return fields
+
+
+    def map_sync_outputs(self, instance, res):
+        instance_id = res[0]['info']['OS-EXT-SRV-ATTR:instance_name']
+        instance_uuid = res[0]['id']
+
+        try:
+            hostname = res[0]['info']['OS-EXT-SRV-ATTR:hypervisor_hostname']
+            ip = socket.gethostbyname(hostname)
+            instance.ip = ip
+        except:
+            pass
+
+        instance.instance_id = instance_id
+        instance.instance_uuid = instance_uuid
+        instance.instance_name = self.instance_name
+        instance.save()
+
+
+    def map_delete_inputs(self, instance):
+        controller_register = json.loads(instance.node.site_deployment.controller.backend_register)
+
+        if (controller_register.get('disabled',False)):
+            raise InnocuousException('Controller %s is disabled'%instance.node.site_deployment.controller.name)
+
+        instance_name = '%s-%d'%(instance.slice.name,instance.id)
+        controller = instance.node.site_deployment.controller
+        input = {'endpoint':controller.auth_url,
+                     'admin_user': instance.creator.email,
+                     'admin_password': instance.creator.remote_password,
+                     'admin_tenant': instance.slice.name,
+                     'tenant': instance.slice.name,
+                     'tenant_description': instance.slice.description,
+                     'name':instance_name,
+                     'ansible_tag':instance_name,
+                     'delete': True}
+        return input
diff --git a/xos/synchronizers/base/steps/sync_instances.yaml b/xos/synchronizers/base/steps/sync_instances.yaml
new file mode 100644
index 0000000..a61e5cf
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_instances.yaml
@@ -0,0 +1,35 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - nova_compute:
+      auth_url: {{ endpoint }}
+      login_username: {{ admin_user }}
+      login_password: {{ admin_password }}
+      login_tenant_name: {{ admin_tenant }}
+      name: {{ name }}
+      {% if delete -%}
+      state: absent
+      {% else -%}
+      state: present
+      availability_zone: {{ availability_zone }}
+      image_name: {{ image_name }}
+      wait_for: 200
+      flavor_name: {{ flavor_name }}
+      user_data: "{{ user_data }}"
+      config_drive: yes
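+      # net-id entries below come from the nics list built in map_sync_inputs;
+      # port-id entries render only when a ports list is supplied.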
+      nics:
+      {% for net in nics %}
+          - net-id: {{ net }}
+      {% endfor %}
+      {% for port in ports %}
+          - port-id: {{ port }}
+      {% endfor %}
+
+      {% if meta %}
+      meta:
+      {% for k,v in meta.items() %}
+          {{ k }} : "{{ v }}"
+      {% endfor %}
+      {% endif %}
+      {% endif %}
diff --git a/xos/synchronizers/base/steps/sync_object.py b/xos/synchronizers/base/steps/sync_object.py
new file mode 100644
index 0000000..dc117d6
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_object.py
@@ -0,0 +1,20 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from observer.syncstep import *
+from core.models import *
+from observer.ansible import *
+from openstack.driver import OpenStackDriver
+from util.logger import observer_logger as logger
+import json
+
+class SyncObject(OpenStackSyncStep):
+    provides=[] # Caller fills this in
+    requested_interval=0
+    observes=[] # Caller fills this in
+
+    def sync_record(self, r):
+        raise DeferredException('Waiting for Service dependency: %r'%r)
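+
+    # This step intentionally syncs nothing: callers fill in provides and
+    # observes so that objects of those models are deferred until the
+    # Service they depend on has been synced.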
diff --git a/xos/synchronizers/base/steps/sync_ports.py b/xos/synchronizers/base/steps/sync_ports.py
new file mode 100644
index 0000000..37b780a
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_ports.py
@@ -0,0 +1,196 @@
+import os
+import base64
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models import Controller
+from core.models.network import *
+from util.logger import observer_logger as logger
+
+class SyncPorts(OpenStackSyncStep):
+    requested_interval = 0 # 3600
+    provides=[Port]
+    observes=Port
+
+    #     The way it works is to enumerate all of the ports that quantum
+    #     has, and then work backward from each port's network-id to determine
+    #     which Network is associated with the port.
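+    #
+    #     A port record returned by list_ports carries at least the fields
+    #     this step reads below (values here are illustrative):
+    #       {"id": "...", "network_id": "...", "device_id": "<instance uuid>",
+    #        "device_owner": "compute:nova",
+    #        "fixed_ips": [{"ip_address": "10.0.0.3"}],
+    #        "mac_address": "fa:16:3e:00:00:01"}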
+
+    def call(self, **args):
+        logger.info("sync'ing network instances")
+
+        ports = Port.objects.all()
+        ports_by_neutron_port = {}
+        for port in ports:
+            ports_by_neutron_port[port.port_id] = port
+
+        networks = Network.objects.all()
+        networks_by_id = {}
+        for network in networks:
+            for nd in network.controllernetworks.all():
+                networks_by_id[nd.net_id] = network
+
+        #logger.info("networks_by_id = ")
+        #for (network_id, network) in networks_by_id.items():
+        #    logger.info("   %s: %s" % (network_id, network.name))
+
+        instances = Instance.objects.all()
+        instances_by_instance_uuid = {}
+        for instance in instances:
+            instances_by_instance_uuid[instance.instance_uuid] = instance
+
+        # Get all ports in all controllers
+
+        ports_by_id = {}
+        templates_by_id = {}
+        for controller in Controller.objects.all():
+            if not controller.admin_tenant:
+                logger.info("controller %s has no admin_tenant" % controller)
+                continue
+            try:
+                driver = self.driver.admin_driver(controller = controller)
+                ports = driver.shell.quantum.list_ports()["ports"]
+            except:
+                logger.log_exc("failed to get ports from controller %s" % controller)
+                continue
+
+            for port in ports:
+                ports_by_id[port["id"]] = port
+
+            # public-nat and public-dedicated networks don't have a net-id anywhere
+            # in the data model, so build up a list of which ids map to which network
+            # templates.
+            try:
+                neutron_networks = driver.shell.quantum.list_networks()["networks"]
+            except:
+                print "failed to get networks from controller %s" % controller
+                continue
+            for network in neutron_networks:
+                for template in NetworkTemplate.objects.all():
+                    if template.shared_network_name == network["name"]:
+                        templates_by_id[network["id"]] = template
+
+        for port in ports_by_id.values():
+            #logger.info("port %s" % str(port))
+            if port["id"] in ports_by_neutron_port:
+                # we already have it
+                #logger.info("already accounted for port %s" % port["id"])
+                continue
+
+            if port["device_owner"] != "compute:nova":
+                # we only want the ports that connect to instances
+                #logger.info("port %s is not a compute port, it is a %s" % (port["id"], port["device_owner"]))
+                continue
+
+            instance = instances_by_instance_uuid.get(port['device_id'], None)
+            if not instance:
+                logger.info("no instance for port %s device_id %s" % (port["id"], port['device_id']))
+                continue
+
+            network = networks_by_id.get(port['network_id'], None)
+            if not network:
+                # maybe it's public-nat or public-dedicated. Search the templates for
+                # the id, then see if the instance's slice has some network that uses
+                # that template
+                template = templates_by_id.get(port['network_id'], None)
+                if template and instance.slice:
+                    for candidate_network in instance.slice.networks.all():
+                        if candidate_network.template == template:
+                            network = candidate_network
+            if not network:
+                logger.info("no network for port %s network %s" % (port["id"], port["network_id"]))
+
+                # we know it's associated with a instance, but we don't know
+                # which network it is part of.
+
+                continue
+
+            if network.template.shared_network_name:
+                # If it's a shared network template, then more than one network
+                # object maps to the quantum network. We have to do a whole bunch
+                # of extra work to find the right one.
+                networks = network.template.network_set.all()
+                network = None
+                for candidate_network in networks:
+                    if (candidate_network.owner == instance.slice):
+                        logger.info("found network %s" % candidate_network)
+                        network = candidate_network
+
+                if not network:
+                    logger.info("failed to find the correct network for a shared template for port %s network %s" % (port["id"], port["network_id"]))
+                    continue
+
+            if not port["fixed_ips"]:
+                logger.info("port %s has no fixed_ips" % port["id"])
+                continue
+
+            ip=port["fixed_ips"][0]["ip_address"]
+            mac=port["mac_address"]
+            logger.info("creating Port (%s, %s, %s, %s)" % (str(network), str(instance), ip, str(port["id"])))
+
+            ns = Port(network=network,
+                               instance=instance,
+                               ip=ip,
+                               mac=mac,
+                               port_id=port["id"])
+
+            try:
+                ns.save()
+            except:
+                logger.log_exc("failed to save port %s" % str(ns))
+                continue
+
+        # For ports that were created by the user, find the ones that
+        # don't have neutron ports yet, and create them.
+        for port in Port.objects.filter(Q(port_id__isnull=True), Q(instance__isnull=False) ):
+            logger.info("XXX working on port %s" % port)
+            controller = port.instance.node.site_deployment.controller
+            slice = port.instance.slice
+
+            if controller:
+                cn=port.network.controllernetworks.filter(controller=controller)
+                if not cn:
+                    logger.log_exc("no controllernetwork for %s" % port)
+                    continue
+                cn=cn[0]
+                if cn.lazy_blocked:
+                    cn.lazy_blocked=False
+                    cn.save()
+                    logger.info("deferring port %s because controllerNetwork was lazy-blocked" % port)
+                    continue
+                if not cn.net_id:
+                    logger.info("deferring port %s because controllerNetwork does not have a port-id yet" % port)
+                    continue
+                try:
+                    # We need to use a client driver that specifies the tenant
+                    # of the destination instance. Nova-compute will not connect
+                    # ports to instances if the port's tenant does not match
+                    # the instance's tenant.
+
+                    # A bunch of stuff to compensate for OpenStackDriver.client_driver()
+                    # not being in working condition.
+                    from openstack.client import OpenStackClient
+                    from openstack.driver import OpenStackDriver
+                    caller = port.network.owner.creator
+                    auth = {'username': caller.email,
+                            'password': caller.remote_password,
+                            'tenant': slice.name}
+                    client = OpenStackClient(controller=controller, **auth) # cacert=self.config.nova_ca_ssl_cert,
+                    driver = OpenStackDriver(client=client)
+
+                    neutron_port = driver.shell.quantum.create_port({"port": {"network_id": cn.net_id}})["port"]
+                    port.port_id = neutron_port["id"]
+                    if neutron_port["fixed_ips"]:
+                        port.ip = neutron_port["fixed_ips"][0]["ip_address"]
+                    port.mac = neutron_port["mac_address"]
+                except:
+                    logger.log_exc("failed to create neutron port for %s" % port)
+                    continue
+                port.save()
+
+    def delete_record(self, network_instance):
+        # Nothing to do, this is an OpenCloud object
+        pass
+
diff --git a/xos/synchronizers/base/steps/sync_roles.py b/xos/synchronizers/base/steps/sync_roles.py
new file mode 100644
index 0000000..e157dc2
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_roles.py
@@ -0,0 +1,23 @@
+import os
+import base64
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models.role import Role
+from core.models.site import SiteRole, Controller, ControllerRole
+from core.models.slice import SliceRole
+from util.logger import observer_logger as logger
+
+class SyncRoles(OpenStackSyncStep):
+    provides=[Role]
+    requested_interval=0
+    observes=[SiteRole,SliceRole,ControllerRole]
+
+    def sync_record(self, role):
+        if not role.enacted:
+            controllers = Controller.objects.all()
+            for controller in controllers:
+                driver = self.driver.admin_driver(controller=controller)
+                driver.create_role(role.role)
+            role.save()
+
diff --git a/xos/synchronizers/base/steps/sync_slivers.py.bak b/xos/synchronizers/base/steps/sync_slivers.py.bak
new file mode 100644
index 0000000..c4240fa
--- /dev/null
+++ b/xos/synchronizers/base/steps/sync_slivers.py.bak
@@ -0,0 +1,179 @@
+import os
+import base64
+import socket
+from django.db.models import F, Q
+from xos.config import Config
+from xos.settings import RESTAPI_HOSTNAME, RESTAPI_PORT
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.sliver import Sliver
+from core.models.slice import Slice, SlicePrivilege, ControllerSlice
+from core.models.network import Network, NetworkSlice, ControllerNetwork
+from observer.ansible import *
+from util.logger import observer_logger as logger
+
+def escape(s):
+    s = s.replace('\n',r'\n').replace('"',r'\"')
+    return s
+
+class SyncSlivers(OpenStackSyncStep):
+    provides=[Sliver]
+    requested_interval=0
+    observes=Sliver
+
+    def get_userdata(self, sliver, pubkeys):
+        userdata = '#cloud-config\n\nopencloud:\n   slicename: "%s"\n   hostname: "%s"\n   restapi_hostname: "%s"\n   restapi_port: "%s"\n' % (sliver.slice.name, sliver.node.name, RESTAPI_HOSTNAME, str(RESTAPI_PORT))
+        userdata += 'ssh_authorized_keys:\n'
+        for key in pubkeys:
+            userdata += '  - %s\n' % key
+        return userdata
+
+    def sync_record(self, sliver):
+        logger.info("sync'ing sliver:%s slice:%s controller:%s " % (sliver, sliver.slice.name, sliver.node.site_deployment.controller))
+    	controller_register = json.loads(sliver.node.site_deployment.controller.backend_register)
+ 
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%sliver.node.site_deployment.controller.name)
+
+        metadata_update = {}
+        if (sliver.numberCores):
+            metadata_update["cpu_cores"] = str(sliver.numberCores)
+
+        for tag in sliver.slice.tags.all():
+            if tag.name.startswith("sysctl-"):
+                metadata_update[tag.name] = tag.value
+
+        # public keys
+        slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
+        pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
+        if sliver.creator.public_key:
+            pubkeys.add(sliver.creator.public_key)
+
+        if sliver.slice.creator.public_key:
+            pubkeys.add(sliver.slice.creator.public_key)
+
+        if sliver.slice.service and sliver.slice.service.public_key:
+            pubkeys.add(sliver.slice.service.public_key)
+
+        nics = []
+        networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]
+        controller_networks = ControllerNetwork.objects.filter(network__in=networks,
+                                                                controller=sliver.node.site_deployment.controller)
+
+        for controller_network in controller_networks:
+            if controller_network.network.template.visibility == 'private' and \
+               controller_network.network.template.translation == 'none':
+                   if not controller_network.net_id:
+                        raise Exception("Private Network %s has no id; Try again later" % controller_network.network.name)
+                   nics.append(controller_network.net_id)
+
+        # now include network template
+        network_templates = [network.template.shared_network_name for network in networks \
+                             if network.template.shared_network_name]
+
+        #driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, controller=sliver.controllerNetwork)
+        driver = self.driver.admin_driver(tenant='admin', controller=sliver.node.site_deployment.controller)
+        nets = driver.shell.quantum.list_networks()['networks']
+        for net in nets:
+            if net['name'] in network_templates:
+                nics.append(net['id'])
+
+        if (not nics):
+            for net in nets:
+                if net['name']=='public':
+                    nics.append(net['id'])
+
+        image_id = None
+        controller_images = sliver.image.controllerimages.filter(controller=sliver.node.site_deployment.controller)
+        if controller_images:
+            image_id = controller_images[0].glance_image_id
+            logger.info("using image_id from ControllerImage object: " + str(image_id))
+
+        if image_id is None:
+            controller_driver = self.driver.admin_driver(controller=sliver.node.site_deployment.controller)
+            image_id = None
+            images = controller_driver.shell.glanceclient.images.list()
+            for image in images:
+                if image.name == sliver.image.name or not image_id:
+                    image_id = image.id
+                    logger.info("using image_id from glance: " + str(image_id))
+
+        try:
+            legacy = Config().observer_legacy
+        except:
+            legacy = False
+
+        if (legacy):
+            host_filter = sliver.node.name.split('.',1)[0]
+        else:
+            host_filter = sliver.node.name.strip()
+
+        availability_zone_filter = 'nova:%s'%host_filter
+        sliver_name = '%s-%d'%(sliver.slice.name,sliver.id)
+
+        userData = self.get_userdata(sliver, pubkeys)
+        if sliver.userData:
+            userData = sliver.userData
+
+        controller = sliver.node.site_deployment.controller
+        tenant_fields = {'endpoint':controller.auth_url,
+                     'admin_user': sliver.creator.email,
+                     'admin_password': sliver.creator.remote_password,
+                     'admin_tenant': sliver.slice.name,
+                     'tenant': sliver.slice.name,
+                     'tenant_description': sliver.slice.description,
+                     'name':sliver_name,
+                     'ansible_tag':sliver_name,
+                     'availability_zone': availability_zone_filter,
+                     'image_id':image_id,
+                     'flavor_id':sliver.flavor.id,
+                     'nics':nics,
+                     'meta':metadata_update,
+                     'user_data':r'%s'%escape(userData)}
+
+        res = run_template('sync_slivers.yaml', tenant_fields,path='slivers', expected_num=1)
+        sliver_id = res[0]['info']['OS-EXT-SRV-ATTR:instance_name']
+        sliver_uuid = res[0]['id']
+
+        try:
+            hostname = res[0]['info']['OS-EXT-SRV-ATTR:hypervisor_hostname']
+            ip = socket.gethostbyname(hostname)
+            sliver.ip = ip
+        except:
+            pass
+
+        sliver.instance_id = sliver_id
+        sliver.instance_uuid = sliver_uuid
+        sliver.instance_name = sliver_name
+        sliver.save()
+
+    def delete_record(self, sliver):
+	controller_register = json.loads(sliver.node.site_deployment.controller.backend_register)
+
+        if (controller_register.get('disabled',False)):
+                raise Exception('Controller %s is disabled'%sliver.node.site_deployment.controller.name)
+
+        sliver_name = '%s-%d'%(sliver.slice.name,sliver.id)
+        controller = sliver.node.site_deployment.controller
+        tenant_fields = {'endpoint':controller.auth_url,
+                     'admin_user': sliver.creator.email,
+                     'admin_password': sliver.creator.remote_password,
+                     'admin_tenant': sliver.slice.name,
+                     'tenant': sliver.slice.name,
+                     'tenant_description': sliver.slice.description,
+                     'name':sliver_name,
+                     'ansible_tag':sliver_name,
+                     'delete': True}
+
+        try:
+               res = run_template('sync_slivers.yaml', tenant_fields,path='slivers', expected_num=1)
+        except Exception,e:
+               print "Could not sync %s"%sliver_name
+               #import traceback
+               #traceback.print_exc()
+               raise e
+
+        if (len(res)!=1):
+            raise Exception('Could not delete sliver %s'%sliver.slice.name)
diff --git a/xos/synchronizers/base/steps/teardown_container.yaml b/xos/synchronizers/base/steps/teardown_container.yaml
new file mode 100644
index 0000000..5cabc78
--- /dev/null
+++ b/xos/synchronizers/base/steps/teardown_container.yaml
@@ -0,0 +1,33 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: {{ username }}
+  sudo: yes
+
+  vars:
+    container_name: {{ container_name }}
+    docker_image: {{ docker_image }}
+    ports:
+    {% for port in ports %}
+       - device: {{ port.device }}
+         xos_network_id: {{ port.xos_network_id }}
+         mac: {{ port.mac|default("") }}
+         ip: {{ port.ip }}
+         snoop_instance_mac: {{ port.snoop_instance_mac }}
+         snoop_instance_id: {{ port.snoop_instance_id }}
+         parent_mac: {{ port.parent_mac|default("") }}
+         s_tag: {{ port.s_tag|default("")  }}
+         c_tag: {{ port.c_tag|default("") }}
+         next_hop: {{ port.next_hop|default("") }}
+         bridge: {{ port.bridge }}
+    {% endfor %}
+    volumes:
+    {% for volume in volumes %}
+       - {{ volume }}
+    {% endfor %}
+
+  tasks:
+  - name: Make sure container is stopped
+    service: name=container-{{ container_name }} state=stopped
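+  # Stopping the service is sufficient here: its stop hook (see
+  # stop-container.sh.j2 / container.conf.j2) runs "docker stop" and
+  # "docker rm" on the container.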
+
diff --git a/xos/synchronizers/base/syncstep-portal.py b/xos/synchronizers/base/syncstep-portal.py
new file mode 100644
index 0000000..7cc9b7f
--- /dev/null
+++ b/xos/synchronizers/base/syncstep-portal.py
@@ -0,0 +1,221 @@
+import os
+import base64
+from datetime import datetime
+from xos.config import Config
+from util.logger import Logger, logging
+from observer.steps import *
+from django.db.models import F, Q
+from core.models import * 
+from django.db import reset_queries
+import json
+import time
+import pdb
+import traceback
+
+logger = Logger(level=logging.INFO)
+
+def f7(seq):
+    seen = set()
+    seen_add = seen.add
+    return [ x for x in seq if not (x in seen or seen_add(x))]
+
+def elim_dups(backend_str):
+    strs = backend_str.split('/')
+    strs = map(lambda x:x.split('(')[0],strs) 
+    strs2 = f7(strs)
+    return '/'.join(strs2)
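+# e.g. elim_dups("Timeout(x)/Timeout(y)/AuthFailed(z)") -> "Timeout/AuthFailed"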
+    
+def deepgetattr(obj, attr):
+    return reduce(getattr, attr.split('.'), obj)
+
+
+class InnocuousException(Exception):
+    pass
+
+class FailedDependency(Exception):
+    pass
+
+class SyncStep(object):
+    """ An XOS Sync step. 
+
+    Attributes:
+        psmodel        Model name the step synchronizes 
+        dependencies    list of names of models that must be synchronized first if the current model depends on them
+    """ 
+    slow=False
+    def get_prop(self, prop):
+        try:
+            sync_config_dir = Config().sync_config_dir
+        except:
+            sync_config_dir = '/etc/xos/sync'
+        prop_config_path = os.path.join(sync_config_dir, self.name, prop)
+        return open(prop_config_path).read().rstrip()
+
+    def __init__(self, **args):
+        """Initialize a sync step
+           Keyword arguments:
+                   name -- Name of the step
+                provides -- XOS models sync'd by this step
+        """
+        dependencies = []
+        self.driver = args.get('driver')
+        self.error_map = args.get('error_map')
+
+        try:
+            self.soft_deadline = int(self.get_prop('soft_deadline_seconds'))
+        except:
+            self.soft_deadline = 5 # 5 seconds
+
+        return
+
+    def fetch_pending(self, deletion=False):
+        # This is the most common implementation of fetch_pending
+        # Steps should override it if they have their own logic
+        # for figuring out what objects are outstanding.
+        main_obj = self.observes
+        if (not deletion):
+            objs = main_obj.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+        else:
+            objs = main_obj.deleted_objects.all()
+
+        return objs
+        #return Sliver.objects.filter(ip=None)
+    
+    def check_dependencies(self, obj, failed):
+        for dep in self.dependencies:
+            peer_name = dep[0].lower() + dep[1:]    # django names are camelCased with the first letter lower
+ 
+            try:
+                peer_object = deepgetattr(obj, peer_name)
+                try: 
+                    peer_objects = peer_object.all() 
+                except AttributeError:
+                    peer_objects = [peer_object] 
+            except:
+                peer_objects = []
+
+            if (hasattr(obj,'controller')):
+                try:
+                    peer_objects = filter(lambda o:o.controller==obj.controller, peer_objects)
+                except AttributeError:
+                    pass
+
+            if (failed in peer_objects):
+                if (obj.backend_status!=failed.backend_status):
+                    obj.backend_status = failed.backend_status
+                    obj.save(update_fields=['backend_status'])
+                raise FailedDependency("Failed dependency for %s:%s peer %s:%s failed  %s:%s" % (obj.__class__.__name__, str(getattr(obj,"pk","no_pk")), peer_object.__class__.__name__, str(getattr(peer_object,"pk","no_pk")), failed.__class__.__name__, str(getattr(failed,"pk","no_pk"))))
+
+    def call(self, failed=None, deletion=False):
+        if failed is None:
+            failed = []
+        pending = self.fetch_pending(deletion)
+        for o in pending:
+            # another spot to clean up debug state
+            try:
+                reset_queries()
+            except:
+                # this shouldn't happen, but in case it does, catch it...
+                logger.log_exc("exception in reset_queries")
+
+            sync_failed = False
+            try:
+                backoff_disabled = Config().observer_backoff_disabled
+            except:
+                backoff_disabled = 0
+
+            try:
+                scratchpad = json.loads(o.backend_register)
+                if (scratchpad):
+                    next_run = scratchpad['next_run']
+                    if (not backoff_disabled and next_run>time.time()):
+                        sync_failed = True
+            except:
+                logger.log_exc("Exception while loading scratchpad")
+                pass
+
+            if (not sync_failed):
+                try:
+                    for f in failed:
+                        self.check_dependencies(o,f) # Raises exception if failed
+                    if (deletion):
+                        self.delete_record(o)
+                        o.delete(purge=True)
+                    else:
+                        self.sync_record(o)
+                        o.enacted = datetime.now() # Is this the same timezone? XXX
+                        scratchpad = {'next_run':0, 'exponent':0}
+                        o.backend_register = json.dumps(scratchpad)
+                        o.backend_status = "1 - OK"
+                        o.save(update_fields=['enacted','backend_status','backend_register'])
+                except (InnocuousException,Exception) as e:
+                    logger.log_exc("Syncstep caught exception")
+
+                    force_error = False
+                    try:
+                        if (o.backend_status.startswith('2 - ')):
+                            force_error = False # Already in error state
+                            str_e = '%s/%s'%(o.backend_status[4:],str(e))
+                            str_e = elim_dups(str_e)
+                        else:
+                            str_e = str(e)
+                    except:
+                        str_e = str(e)
+
+                    if (not str_e):
+                        str_e = 'Unknown'
+
+                    try:
+                        error = self.error_map.map(str_e)
+                    except:
+                        error = str_e
+
+                    if isinstance(e, InnocuousException) and not force_error:
+                        o.backend_status = '1 - %s'%error
+                    else:
+                        o.backend_status = '2 - %s'%error
+
+                    cmd = 'wget -O /dev/null -q "http://xoslnprof.appspot.com/command?action=pushlog&node=1&log_path=/%s/%s"'%(self.__class__.__name__,error)
+                    os.system(cmd)
+
+                    try:
+                        scratchpad = json.loads(o.backend_register)
+                        scratchpad['exponent']
+                    except:
+                        logger.log_exc("Exception while updating scratchpad")
+                        scratchpad = {'next_run':0, 'exponent':0}
+
+                    # Second failure
+                    if (scratchpad['exponent']):
+                        delay = scratchpad['exponent'] * 600 # 10 minutes
+                        if (delay<1440):
+                            delay = 1440
+                        scratchpad['next_run'] = time.time() + delay
+
+                    scratchpad['exponent']+=1
+
+                    o.backend_register = json.dumps(scratchpad)
+
+                    # TOFIX:
+                    # DatabaseError: value too long for type character varying(140)
+                    if (o.pk):
+                        try:
+                            o.backend_status = o.backend_status[:1024]
+                            o.save(update_fields=['backend_status','backend_register','updated'])
+                        except:
+                            print "Could not update backend status field!"
+                            pass
+                    sync_failed = True
+
+
+            if (sync_failed):
+                failed.append(o)
+
+        return failed
+
+    def sync_record(self, o):
+        return
+
+    def delete_record(self, o):
+        return
+
+    def __call__(self, **args):
+        return self.call(**args)
\ No newline at end of file
diff --git a/xos/synchronizers/base/syncstep.py b/xos/synchronizers/base/syncstep.py
new file mode 100644
index 0000000..0a25c58
--- /dev/null
+++ b/xos/synchronizers/base/syncstep.py
@@ -0,0 +1,307 @@
+import os
+import base64
+from datetime import datetime
+from xos.config import Config
+from util.logger import Logger, logging
+from observer.steps import *
+from django.db.models import F, Q
+from core.models import *
+from django.db import reset_queries
+from observer.ansible import *
+from dependency_walker import *
+
+import json
+import time
+import pdb
+
+logger = Logger(level=logging.INFO)
+
+def f7(seq):
+    seen = set()
+    seen_add = seen.add
+    return [ x for x in seq if not (x in seen or seen_add(x))]
+
+def elim_dups(backend_str):
+    strs = backend_str.split(' // ')
+    strs2 = f7(strs)
+    return ' // '.join(strs2)
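+# e.g. elim_dups("Timeout // Timeout // AuthFailed") -> "Timeout // AuthFailed"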
+
+def deepgetattr(obj, attr):
+    return reduce(getattr, attr.split('.'), obj)
+
+
+class InnocuousException(Exception):
+    pass
+
+class DeferredException(Exception):
+    pass
+
+class FailedDependency(Exception):
+    pass
+
+class SyncStep(object):
+    """ An XOS Sync step.
+
+    Attributes:
+        psmodel        Model name the step synchronizes
+        dependencies    list of names of models that must be synchronized first if the current model depends on them
+    """
+
+    # map_sync_inputs can return this value to cause a step to be marked
+    # successful without running ansible. Used for sync_network_controllers
+    # on nat networks.
+    SYNC_WITHOUT_RUNNING = "sync_without_running"
+
+    slow=False
+    def get_prop(self, prop):
+        try:
+            sync_config_dir = Config().sync_config_dir
+        except:
+            sync_config_dir = '/etc/xos/sync'
+        prop_config_path = os.path.join(sync_config_dir, self.name, prop)
+        return open(prop_config_path).read().rstrip()
+
+    def __init__(self, **args):
+        """Initialize a sync step
+           Keyword arguments:
+                   name -- Name of the step
+                provides -- XOS models sync'd by this step
+        """
+        dependencies = []
+        self.driver = args.get('driver')
+        self.error_map = args.get('error_map')
+
+        try:
+            self.soft_deadline = int(self.get_prop('soft_deadline_seconds'))
+        except:
+            self.soft_deadline = 5 # 5 seconds
+
+        return
+
+    def fetch_pending(self, deletion=False):
+        # This is the most common implementation of fetch_pending
+        # Steps should override it if they have their own logic
+        # for figuring out what objects are outstanding.
+
+        main_objs = self.observes
+        if (type(main_objs) is not list):
+            main_objs = [main_objs]
+
+        objs = []
+        for main_obj in main_objs:
+            if (not deletion):
+                lobjs = main_obj.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False),Q(no_sync=False))
+            else:
+                lobjs = main_obj.deleted_objects.all()
+            objs.extend(lobjs)
+
+        return objs
+        #return Instance.objects.filter(ip=None)
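+        # i.e. for each observed model: the objects updated since they were
+        # last enacted (or never enacted at all), skipping lazy_blocked and
+        # no_sync objects; with deletion=True, the soft-deleted objects.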
+
+    def check_dependencies(self, obj, failed):
+        for dep in self.dependencies:
+            peer_name = dep[0].lower() + dep[1:]    # django names are camelCased with the first letter lower
+
+            peer_objects=[]
+            try:
+                peer_names = plural(peer_name)
+                peer_object_list=[]
+
+                try:
+                    peer_object_list.append(deepgetattr(obj, peer_name))
+                except:
+                    pass
+
+                try:
+                    peer_object_list.append(deepgetattr(obj, peer_names))
+                except:
+                    pass
+
+                for peer_object in peer_object_list:
+                    try:
+                        peer_objects.extend(peer_object.all())
+                    except AttributeError:
+                        peer_objects.append(peer_object)
+            except:
+                peer_objects = []
+
+            if (hasattr(obj,'controller')):
+                try:
+                    peer_objects = filter(lambda o:o.controller==obj.controller, peer_objects)
+                except AttributeError:
+                    pass
+
+            if (failed in peer_objects):
+                if (obj.backend_status!=failed.backend_status):
+                    obj.backend_status = failed.backend_status
+                    obj.save(update_fields=['backend_status'])
+                raise FailedDependency("Failed dependency for %s:%s peer %s:%s failed  %s:%s" % (obj.__class__.__name__, str(getattr(obj,"pk","no_pk")), peer_object.__class__.__name__, str(getattr(peer_object,"pk","no_pk")), failed.__class__.__name__, str(getattr(failed,"pk","no_pk"))))
+
+
+    def sync_record(self, o):
+        try:
+            controller = o.get_controller()
+            controller_register = json.loads(o.node.site_deployment.controller.backend_register)
+
+            if (controller_register.get('disabled',False)):
+                raise InnocuousException('Controller %s is disabled'%o.node.site_deployment.controller.name)
+        except AttributeError:
+            pass
+
+        tenant_fields = self.map_sync_inputs(o)
+        if tenant_fields == SyncStep.SYNC_WITHOUT_RUNNING:
+            return
+        main_objs=self.observes
+        if (type(main_objs) is list):
+            main_objs=main_objs[0]
+
+        path = main_objs.__name__.lower()
+        res = run_template(self.playbook,tenant_fields,path=path)
+
+        try:
+            self.map_sync_outputs(o,res)
+        except AttributeError:
+            pass
+
+    def delete_record(self, o):
+        try:
+            controller = o.get_controller()
+            controller_register = json.loads(o.node.site_deployment.controller.backend_register)
+
+            if (controller_register.get('disabled',False)):
+                raise InnocuousException('Controller %s is disabled'%o.node.site_deployment.controller.name)
+        except AttributeError:
+            pass
+
+        tenant_fields = self.map_delete_inputs(o)
+
+        main_objs=self.observes
+        if (type(main_objs) is list):
+            main_objs=main_objs[0]
+
+        path = main_objs.__name__.lower()
+
+        tenant_fields['delete']=True
+        res = run_template(self.playbook,tenant_fields,path=path)
+        try:
+                self.map_delete_outputs(o,res)
+        except AttributeError:
+                pass
+
+    def call(self, failed=None, deletion=False):
+        if failed is None:
+            failed = []
+        #if ('Instance' in self.__class__.__name__):
+        #    pdb.set_trace()
+
+        pending = self.fetch_pending(deletion)
+
+        for o in pending:
+            # another spot to clean up debug state
+            try:
+                reset_queries()
+            except:
+                # this shouldn't happen, but in case it does, catch it...
+                logger.log_exc("exception in reset_queries")
+
+            sync_failed = False
+            try:
+                backoff_disabled = Config().observer_backoff_disabled
+            except:
+                backoff_disabled = 0
+
+            try:
+                scratchpad = json.loads(o.backend_register)
+                if (scratchpad):
+                    next_run = scratchpad['next_run']
+                    if (not backoff_disabled and next_run>time.time()):
+                        sync_failed = True
+            except:
+                logger.log_exc("Exception while loading scratchpad")
+                pass
+
+            if (not sync_failed):
+                try:
+                    for f in failed:
+                        self.check_dependencies(o,f) # Raises exception if failed
+                    if (deletion):
+                        self.delete_record(o)
+                        o.delete(purge=True)
+                    else:
+                        self.sync_record(o)
+                        o.enacted = datetime.now() # Is this the same timezone? XXX
+                        scratchpad = {'next_run':0, 'exponent':0, 'last_success':time.time()}
+                        o.backend_register = json.dumps(scratchpad)
+                        o.backend_status = "1 - OK"
+                        o.save(update_fields=['enacted','backend_status','backend_register'])
+                except (InnocuousException,Exception,DeferredException) as e:
+                    logger.log_exc("sync step failed!")
+                    # no path currently forces a hard error status
+                    force_error = False
+                    try:
+                        if (o.backend_status.startswith('2 - ')):
+                            str_e = '%s // %r'%(o.backend_status[4:],e)
+                            str_e = elim_dups(str_e)
+                        else:
+                            str_e = '%r'%e
+                    except:
+                        str_e = '%r'%e
+
+                    try:
+                        error = self.error_map.map(str_e)
+                    except:
+                        error = '%s'%str_e
+
+                    if isinstance(e, InnocuousException) and not force_error:
+                        o.backend_status = '1 - %s'%error
+                    else:
+                        o.backend_status = '2 - %s'%error
+
+                    try:
+                        scratchpad = json.loads(o.backend_register)
+                        scratchpad['exponent']
+                    except:
+                        logger.log_exc("Exception while updating scratchpad")
+                        scratchpad = {'next_run':0, 'exponent':0, 'last_success':time.time(),'failures':0}
+
+                    # Second failure
+                    if (scratchpad['exponent']):
+                        if isinstance(e,DeferredException):
+                            delay = scratchpad['exponent'] * 60 # 1 minute
+                        else:
+                            delay = scratchpad['exponent'] * 600 # 10 minutes
+                        # cap delays at 8 hours
+                        if (delay>8*60*60):
+                            delay=8*60*60
+                        scratchpad['next_run'] = time.time() + delay
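+                    # e.g. a second consecutive DeferredException (exponent=1)
+                    # retries after 60s, a second hard failure after 600s; the
+                    # delay grows linearly per failure up to the 8-hour cap.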
+
+                    try:
+                        scratchpad['exponent']+=1
+                    except:
+                        scratchpad['exponent']=1
+
+                    try:
+                        scratchpad['failures']+=1
+                    except KeyError:
+                        scratchpad['failures']=1
+
+                    scratchpad['last_failure']=time.time()
+
+                    o.backend_register = json.dumps(scratchpad)
+
+                    # TOFIX:
+                    # DatabaseError: value too long for type character varying(140)
+                    if (o.pk):
+                        try:
+                            o.backend_status = o.backend_status[:1024]
+                            o.save(update_fields=['backend_status','backend_register','updated'])
+                        except:
+                            print "Could not update backend status field!"
+                            pass
+                    sync_failed = True
+
+
+            if (sync_failed):
+                failed.append(o)
+
+        return failed
+
+    def __call__(self, **args):
+        return self.call(**args)
diff --git a/xos/synchronizers/base/syncstep.py.bak b/xos/synchronizers/base/syncstep.py.bak
new file mode 100644
index 0000000..a1f242b
--- /dev/null
+++ b/xos/synchronizers/base/syncstep.py.bak
@@ -0,0 +1,203 @@
+import os
+import base64
+from datetime import datetime
+from xos.config import Config
+from util.logger import Logger, logging
+from observer.steps import *
+from django.db.models import F, Q
+from core.models import * 
+import json
+import time
+import pdb
+import traceback
+
+logger = Logger(level=logging.INFO)
+
+def f7(seq):
+    seen = set()
+    seen_add = seen.add
+    return [ x for x in seq if not (x in seen or seen_add(x))]
+
+def elim_dups(backend_str):
+    strs = backend_str.split(' // ')
+    strs2 = f7(strs)
+    return ' // '.join(strs2)
+    
+def deepgetattr(obj, attr):
+    return reduce(getattr, attr.split('.'), obj)
+
+
+class InnocuousException(Exception):
+    pass
+
+class FailedDependency(Exception):
+    pass
+
+class SyncStep(object):
+    """ An XOS Sync step. 
+
+    Attributes:
+        psmodel        Model name the step synchronizes 
+        dependencies    list of names of models that must be synchronized first if the current model depends on them
+    """ 
+    slow=False
+    def get_prop(self, prop):
+        try:
+            sync_config_dir = Config().sync_config_dir
+        except:
+            sync_config_dir = '/etc/xos/sync'
+        prop_config_path = '/'.join(sync_config_dir,self.name,prop)
+        return open(prop_config_path).read().rstrip()
+
+    def __init__(self, **args):
+        """Initialize a sync step
+           Keyword arguments:
+                   name -- Name of the step
+                provides -- XOS models sync'd by this step
+        """
+        dependencies = []
+        self.driver = args.get('driver')
+        self.error_map = args.get('error_map')
+
+        try:
+            self.soft_deadline = int(self.get_prop('soft_deadline_seconds'))
+        except:
+            self.soft_deadline = 5 # 5 seconds
+
+        return
+
+    def fetch_pending(self, deletion=False):
+        # This is the most common implementation of fetch_pending
+        # Steps should override it if they have their own logic
+        # for figuring out what objects are outstanding.
+        main_obj = self.observes
+        if (not deletion):
+            objs = main_obj.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+        else:
+            objs = main_obj.deleted_objects.all()
+
+        return objs
+        #return Sliver.objects.filter(ip=None)
+    
+    def check_dependencies(self, obj, failed):
+        for dep in self.dependencies:
+            peer_name = dep[0].lower() + dep[1:]    # django names are camelCased with the first letter lower
+ 
+            try:
+                peer_object = deepgetattr(obj, peer_name)
+                try: 
+                    peer_objects = peer_object.all() 
+                except AttributeError:
+                    peer_objects = [peer_object] 
+            except:
+                peer_objects = []
+
+            if (hasattr(obj,'controller')):
+                try:
+                	peer_objects = filter(lambda o:o.controller==obj.controller, peer_objects)
+                except AttributeError:
+                        pass
+
+            if (failed in peer_objects):
+                if (obj.backend_status!=failed.backend_status):
+                    obj.backend_status = failed.backend_status
+                    obj.save(update_fields=['backend_status'])
+                raise FailedDependency("Failed dependency for %s:%s peer %s:%s failed  %s:%s" % (obj.__class__.__name__, str(getattr(obj,"pk","no_pk")), peer_object.__class__.__name__, str(getattr(peer_object,"pk","no_pk")), failed.__class__.__name__, str(getattr(failed,"pk","no_pk"))))
+
+    def call(self, failed=[], deletion=False):
+        pending = self.fetch_pending(deletion)
+        for o in pending:
+            sync_failed = False
+            try:
+                backoff_disabled = Config().observer_backoff_disabled
+            except:
+                backoff_disabled = 0
+
+            try:
+                scratchpad = json.loads(o.backend_register)
+                if (scratchpad):
+                    next_run = scratchpad['next_run']
+                    if (not backoff_disabled and next_run>time.time()):
+                        sync_failed = True
+            except:
+                logger.log_exc("Exception while loading scratchpad")
+                pass
+
+            if (not sync_failed):
+                try:
+                    for f in failed:
+                        self.check_dependencies(o,f) # Raises exception if failed
+                    if (deletion):
+                        self.delete_record(o)
+                        o.delete(purge=True)
+                    else:
+                        self.sync_record(o)
+                        o.enacted = datetime.now() # Is this the same timezone? XXX
+                        scratchpad = {'next_run':0, 'exponent':0}
+                        o.backend_register = json.dumps(scratchpad)
+                        o.backend_status = "1 - OK"
+                        o.save(update_fields=['enacted','backend_status','backend_register'])
+                except (InnocuousException,Exception) as e:
+                    logger.log_exc("sync step failed!")
+                    try:
+                        if (o.backend_status.startswith('2 - ')):
+                            str_e = '%s // %r'%(o.backend_status[4:],e)
+			    str_e = elim_dups(str_e)
+                        else:
+                            str_e = '%r'%e
+                    except:
+                        str_e = '%r'%e
+
+                    try:
+                        error = self.error_map.map(str_e)
+                    except:
+                        error = '2 - %s'%str_e
+
+                    if isinstance(e, InnocuousException) and not force_error:
+                        o.backend_status = '1 - %s'%error
+                    else:
+                        o.backend_status = '3 - %s'%error
+
+                    try:
+                        scratchpad = json.loads(o.backend_register)
+                        scratchpad['exponent']
+                    except:
+                        logger.log_exc("Exception while updating scratchpad")
+                        scratchpad = {'next_run':0, 'exponent':0}
+
+                    # Second failure
+                    if (scratchpad['exponent']):
+                        delay = scratchpad['exponent'] * 600 # 10 minutes
+                        if (delay<1440):
+                            delay = 1440
+                        scratchpad['next_run'] = time.time() + delay
+
+                    scratchpad['exponent']+=1
+
+                    o.backend_register = json.dumps(scratchpad)
+
+                    # TOFIX:
+                    # DatabaseError: value too long for type character varying(140)
+                    if (o.pk):
+                        try:
+                            o.backend_status = o.backend_status[:1024]
+                            o.save(update_fields=['backend_status','backend_register','updated'])
+                        except:
+                            print "Could not update backend status field!"
+                            pass
+                    sync_failed = True
+
+
+            if (sync_failed):
+                failed.append(o)
+
+        return failed
+
+    def sync_record(self, o):
+        return
+
+    def delete_record(self, o):
+        return
+
+    def __call__(self, **args):
+        return self.call(**args)
diff --git a/xos/synchronizers/base/templates/container.conf.j2 b/xos/synchronizers/base/templates/container.conf.j2
new file mode 100644
index 0000000..7cbb880
--- /dev/null
+++ b/xos/synchronizers/base/templates/container.conf.j2
@@ -0,0 +1,14 @@
+# Upstart script for container
+description "container"
+author "smbaker@gmail.com"
+start on filesystem and started docker
+stop on runlevel [!2345]
+respawn
+
+script
+  /usr/local/sbin/start-container-{{ container_name }}.sh ATTACH
+end script
+
+post-stop script
+  /usr/local/sbin/stop-container-{{ container_name }}.sh
+end script
\ No newline at end of file
diff --git a/xos/synchronizers/base/templates/container.service.j2 b/xos/synchronizers/base/templates/container.service.j2
new file mode 100644
index 0000000..817d6d7
--- /dev/null
+++ b/xos/synchronizers/base/templates/container.service.j2
@@ -0,0 +1,11 @@
+[Unit]
+Description={{ container_name }}
+After=docker.service
+
+[Service]
+ExecStart=/bin/bash -c "/usr/local/sbin/start-container-{{ container_name }}.sh ATTACH"
+ExecStop=/bin/bash -c "/usr/local/sbin/stop-container-{{ container_name }}.sh"
+SuccessExitStatus=0 137
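+# 137 is 128+SIGKILL, the status a container attached via "docker start -a"
+# exits with when "docker stop" kills it; treat it as a clean exit.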
+
+[Install]
+WantedBy=multi-user.target
diff --git a/xos/synchronizers/base/templates/start-container.sh.j2 b/xos/synchronizers/base/templates/start-container.sh.j2
new file mode 100644
index 0000000..2fbf478
--- /dev/null
+++ b/xos/synchronizers/base/templates/start-container.sh.j2
@@ -0,0 +1,136 @@
+#!/bin/bash
+
+iptables -L > /dev/null
+ip6tables -L > /dev/null
+
+CONTAINER={{ container_name }}
+IMAGE={{ docker_image }}
+
+function mac_to_iface {
+    PARENT_MAC=$1
+    ifconfig|grep $PARENT_MAC| awk '{print $1}'|grep -v '\.'
+}
+
+function encapsulate_stag {
+    LAN_IFACE=$1
+    STAG=$2
+    ifconfig $LAN_IFACE >> /dev/null
+    if [ "$?" == 0 ]; then
+        STAG_IFACE=$LAN_IFACE.$STAG
+        ifconfig $LAN_IFACE up
+        ifconfig $STAG_IFACE
+        if [ "$?" == 0 ]; then
+            echo $STAG_IFACE is already created
+        else
+            ifconfig $STAG_IFACE >> /dev/null || ip link add link $LAN_IFACE name $STAG_IFACE type vlan id $STAG
+        fi
+            ifconfig $STAG_IFACE up
+    else
+        echo There is no $LAN_IFACE. Aborting.
+        exit -1
+    fi
+}
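+
+# e.g. "encapsulate_stag br_lan 222" brings br_lan up, creates the VLAN
+# device br_lan.222 if needed, and leaves its name in $STAG_IFACE.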
+
+
+{% if volumes %}
+{% for volume in volumes %}
+DEST_DIR=/var/container_volumes/$CONTAINER/{{ volume }}
+mkdir -p $DEST_DIR
+VOLUME_ARGS="$VOLUME_ARGS -v $DEST_DIR:{{ volume }}"
+{% endfor %}
+{% endif %}
+
+docker inspect $CONTAINER > /dev/null 2>&1
+if [ "$?" == 1 ]
+then
+    docker pull $IMAGE
+{% if network_method=="host" %}
+    docker run -d --name=$CONTAINER --privileged=true --net=host $VOLUME_ARGS $IMAGE
+{% elif network_method=="bridged" %}
+    docker run -d --name=$CONTAINER --privileged=true --net=bridge $VOLUME_ARGS $IMAGE
+{% else %}
+    docker run -d --name=$CONTAINER --privileged=true --net=none $VOLUME_ARGS $IMAGE
+{% endif %}
+else
+    docker start $CONTAINER
+fi
+
+{% if ports %}
+{% for port in ports %}
+
+{% if port.next_hop %}
+NEXTHOP_ARG="@{{ port.next_hop }}"
+{% else %}
+NEXTHOP_ARG=""
+{% endif %}
+
+{% if port.c_tag %}
+CTAG_ARG="@{{ port.c_tag }}"
+{% else %}
+CTAG_ARG=""
+{% endif %}
+
+{% if port.parent_mac %}
+# container-in-VM
+SRC_DEV=$( mac_to_iface "{{ port.parent_mac }}" )
+CMD="docker exec $CONTAINER ifconfig $SRC_DEV >> /dev/null || pipework $SRC_DEV -i {{ port.device }} $CONTAINER {{ port.ip }}/24$NEXTHOP_ARG {{ port.mac }} $CTAG_ARG"
+echo $CMD
+eval $CMD
+
+{% else %}
+# container-on-metal
+IP="{{ port.ip }}"
+{% if port.mac %}
+MAC="{{ port.mac }}"
+{% else %}
+MAC=""
+{% endif %}
+
+DEVICE="{{ port.device }}"
+BRIDGE="{{ port.bridge }}"
+
+{% if port.s_tag %}
+# This is intended for lan_network. Assume that BRIDGE is set to br_lan. We
+# create a device that strips off the S-TAG.
+STAG="{{ port.s_tag }}"
+encapsulate_stag $BRIDGE $STAG
+SRC_DEV=$STAG_IFACE
+
+{% else %}
+# This is for a standard neutron private network. We use a donor VM to setup
+# openvswitch for us, and we snoop at its devices and create a tap using the
+# same settings.
+XOS_NETWORK_ID="{{ port.xos_network_id }}"
+INSTANCE_MAC="{{ port.snoop_instance_mac }}"
+INSTANCE_ID="{{ port.snoop_instance_id }}"
+INSTANCE_TAP=`virsh domiflist $INSTANCE_ID | grep -i $INSTANCE_MAC | awk '{print $1}'`
+INSTANCE_TAP=${INSTANCE_TAP:3}
+VLAN_ID=`ovs-vsctl show | grep -i -A 1 port.*$INSTANCE_TAP | grep -i tag | awk '{print $2}'`
+# One tap for all containers per XOS/neutron network. Included the VLAN_ID in the
+# hash, to cover the case where XOS is reinstalled and the XOS network ids
+# get reused.
+TAP="con`echo ${XOS_NETWORK_ID}_$VLAN_ID|md5sum|awk '{print $1}'`"
+TAP=${TAP:0:10}
+echo im=$INSTANCE_MAC ii=$INSTANCE_ID it=$INSTANCE_TAP vlan=$VLAN_ID tap=$TAP con=$CONTAINER dev=$DEVICE mac=$MAC
+ovs-vsctl show | grep -i $TAP
+if [[ $? == 1 ]]; then
+    echo creating tap
+    ovs-vsctl add-port $BRIDGE $TAP tag=$VLAN_ID -- set interface $TAP type=internal
+else
+    echo tap exists
+fi
+SRC_DEV=$TAP
+{% endif %}
+
+CMD="docker exec $CONTAINER ifconfig $DEVICE >> /dev/null || pipework $SRC_DEV -i $DEVICE $CONTAINER $IP/24$NEXTHOP_ARG $MAC $CTAG_ARG"
+echo $CMD
+eval $CMD
+{% endif %}
+{% endfor %}
+{% endif %}
+
+# Attach to container
+# (this is only done when using upstart, since upstart expects to be attached
+#  to a running service)
+if [[ "$1" == "ATTACH" ]]; then
+    docker start -a $CONTAINER
+fi
+
diff --git a/xos/synchronizers/base/templates/stop-container.sh.j2 b/xos/synchronizers/base/templates/stop-container.sh.j2
new file mode 100644
index 0000000..9cabb00
--- /dev/null
+++ b/xos/synchronizers/base/templates/stop-container.sh.j2
@@ -0,0 +1,4 @@
+CONTAINER={{ container_name }}
+
+docker stop $CONTAINER
+docker rm $CONTAINER
diff --git a/xos/synchronizers/base/toposort.py b/xos/synchronizers/base/toposort.py
new file mode 100644
index 0000000..6839861
--- /dev/null
+++ b/xos/synchronizers/base/toposort.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+import json
+
+# Topological sort
+# Notes:
+# - Uses a stack instead of recursion
+# - Forfeits optimization involving tracking currently visited nodes
+def toposort(g, steps=None):
+	# Get set of all nodes, including those without outgoing edges
+	keys = set(g.keys())
+	values = set()
+	for v in g.values():
+		values=values | set(v)
+	
+	all_nodes=list(keys|values)
+	if (not steps):
+		steps = all_nodes
+
+	# Final order
+	order = []
+
+	# DFS stack, not using recursion
+	stack = []
+
+	# Unmarked set
+	unmarked = all_nodes
+
+	# visiting = [] - skip, don't expect 1000s of nodes, |E|/|V| is small
+
+	while unmarked:
+		stack.insert(0,unmarked[0]) # push first unmarked
+
+		while (stack):
+			n = stack[0]
+			add = True
+			try:
+				for m in g[n]:
+					if (m in unmarked):
+					    add = False
+					    stack.insert(0,m)
+			except KeyError:
+				pass
+			if (add):
+				if (n in steps and n not in order):
+					order.append(n)
+				item = stack.pop(0)
+				try:
+					unmarked.remove(item)
+				except ValueError:
+					pass
+
+	noorder = list(set(steps) - set(order))
+	return order + noorder
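+
+# Example (illustrative): with g mapping each node to the nodes it depends
+# on, toposort({'a': ['b'], 'b': ['c']}) yields dependencies first, e.g.
+# ['c', 'b', 'a'].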
+
+def main():
+	graph_file=open('xos.deps').read()
+	g = json.loads(graph_file)
+	print toposort(g)
+
+if (__name__=='__main__'):
+	main()
+
+#print toposort({'a':['b'],'b':['c'],'c':['d'],'d':['c']},['d','c','b','a'])