Migrate code over from the xos repo

Change-Id: I764ac3196babdb1ce78dded2b2d8d6ad6e965ca6
diff --git a/xos/synchronizer/steps/__init__.py b/xos/synchronizer/steps/__init__.py
new file mode 100644
index 0000000..c70b0c0
--- /dev/null
+++ b/xos/synchronizer/steps/__init__.py
@@ -0,0 +1,6 @@
+#from .sync_controller_sites import SyncControllerSites
+#from .sync_controller_slices import SyncControllerSlices
+#from .sync_controller_users import SyncControllerUsers
+#from .sync_controller_site_privileges import SyncControllerSitePrivileges
+#from .sync_controller_slice_privileges import SyncControllerSlicePrivileges
+#from .sync_controller_networks import SyncControllerNetworks
diff --git a/xos/synchronizer/steps/delete_slivers.yaml b/xos/synchronizer/steps/delete_slivers.yaml
new file mode 100644
index 0000000..fa6b879
--- /dev/null
+++ b/xos/synchronizer/steps/delete_slivers.yaml
@@ -0,0 +1,8 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+
+  - nova_compute:
+      state: absent
+      name: {{ name }}
diff --git a/xos/synchronizer/steps/purge_disabled_users.py b/xos/synchronizer/steps/purge_disabled_users.py
new file mode 100644
index 0000000..6b1dac3
--- /dev/null
+++ b/xos/synchronizer/steps/purge_disabled_users.py
@@ -0,0 +1,25 @@
+import os
+import base64
+import datetime
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models.user import User
+from xos.logger import observer_logger as logger
+
+#class SyncRoles(OpenStackSyncStep):
+#    provides=[User]
+#    requested_interval=0
+#    observes=User
+#
+#    def fetch_pending(self, deleted):
+#        if (deleted):
+#            # users marked as deleted
+#            return User.deleted_objects.all()
+#        else:
+#            # disabled users that haven't been updated in over a week
+#            one_week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
+#            return User.objects.filter(is_active=False, updated__lt=one_week_ago)
+#
+#    def sync_record(self, user):
+#        user.delete()
diff --git a/xos/synchronizer/steps/sliver.yaml b/xos/synchronizer/steps/sliver.yaml
new file mode 100644
index 0000000..e630415
--- /dev/null
+++ b/xos/synchronizer/steps/sliver.yaml
@@ -0,0 +1,17 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - nova_compute:
+       state: present
+       auth_url: http://172.31.38.128:5000/v2.0/
+       login_username: admin
+       login_password: 6a789bf69dd647e2
+       login_tenant_name: admin
+       name: gloopy
+       image_id: 3ee851df-b35a-41c5-8551-f681e7209095
+       key_name: boo
+       wait_for: 200
+       flavor_id: 3
+       nics:
+         - net-id: d1de537b-80dc-4c1b-aa5f-4a197b33b5f6
diff --git a/xos/synchronizer/steps/sync_container.py b/xos/synchronizer/steps/sync_container.py
new file mode 100644
index 0000000..41e1305
--- /dev/null
+++ b/xos/synchronizer/steps/sync_container.py
@@ -0,0 +1,162 @@
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from synchronizers.base.syncstep import SyncStep, DeferredException
+from synchronizers.base.ansible import run_template_ssh
+from core.models import Service, Slice, Instance
+from xos.logger import Logger, logging
+
+# hpclibrary will be in steps/..
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+class SyncContainer(SyncInstanceUsingAnsible):
+    provides=[Instance]
+    observes=Instance
+    requested_interval=0
+    template_name = "sync_container.yaml"
+
+    def __init__(self, *args, **kwargs):
+        super(SyncContainer, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deletion=False):
+        objs = super(SyncContainer, self).fetch_pending(deletion)
+        objs = [x for x in objs if x.isolation in ["container", "container_vm"]]
+        return objs
+
+    def get_instance_port(self, container_port):
+        for p in container_port.network.links.all():
+            if (p.instance) and (p.instance.isolation=="vm") and (p.instance.node == container_port.instance.node) and (p.mac):
+                return p
+        return None
+
+    def get_parent_port_mac(self, instance, port):
+        if not instance.parent:
+            raise Exception("instance has no parent")
+        for parent_port in instance.parent.ports.all():
+            if parent_port.network == port.network:
+                if not parent_port.mac:
+                     raise DeferredException("parent port on network %s does not have mac yet" % parent_port.network.name)
+                return parent_port.mac
+        raise Exception("failed to find corresponding parent port for network %s" % port.network.name)
+
+    def get_ports(self, o):
+        i=0
+        ports = []
+        if (o.slice.network in ["host", "bridged"]):
+            pass # no ports in host or bridged mode
+        else:
+            for port in o.ports.all():
+                if (not port.ip):
+                    # 'unmanaged' ports may have an ip, but no mac
+                    # XXX: are there any ports that have a mac but no ip?
+                    raise DeferredException("Port on network %s is not yet ready" % port.network.name)
+
+                pd={}
+                pd["mac"] = port.mac or ""
+                pd["ip"] = port.ip or ""
+                pd["xos_network_id"] = port.network.id
+
+                if port.network.name == "wan_network":
+                    if port.ip:
+                        (a, b, c, d) = port.ip.split('.')
+                        pd["mac"] = "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))
+
+
+                if o.isolation == "container":
+                    # container on bare metal
+                    instance_port = self.get_instance_port(port)
+                    if not instance_port:
+                        raise DeferredException("No instance on slice for port on network %s" % port.network.name)
+
+                    pd["snoop_instance_mac"] = instance_port.mac
+                    pd["snoop_instance_id"] = instance_port.instance.instance_id
+                    pd["src_device"] = ""
+                    pd["bridge"] = "br-int"
+                else:
+                    # container in VM
+                    pd["snoop_instance_mac"] = ""
+                    pd["snoop_instance_id"] = ""
+                    pd["parent_mac"] = self.get_parent_port_mac(o, port)
+                    pd["bridge"] = ""
+
+                for (k,v) in port.get_parameters().items():
+                    pd[k] = v
+
+                ports.append(pd)
+
+            # for any ports that don't have a device, assign one
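+            # e.g. if existing ports already claim eth0 and eth2, the remaining
+            # ports are assigned eth1, eth3, eth4, ... in order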
+            used_ports = [x["device"] for x in ports if ("device" in x)]
+            avail_ports = ["eth%d"%i for i in range(0,64) if ("eth%d"%i not in used_ports)]
+            for port in ports:
+                if not port.get("device",None):
+                    port["device"] = avail_ports.pop(0)
+
+        return ports
+
+    def get_extra_attributes(self, o):
+        fields={}
+        fields["ansible_tag"] = "container-%s" % str(o.id)
+        if o.image.tag:
+            fields["docker_image"] = o.image.path + ":" + o.image.tag
+        else:
+            fields["docker_image"] = o.image.path
+        fields["ports"] = self.get_ports(o)
+        if o.volumes:
+            fields["volumes"] = [x.strip() for x in o.volumes.split(",")]
+        else:
+            fields["volumes"] = ""
+        fields["network_method"] = o.slice.network or "default"
+        return fields
+
+    def sync_record(self, o):
+        logger.info("sync'ing object %s" % str(o),extra=o.tologdict())
+
+        fields = self.get_ansible_fields(o)
+
+        # If 'o' defines a 'sync_attributes' list, then we'll copy those
+        # attributes into the Ansible recipe's field list automatically.
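+        # e.g. a model declaring sync_attributes = ("foo_ip",) (hypothetical
+        # name) would get fields["foo_ip"] = o.foo_ip copied in here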
+        if hasattr(o, "sync_attributes"):
+            for attribute_name in o.sync_attributes:
+                fields[attribute_name] = getattr(o, attribute_name)
+
+        fields.update(self.get_extra_attributes(o))
+
+        self.run_playbook(o, fields)
+
+        o.instance_id = fields["container_name"]
+        o.instance_name = fields["container_name"]
+
+        o.save()
+
+    def delete_record(self, o):
+        logger.info("delete'ing object %s" % str(o),extra=o.tologdict())
+
+        fields = self.get_ansible_fields(o)
+
+        # If 'o' defines a 'sync_attributes' list, then we'll copy those
+        # attributes into the Ansible recipe's field list automatically.
+        if hasattr(o, "sync_attributes"):
+            for attribute_name in o.sync_attributes:
+                fields[attribute_name] = getattr(o, attribute_name)
+
+        fields.update(self.get_extra_attributes(o))
+
+        self.run_playbook(o, fields, "teardown_container.yaml")
+
+    def run_playbook(self, o, fields, template_name=None):
+        if not template_name:
+            template_name = self.template_name
+        tStart = time.time()
+        run_template_ssh(template_name, fields, path="container")
+        logger.info("playbook execution time %d" % int(time.time()-tStart),extra=o.tologdict())
+
+
diff --git a/xos/synchronizer/steps/sync_container.yaml b/xos/synchronizer/steps/sync_container.yaml
new file mode 100644
index 0000000..4ae4eb2
--- /dev/null
+++ b/xos/synchronizer/steps/sync_container.yaml
@@ -0,0 +1,124 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: {{ username }}
+  sudo: yes
+
+  vars:
+    container_name: {{ container_name }}
+    docker_image: {{ docker_image }}
+    network_method: {{ network_method }}
+    ports:
+    {% for port in ports %}
+       - device: {{ port.device }}
+         xos_network_id: {{ port.xos_network_id }}
+         mac: {{ port.mac|default("") }}
+         ip: {{ port.ip }}
+         snoop_instance_mac: {{ port.snoop_instance_mac }}
+         snoop_instance_id: {{ port.snoop_instance_id }}
+         parent_mac: {{ port.parent_mac|default("") }}
+         s_tag: {{ port.s_tag|default("")  }}
+         c_tag: {{ port.c_tag|default("") }}
+         next_hop: {{ port.next_hop|default("") }}
+         bridge: {{ port.bridge }}
+    {% endfor %}
+    volumes:
+    {% for volume in volumes %}
+       - {{ volume }}
+    {% endfor %}
+
+  tasks:
+
+#  - name: Fix /etc/hosts
+#    lineinfile:
+#      dest=/etc/hosts
+#      regexp="127.0.0.1 localhost"
+#      line="127.0.0.1 localhost {{ instance_hostname }}"
+
+  - name: Add repo key
+    apt_key:
+      keyserver=hkp://pgp.mit.edu:80
+      id=58118E89F3A912897C070ADBF76221572C52609D
+
+  - name: Install Docker repo
+    apt_repository:
+      repo="deb https://apt.dockerproject.org/repo ubuntu-trusty main"
+      state=present
+
+  - name: Install Docker
+    apt:
+      name={{ '{{' }} item {{ '}}' }}
+      state=latest
+      update_cache=yes
+    with_items:
+# XXX docker 1.10 is not working on cloudlab
+#    - docker-engine
+    - python-pip
+    - python-httplib2
+
+  - name: Install Docker 1.9.1
+    apt:
+      name={{ '{{' }} item {{ '}}' }}
+      update_cache=yes
+    with_items:
+    - docker-engine=1.9.1-0~trusty
+
+  # Something is installing a requests library that is incompatible with pip, and
+  # will cause this recipe to fail next time it tries to run pip. Only the one
+  # in /usr/local/lib is bad. There's still a good one in /usr/lib
+  - name: check if bad requests library installed
+    stat: path=/usr/local/lib/python2.7/dist-packages/requests
+    register: bad_requests
+
+  - name: remove bad request library
+    shell: mv /usr/local/lib/python2.7/dist-packages/requests /usr/local/lib/python2.7/dist-packages/requests-bad
+    when: bad_requests.stat.exists == True
+
+  - name: Install docker-py
+    pip:
+      name=docker-py
+      state=latest
+
+  - name: install Pipework
+    get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
+       dest=/usr/local/bin/pipework
+       mode=0755
+
+#  - name: Start Container
+#    docker:
+#      docker_api_version: "1.18"
+#      name: {{ container_name }}
+#      # was: reloaded
+#      state: running
+#      image: {{ docker_image }}
+
+  - name: check if systemd is installed
+    stat: path=/usr/bin/systemctl
+    register: systemctl
+
+  - name: container upstart
+    template: src=/opt/xos/synchronizers/openstack/templates/container.conf.j2 dest=/etc/init/container-{{ container_name }}.conf
+
+  - name: container systemd
+    template: src=/opt/xos/synchronizers/openstack/templates/container.service.j2 dest=/lib/systemd/system/container-{{ container_name }}.service
+
+  - name: container startup script
+    template: src=/opt/xos/synchronizers/openstack/templates/start-container.sh.j2 dest=/usr/local/sbin/start-container-{{ container_name }}.sh mode=0755
+
+  - name: container teardown script
+    template: src=/opt/xos/synchronizers/openstack/templates/stop-container.sh.j2 dest=/usr/local/sbin/stop-container-{{ container_name }}.sh mode=0755
+
+  - name: restart systemd
+    shell: systemctl daemon-reload
+    when: systemctl.stat.exists == True
+
+{% if ports %}
+  - name: make sure bridges are setup
+    shell: ifconfig {{ '{{' }} item.bridge {{ '}}' }}
+    with_items: "ports"
+{% endif %}
+
+  - name: Make sure container is running
+    service: name=container-{{ container_name }} state=started
+
diff --git a/xos/synchronizer/steps/sync_controller_images.py b/xos/synchronizer/steps/sync_controller_images.py
new file mode 100644
index 0000000..c1e5136
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_images.py
@@ -0,0 +1,44 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from synchronizers.base.syncstep import *
+from core.models import Controller
+from core.models import Image, ControllerImages
+from xos.logger import observer_logger as logger 
+from synchronizers.base.ansible import *
+import json
+
+class SyncControllerImages(OpenStackSyncStep):
+    provides=[ControllerImages]
+    observes = ControllerImages
+    requested_interval=0
+    playbook='sync_controller_images.yaml'
+
+    def fetch_pending(self, deleted):
+        if (deleted):
+            return []
+
+        # now we return all images that need to be enacted
+        return ControllerImages.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def map_sync_inputs(self, controller_image):
+        image_fields = {'endpoint':controller_image.controller.auth_url,
+                        'endpoint_v3': controller_image.controller.auth_url_v3,
+                        'admin_user':controller_image.controller.admin_user,
+                        'admin_password':controller_image.controller.admin_password,
+                        'domain': controller_image.controller.domain,
+                        'name':controller_image.image.name,
+                        'filepath':controller_image.image.path,
+                        'ansible_tag': '%s@%s'%(controller_image.image.name,controller_image.controller.name), # name of ansible playbook
+                        }
+
+        return image_fields
+
+    def map_sync_outputs(self, controller_image, res):
+        image_id = res[0]['id']
+        controller_image.glance_image_id = image_id
+        controller_image.backend_status = '1 - OK'
+        controller_image.save()
diff --git a/xos/synchronizer/steps/sync_controller_images.yaml b/xos/synchronizer/steps/sync_controller_images.yaml
new file mode 100644
index 0000000..6247a30
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_images.yaml
@@ -0,0 +1,13 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - glance_image:
+        auth_url={{ endpoint }}
+        login_username="{{ admin_user }}"
+        login_tenant_name="admin"
+        login_password="{{ admin_password }}"
+        name="{{ name }}"
+        file="{{ filepath }}"
+        disk_format='raw'
+        is_public=true
diff --git a/xos/synchronizer/steps/sync_controller_networks.py b/xos/synchronizer/steps/sync_controller_networks.py
new file mode 100644
index 0000000..b61ef7b
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_networks.py
@@ -0,0 +1,163 @@
+import os
+import base64
+import struct
+import socket
+from collections import defaultdict
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from synchronizers.base.syncstep import *
+from core.models.network import *
+from core.models.slice import *
+from core.models.instance import Instance
+from xos.logger import observer_logger as logger
+from synchronizers.base.ansible import *
+from openstack_xos.driver import OpenStackDriver
+from xos.config import Config
+import json
+
+import pdb
+
+class SyncControllerNetworks(OpenStackSyncStep):
+    requested_interval = 0
+    provides=[Network]
+    observes=ControllerNetwork	
+    playbook='sync_controller_networks.yaml'
+
+    def alloc_subnet(self, uuid):
+        # 16 bits only
+        uuid_masked = uuid & 0xffff
+        a = 10
+        b = uuid_masked >> 8
+        c = uuid_masked & 0xff
+        d = 0
+
+        cidr = '%d.%d.%d.%d/24'%(a,b,c,d)
+        return cidr
+
+    def alloc_gateway(self, subnet):
+        # given a CIDR, allocate a default gateway using the .1 address within
+        # the subnet.
+        #    10.123.0.0/24 --> 10.123.0.1
+        #    207.141.192.128/28 --> 207.141.192.129
+        (network, bits) = subnet.split("/")
+        network=network.strip()
+        bits=int(bits.strip())
+        netmask = (~(pow(2,32-bits)-1) & 0xFFFFFFFF)
+        ip = struct.unpack("!L", socket.inet_aton(network))[0]
+        ip = ip & netmask | 1
+        return socket.inet_ntoa(struct.pack("!L", ip))
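+        # (an equivalent sketch using the already-imported netaddr would be
+        #  str(IPNetwork(subnet).network + 1), which should yield the same .1 address)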
+
+    def save_controller_network(self, controller_network):
+        network_name = controller_network.network.name
+        subnet_name = '%s-%d'%(network_name,controller_network.pk)
+        if controller_network.subnet and controller_network.subnet.strip():
+            # If a subnet is already specified (pass in by the creator), then
+            # use that rather than auto-generating one.
+            cidr = controller_network.subnet.strip()
+            print "CIDR_MS", cidr
+        else:
+            cidr = self.alloc_subnet(controller_network.pk)
+            print "CIDR_AMS", cidr
+
+        if controller_network.network.start_ip and controller_network.network.start_ip.strip():
+            start_ip = controller_network.network.start_ip.strip()
+        else:
+            start_ip = None
+
+        if controller_network.network.end_ip and controller_network.network.end_ip.strip():
+            end_ip = controller_network.network.end_ip.strip()
+        else:
+            end_ip = None
+
+        self.cidr=cidr
+        slice = controller_network.network.owner
+
+        network_fields = {'endpoint':controller_network.controller.auth_url,
+                    'endpoint_v3': controller_network.controller.auth_url_v3,
+                    'admin_user':slice.creator.email,
+                    'admin_password':slice.creator.remote_password,
+                    'admin_project':slice.name,
+                    'domain': controller_network.controller.domain,
+                    'name':network_name,
+                    'subnet_name':subnet_name,
+                    'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
+                    'cidr':cidr,
+                    'gateway':self.alloc_gateway(cidr),
+                    'start_ip':start_ip,
+                    'end_ip':end_ip,
+                    'use_vtn':getattr(Config(), "networking_use_vtn", False),
+                    'delete':False
+                    }
+        return network_fields
+
+    def map_sync_outputs(self, controller_network,res):
+        network_id = res[0]['network']['id']
+        subnet_id = res[1]['subnet']['id']
+        controller_network.net_id = network_id
+        controller_network.subnet = self.cidr
+        controller_network.subnet_id = subnet_id
+        controller_network.backend_status = '1 - OK'
+        controller_network.save()
+
+
+    def map_sync_inputs(self, controller_network):
+        # XXX This check should really be made from booleans, rather than using hardcoded network names
+        #if (controller_network.network.template.name not in ['Private', 'Private-Indirect', 'Private-Direct', 'management_template']):
+        #    logger.info("skipping network controller %s because it is not private" % controller_network)
+        #    # We only sync private networks
+        #    return SyncStep.SYNC_WITHOUT_RUNNING
+
+        # hopefully a better approach than above
+        if (controller_network.network.template.shared_network_name or controller_network.network.template.shared_network_id):
+            return SyncStep.SYNC_WITHOUT_RUNNING
+        
+        if not controller_network.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_network.controller)
+            return
+
+        if controller_network.network.owner and controller_network.network.owner.creator:
+            return self.save_controller_network(controller_network)
+        else:
+            raise Exception('Could not save network controller %s'%controller_network)
+
+    def map_delete_inputs(self, controller_network):
+        # XXX This check should really be made from booleans, rather than using hardcoded network names
+        if (controller_network.network.template.name not in ['Private', 'Private-Indirect', 'Private-Direct']):
+            # We only sync private networks
+            return
+        try:
+            slice = controller_network.network.owner # XXX: FIXME!!
+        except:
+            raise Exception('Could not get slice for Network %s'%controller_network.network.name)
+
+        network_name = controller_network.network.name
+        subnet_name = '%s-%d'%(network_name,controller_network.pk)
+        cidr = controller_network.subnet
+        network_fields = {'endpoint':controller_network.controller.auth_url,
+                    'admin_user':slice.creator.email, # XXX: FIXME
+                    'tenant_name':slice.name, # XXX: FIXME
+                    'admin_password':slice.creator.remote_password,
+                    'name':network_name,
+                    'subnet_name':subnet_name,
+                    'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
+                    'cidr':cidr,
+                    'delete':True
+                    }
+
+        return network_fields
+
+	"""
+        driver = OpenStackDriver().client_driver(caller=controller_network.network.owner.creator,
+                                                 tenant=controller_network.network.owner.name,
+                                                 controller=controller_network.controller.name)
+        if (controller_network.router_id) and (controller_network.subnet_id):
+            driver.delete_router_interface(controller_network.router_id, controller_network.subnet_id)
+        if controller_network.subnet_id:
+            driver.delete_subnet(controller_network.subnet_id)
+        if controller_network.router_id:
+            driver.delete_router(controller_network.router_id)
+        if controller_network.net_id:
+            driver.delete_network(controller_network.net_id)
+	"""
diff --git a/xos/synchronizer/steps/sync_controller_networks.yaml b/xos/synchronizer/steps/sync_controller_networks.yaml
new file mode 100644
index 0000000..7b6075c
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_networks.yaml
@@ -0,0 +1,58 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - os_network:
+      name: {{ name }}
+      shared: true
+      {% if not delete -%}
+      state: present
+      {% else -%}
+      state: absent
+      {% endif -%}
+      auth:
+        auth_url: {{ endpoint }}
+        username: {{ admin_user }}
+        password: {{ admin_password }}
+        project_name: {{ admin_project }}
+
+{% if not delete %}
+  - os_subnet:
+      name: {{ subnet_name }}
+      network_name: {{ name }}
+      {% if not delete -%}
+      state: present
+      cidr: {{ cidr }}
+      dns_nameservers: 8.8.8.8
+      {% if use_vtn -%}
+      gateway_ip: {{ gateway }}
+      {% endif -%}
+      {% if start_ip -%}
+      allocation_pool_start: {{ start_ip }}
+      {% endif -%}
+      {% if end_ip -%}
+      allocation_pool_end: {{ end_ip }}
+      {% endif -%}
+      {% else -%}
+      state: absent
+      {% endif -%}
+      auth:
+        auth_url: {{ endpoint }}
+        username: {{ admin_user }}
+        password: {{ admin_password }}
+        project_name: {{ admin_project }}
+
+{% if not use_vtn -%}
+# until we get 'no-gateway-ip' arg to os_subnet, in Ansible 2.2
+# https://github.com/ansible/ansible-modules-core/pull/3736
+  - command:
+      neutron \
+      --os-auth-url {{ endpoint }} \
+      --os-username {{ admin_user }} \
+      --os-password {{ admin_password }} \
+      --os-tenant-name {{ admin_project }} \
+      subnet-update --no-gateway {{ subnet_name }}
+{% endif -%}
+
+{% endif %}
+
diff --git a/xos/synchronizer/steps/sync_controller_site_privileges.py b/xos/synchronizer/steps/sync_controller_site_privileges.py
new file mode 100644
index 0000000..59919fe
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_site_privileges.py
@@ -0,0 +1,84 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from synchronizers.base.syncstep import *
+from core.models.site import Controller, SitePrivilege, ControllerSite
+from core.models.user import User
+from core.models.controlleruser import ControllerUser, ControllerSitePrivilege
+from xos.logger import observer_logger as logger
+from synchronizers.base.ansible import *
+import json
+
+class SyncControllerSitePrivileges(OpenStackSyncStep):
+    provides=[SitePrivilege]
+    requested_interval=0
+    observes=ControllerSitePrivilege
+    playbook='sync_controller_users.yaml'
+
+    def map_sync_inputs(self, controller_site_privilege):
+        controller_register = json.loads(controller_site_privilege.controller.backend_register)
+        if not controller_site_privilege.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_site_privilege.controller)
+            return
+
+        roles = [controller_site_privilege.site_privilege.role.role]
+        # setup user home site roles at controller
+        if not controller_site_privilege.site_privilege.user.site:
+            raise Exception('Siteless user %s'%controller_site_privilege.site_privilege.user.email)
+        else:
+            # look up tenant id for the user's site at the controller
+            #ctrl_site_deployments = SiteDeployment.objects.filter(
+            #  site_deployment__site=controller_site_privilege.user.site,
+            #  controller=controller_site_privilege.controller)
+
+            #if ctrl_site_deployments:
+            #    # need the correct tenant id for site at the controller
+            #    tenant_id = ctrl_site_deployments[0].tenant_id
+            #    tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
+            user_fields = {
+                'endpoint': controller_site_privilege.controller.auth_url,
+                'endpoint_v3': controller_site_privilege.controller.auth_url_v3,
+                'domain': controller_site_privilege.controller.domain,
+                'name': controller_site_privilege.site_privilege.user.email,
+                'email': controller_site_privilege.site_privilege.user.email,
+                'password': controller_site_privilege.site_privilege.user.remote_password,
+                'admin_user': controller_site_privilege.controller.admin_user,
+                'admin_password': controller_site_privilege.controller.admin_password,
+                'ansible_tag': '%s@%s'%(controller_site_privilege.site_privilege.user.email.replace('@','-at-'),controller_site_privilege.controller.name),
+                'admin_tenant': controller_site_privilege.controller.admin_tenant,
+                'roles': roles,
+                'tenant': controller_site_privilege.site_privilege.site.login_base}
+
+            return user_fields
+
+    def map_sync_outputs(self, controller_site_privilege, res):
+        # results is an array in which each element corresponds to an
+        # "ok" string received per operation. If we get as many oks as
+        # the number of operations we issued, that means a grand success.
+        # Otherwise, the number of oks tells us which operation failed.
+        controller_site_privilege.role_id = res[0]['id']
+        controller_site_privilege.save()
+
+    def delete_record(self, controller_site_privilege):
+        controller_register = json.loads(controller_site_privilege.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+            raise InnocuousException('Controller %s is disabled'%controller_site_privilege.controller.name)
+
+        if controller_site_privilege.role_id:
+            driver = self.driver.admin_driver(controller=controller_site_privilege.controller)
+            user = ControllerUser.objects.get(
+                controller=controller_site_privilege.controller, 
+                user=controller_site_privilege.site_privilege.user
+            )
+            site = ControllerSite.objects.get(
+                controller=controller_site_privilege.controller,
+                site=controller_site_privilege.site_privilege.site
+            )
+            driver.delete_user_role(
+                user.kuser_id, 
+                site.tenant_id, 
+                controller_site_privilege.site_prvilege.role.role
+            )
diff --git a/xos/synchronizer/steps/sync_controller_sites.py b/xos/synchronizer/steps/sync_controller_sites.py
new file mode 100644
index 0000000..1b3c2ba
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_sites.py
@@ -0,0 +1,67 @@
+import os
+import base64
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.openstack.openstacksyncstep import OpenStackSyncStep
+from core.models.site import *
+from synchronizers.base.syncstep import *
+from synchronizers.base.ansible import *
+from xos.logger import observer_logger as logger
+import json
+
+class SyncControllerSites(OpenStackSyncStep):
+    requested_interval=0
+    provides=[Site]
+    observes=ControllerSite
+    playbook = 'sync_controller_sites.yaml'
+
+    def fetch_pending(self, deleted=False):
+        lobjs = ControllerSite.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False),Q(controller__isnull=False))
+        return lobjs
+
+    def map_sync_inputs(self, controller_site):
+        tenant_fields = {'endpoint':controller_site.controller.auth_url,
+                         'endpoint_v3': controller_site.controller.auth_url_v3,
+                         'domain': controller_site.controller.domain,
+                         'admin_user': controller_site.controller.admin_user,
+                         'admin_password': controller_site.controller.admin_password,
+                         'admin_tenant': controller_site.controller.admin_tenant,
+                         'ansible_tag': '%s@%s'%(controller_site.site.login_base,controller_site.controller.name), # name of ansible playbook
+                         'tenant': controller_site.site.login_base,
+                         'tenant_description': controller_site.site.name}
+        return tenant_fields
+
+    def map_sync_outputs(self, controller_site, res):
+        controller_site.tenant_id = res[0]['id']
+        controller_site.backend_status = '1 - OK'
+        controller_site.save()
+
+    def delete_record(self, controller_site):
+        controller_register = json.loads(controller_site.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+            raise InnocuousException('Controller %s is disabled'%controller_site.controller.name)
+
+        if controller_site.tenant_id:
+            driver = self.driver.admin_driver(controller=controller_site.controller)
+            driver.delete_tenant(controller_site.tenant_id)
+
+	"""
+        Ansible does not support tenant deletion yet
+
+	import pdb
+	pdb.set_trace()
+        template = os_template_env.get_template('delete_controller_sites.yaml')
+	tenant_fields = {'endpoint':controller_site.controller.auth_url,
+		         'admin_user': controller_site.controller.admin_user,
+		         'admin_password': controller_site.controller.admin_password,
+		         'admin_tenant': 'admin',
+	                 'ansible_tag': 'controller_sites/%s@%s'%(controller_site.controller_site.site.login_base,controller_site.controller_site.deployment.name), # name of ansible playbook
+		         'tenant': controller_site.controller_site.site.login_base,
+		         'delete': True}
+
+	rendered = template.render(tenant_fields)
+	res = run_template('sync_controller_sites.yaml', tenant_fields)
+
+	if (len(res)!=1):
+		raise Exception('Could not assign roles for user %s'%tenant_fields['tenant'])
+	"""
diff --git a/xos/synchronizer/steps/sync_controller_sites.yaml b/xos/synchronizer/steps/sync_controller_sites.yaml
new file mode 100644
index 0000000..4129802
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_sites.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}"
diff --git a/xos/synchronizer/steps/sync_controller_slice_privileges.py b/xos/synchronizer/steps/sync_controller_slice_privileges.py
new file mode 100644
index 0000000..e5513b0
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_slice_privileges.py
@@ -0,0 +1,79 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from synchronizers.base.syncstep import *
+from core.models.slice import Controller, SlicePrivilege, ControllerSlice
+from core.models.user import User
+from core.models.controlleruser import ControllerUser, ControllerSlicePrivilege
+from synchronizers.base.ansible import *
+from xos.logger import observer_logger as logger
+import json
+
+class SyncControllerSlicePrivileges(OpenStackSyncStep):
+    provides=[SlicePrivilege]
+    requested_interval=0
+    observes=ControllerSlicePrivilege
+    playbook = 'sync_controller_users.yaml'
+
+    def map_sync_inputs(self, controller_slice_privilege):
+        if not controller_slice_privilege.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_slice_privilege.controller)
+            return
+
+        template = os_template_env.get_template('sync_controller_users.yaml')
+        roles = [controller_slice_privilege.slice_privilege.role.role]
+        # setup user home slice roles at controller
+        if not controller_slice_privilege.slice_privilege.user.site:
+            raise Exception('Siteless user %s'%controller_slice_privilege.slice_privilege.user.email)
+        else:
+            # look up tenant id for the user's slice at the controller
+            #ctrl_slice_deployments = SliceDeployment.objects.filter(
+            #  slice_deployment__slice=controller_slice_privilege.user.slice,
+            #  controller=controller_slice_privilege.controller)
+
+            #if ctrl_slice_deployments:
+            #    # need the correct tenant id for slice at the controller
+            #    tenant_id = ctrl_slice_deployments[0].tenant_id
+            #    tenant_name = ctrl_slice_deployments[0].slice_deployment.slice.login_base
+            user_fields = {
+                'endpoint': controller_slice_privilege.controller.auth_url,
+                'endpoint_v3': controller_slice_privilege.controller.auth_url_v3,
+                'domain': controller_slice_privilege.controller.domain,
+                'name': controller_slice_privilege.slice_privilege.user.email,
+                'email': controller_slice_privilege.slice_privilege.user.email,
+                'password': controller_slice_privilege.slice_privilege.user.remote_password,
+                'admin_user': controller_slice_privilege.controller.admin_user,
+                'admin_password': controller_slice_privilege.controller.admin_password,
+                'ansible_tag':'%s@%s@%s'%(controller_slice_privilege.slice_privilege.user.email.replace('@','-at-'),controller_slice_privilege.slice_privilege.slice.name,controller_slice_privilege.controller.name),
+                'admin_tenant': controller_slice_privilege.controller.admin_tenant,
+                'roles': roles,
+                'tenant': controller_slice_privilege.slice_privilege.slice.name}
+            return user_fields
+
+    def map_sync_outputs(self, controller_slice_privilege, res):
+        controller_slice_privilege.role_id = res[0]['id']
+        controller_slice_privilege.save()
+
+    def delete_record(self, controller_slice_privilege):
+        controller_register = json.loads(controller_slice_privilege.controller.backend_register)
+        if (controller_register.get('disabled',False)):
+            raise InnocuousException('Controller %s is disabled'%controller_slice_privilege.controller.name)
+
+        if controller_slice_privilege.role_id:
+            driver = self.driver.admin_driver(controller=controller_slice_privilege.controller)
+            user = ControllerUser.objects.get(
+                controller=controller_slice_privilege.controller, 
+                user=controller_slice_privilege.slice_privilege.user
+            )
+            slice = ControllerSlice.objects.get(
+                controller=controller_slice_privilege.controller,
+                slice=controller_slice_privilege.slice_privilege.slice
+            )
+            driver.delete_user_role(
+                user.kuser_id, 
+                slice.tenant_id, 
+                controller_slice_privilege.slice_prvilege.role.role
+            )
diff --git a/xos/synchronizer/steps/sync_controller_slices.py b/xos/synchronizer/steps/sync_controller_slices.py
new file mode 100644
index 0000000..0666230
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_slices.py
@@ -0,0 +1,85 @@
+import os
+import base64
+from collections import defaultdict
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from synchronizers.base.syncstep import *
+from core.models import *
+from synchronizers.base.ansible import *
+from openstack_xos.driver import OpenStackDriver
+from xos.logger import observer_logger as logger
+import json
+
+class SyncControllerSlices(OpenStackSyncStep):
+    provides=[Slice]
+    requested_interval=0
+    observes=ControllerSlice
+    playbook='sync_controller_slices.yaml'
+
+    def map_sync_inputs(self, controller_slice):
+        logger.info("sync'ing slice controller %s" % controller_slice)
+
+        if not controller_slice.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_slice.controller)
+            return
+
+        controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
+                                                             controller=controller_slice.controller)
+        if not controller_users:
+            raise Exception("slice createor %s has not accout at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
+        else:
+            controller_user = controller_users[0]
+            driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
+            roles = [driver.get_admin_role().name]
+
+        max_instances=int(controller_slice.slice.max_instances)
+        tenant_fields = {'endpoint':controller_slice.controller.auth_url,
+                         'endpoint_v3': controller_slice.controller.auth_url_v3,
+                         'domain': controller_slice.controller.domain,
+                         'admin_user': controller_slice.controller.admin_user,
+                         'admin_password': controller_slice.controller.admin_password,
+                         'admin_tenant': 'admin',
+                         'tenant': controller_slice.slice.name,
+                         'tenant_description': controller_slice.slice.description,
+                         'roles':roles,
+                         'name':controller_user.user.email,
+                         'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
+                         'max_instances':max_instances}
+
+        return tenant_fields
+
+    def map_sync_outputs(self, controller_slice, res):
+        tenant_id = res[0]['id']
+        if (not controller_slice.tenant_id):
+            try:
+                driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
+                driver.shell.nova.quotas.update(tenant_id=tenant_id, instances=int(controller_slice.slice.max_instances))
+            except:
+                logger.log_exc('Could not update quota for %s'%controller_slice.slice.name)
+                raise Exception('Could not update quota for %s'%controller_slice.slice.name)
+
+            controller_slice.tenant_id = tenant_id
+            controller_slice.backend_status = '1 - OK'
+            controller_slice.save()
+
+
+    def map_delete_inputs(self, controller_slice):
+        controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
+                                                              controller=controller_slice.controller)
+        if not controller_users:
+            raise Exception("slice createor %s has not accout at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
+        else:
+            controller_user = controller_users[0]
+
+        tenant_fields = {'endpoint':controller_slice.controller.auth_url,
+                          'admin_user': controller_slice.controller.admin_user,
+                          'admin_password': controller_slice.controller.admin_password,
+                          'admin_tenant': 'admin',
+                          'tenant': controller_slice.slice.name,
+                          'tenant_description': controller_slice.slice.description,
+                          'name':controller_user.user.email,
+                          'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
+                          'delete': True}
+        return tenant_fields
diff --git a/xos/synchronizer/steps/sync_controller_slices.yaml b/xos/synchronizer/steps/sync_controller_slices.yaml
new file mode 100644
index 0000000..61470ce
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_slices.yaml
@@ -0,0 +1,12 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  {% if delete -%}
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}" state=absent
+  {% else -%}
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}"
+  {% for role in roles %}
+  - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} user="{{ name }}" role={{ role }} tenant={{ tenant }}
+  {% endfor %}
+  {% endif %}
diff --git a/xos/synchronizer/steps/sync_controller_users.py b/xos/synchronizer/steps/sync_controller_users.py
new file mode 100644
index 0000000..c9de142
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_users.py
@@ -0,0 +1,72 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from synchronizers.base.syncstep import *
+from core.models.site import Controller, SiteDeployment
+from core.models.user import User
+from core.models.controlleruser import ControllerUser
+from synchronizers.base.ansible import *
+from openstack_xos.driver import OpenStackDriver
+from xos.logger import observer_logger as logger
+import json
+
+class SyncControllerUsers(OpenStackSyncStep):
+    provides=[User]
+    requested_interval=0
+    observes=ControllerUser
+    playbook='sync_controller_users.yaml'
+
+    def map_sync_inputs(self, controller_user):
+        if not controller_user.controller.admin_user:
+            logger.info("controller %r has no admin_user, skipping" % controller_user.controller)
+            return
+
+        # All users will have at least the 'user' role at their home site/tenant.
+        # We must also check if the user should have the admin role
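+        # e.g. a regular user ends up with roles=['user'], while an admin user
+        # also picks up the controller's admin role name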
+
+        roles = ['user']
+        if controller_user.user.is_admin:
+            driver = OpenStackDriver().admin_driver(controller=controller_user.controller)
+            roles.append(driver.get_admin_role().name)
+
+        # setup user home site roles at controller
+        if not controller_user.user.site:
+            raise Exception('Siteless user %s'%controller_user.user.email)
+        else:
+            # look up tenant id for the user's site at the controller
+            #ctrl_site_deployments = SiteDeployment.objects.filter(
+            #  site_deployment__site=controller_user.user.site,
+            #  controller=controller_user.controller)
+
+            #if ctrl_site_deployments:
+            #    # need the correct tenant id for site at the controller
+            #    tenant_id = ctrl_site_deployments[0].tenant_id
+            #    tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
+            user_fields = {
+                'endpoint':controller_user.controller.auth_url,
+                'endpoint_v3': controller_user.controller.auth_url_v3,
+                'domain': controller_user.controller.domain,
+                'name': controller_user.user.email,
+                'email': controller_user.user.email,
+                'password': controller_user.user.remote_password,
+                'admin_user': controller_user.controller.admin_user,
+                'admin_password': controller_user.controller.admin_password,
+                'ansible_tag':'%s@%s'%(controller_user.user.email.replace('@','-at-'),controller_user.controller.name),
+                'admin_project': controller_user.controller.admin_tenant,
+                'roles':roles,
+                'project':controller_user.user.site.login_base
+                }
+            return user_fields
+
+    def map_sync_outputs(self, controller_user, res):
+        controller_user.kuser_id = res[0]['user']['id']
+        controller_user.backend_status = '1 - OK'
+        controller_user.save()
+
+    def delete_record(self, controller_user):
+        if controller_user.kuser_id:
+            driver = self.driver.admin_driver(controller=controller_user.controller)
+            driver.delete_user(controller_user.kuser_id)
diff --git a/xos/synchronizer/steps/sync_controller_users.yaml b/xos/synchronizer/steps/sync_controller_users.yaml
new file mode 100644
index 0000000..5cb3cc9
--- /dev/null
+++ b/xos/synchronizer/steps/sync_controller_users.yaml
@@ -0,0 +1,50 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+
+  - name: Create user account for "{{ name }}"
+    os_user:
+      name: "{{ name }}"
+      email: "{{ email }}"
+      password: "{{ password }}"
+      auth:
+        auth_url: {{ endpoint }}
+        username: {{ admin_user }}
+        password: {{ admin_password }}
+        project_name: {{ admin_project }}
+
+  - name: Create project for "{{ project }}"
+    os_project:
+      name: "{{ project }}"
+      auth:
+        auth_url: {{ endpoint }}
+        username: {{ admin_user }}
+        password: {{ admin_password }}
+        project_name: {{ admin_project }}
+
+{% for role in roles %}
+  - name: Creating role "{{ role }}" for "{{ name }}" on "{{ project }}"
+    keystone_user:
+      user: "{{ name }}"
+      role: "{{ role }}"
+      tenant: "{{ project }}"
+      endpoint: {{ endpoint }}
+      login_user: {{ admin_user }}
+      login_password: {{ admin_password }}
+      login_tenant_name: {{ admin_project }}
+{% endfor %}
+
+# FIXME: the below should work in Ansible 2.1, once we get the Admin/admin and
+# Member/user role name issues straightened out.
+#
+#  - name: Creating role "{{ role }}" for "{{ name }}" on "{{ project }}"
+#    os_user_role:
+#      user: "{{ name }}"
+#      role: "{{ role }}"
+#      project: "{{ project }}"
+#      auth:
+#        auth_url: {{ endpoint }}
+#        username: {{ admin_user }}
+#        password: {{ admin_password }}
+#        project_name: {{ admin_project }}
diff --git a/xos/synchronizer/steps/sync_images.py b/xos/synchronizer/steps/sync_images.py
new file mode 100644
index 0000000..1638fd0
--- /dev/null
+++ b/xos/synchronizer/steps/sync_images.py
@@ -0,0 +1,52 @@
+import os
+import base64
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models.image import Image
+from xos.logger import observer_logger as logger
+
+class SyncImages(OpenStackSyncStep):
+    provides=[Image]
+    requested_interval=0
+    observes=Image
+
+    def fetch_pending(self, deleted):
+        # Images come from the back end
+        # You can't delete them
+        if (deleted):
+            logger.info("SyncImages: returning because deleted=True")
+            return []
+
+        # get list of images on disk
+        images_path = Config().observer_images_directory
+
+        logger.info("SyncImages: deleted=False, images_path=%s" % images_path)
+
+        available_images = {}
+        if os.path.exists(images_path):
+            for f in os.listdir(images_path):
+                filename = os.path.join(images_path, f)
+                if os.path.isfile(filename) and filename.endswith(".img"):
+                    available_images[f] = filename
+
+        logger.info("SyncImages: available_images = %s" % str(available_images))
+
+        images = Image.objects.all()
+        image_names = [image.name for image in images]
+
+        for image_name in available_images:
+            #remove file extension
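+            # e.g. a file named "trusty-server.img" yields an Image named "trusty-server"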
+            clean_name = ".".join(image_name.split('.')[:-1])
+            if clean_name not in image_names:
+                logger.info("SyncImages: adding %s" % clean_name)
+                image = Image(name=clean_name,
+                              disk_format='raw',
+                              container_format='bare', 
+                              path = available_images[image_name])
+                image.save()
+
+        return Image.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None)) 
+
+    def sync_record(self, image):
+        image.save()
diff --git a/xos/synchronizer/steps/sync_instances.py b/xos/synchronizer/steps/sync_instances.py
new file mode 100644
index 0000000..5cec50d
--- /dev/null
+++ b/xos/synchronizer/steps/sync_instances.py
@@ -0,0 +1,220 @@
+import os
+import base64
+import socket
+from django.db.models import F, Q
+from xos.config import Config
+from xos.settings import RESTAPI_HOSTNAME, RESTAPI_PORT
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models.instance import Instance
+from core.models.slice import Slice, SlicePrivilege, ControllerSlice
+from core.models.network import Network, NetworkSlice, ControllerNetwork, Port
+from synchronizers.base.ansible import *
+from synchronizers.base.syncstep import *
+from xos.logger import observer_logger as logger
+
+def escape(s):
+    s = s.replace('\n',r'\n').replace('"',r'\"')
+    return s
+
+class SyncInstances(OpenStackSyncStep):
+    provides=[Instance]
+    requested_interval=0
+    observes=Instance
+    playbook='sync_instances.yaml'
+
+    def fetch_pending(self, deletion=False):
+        objs = super(SyncInstances, self).fetch_pending(deletion)
+        objs = [x for x in objs if x.isolation=="vm"]
+        return objs
+
+    def get_userdata(self, instance, pubkeys):
+        userdata = '#cloud-config\n\nopencloud:\n   slicename: "%s"\n   hostname: "%s"\n   restapi_hostname: "%s"\n   restapi_port: "%s"\n' % (instance.slice.name, instance.node.name, RESTAPI_HOSTNAME, str(RESTAPI_PORT))
+        userdata += 'ssh_authorized_keys:\n'
+        for key in pubkeys:
+            userdata += '  - %s\n' % key
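+        # the rendered cloud-config looks roughly like this (placeholder values):
+        #   #cloud-config
+        #
+        #   opencloud:
+        #      slicename: "<slice name>"
+        #      hostname: "<node name>"
+        #      restapi_hostname: "<RESTAPI_HOSTNAME>"
+        #      restapi_port: "<RESTAPI_PORT>"
+        #   ssh_authorized_keys:
+        #     - <public key>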
+        return userdata
+
+    def sort_nics(self, nics):
+        result = []
+
+        # Enforce VTN's network order requirement. The access network must be
+        # inserted into the first slot. The management network must be inserted
+        # into the second slot.
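+        # e.g. nics for a private/access network, a management network and a
+        # shared public network should come out ordered [access, management, public]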
+
+        # move the private and/or access network to the first spot
+        for nic in nics[:]:
+            network=nic.get("network", None)
+            if network:
+                tem = network.template
+                if (tem.visibility == "private") and (tem.translation=="none") and ("management" not in tem.name):
+                    result.append(nic)
+                    nics.remove(nic)
+
+        # move the management network to the second spot
+        for nic in nics[:]:
+            network=nic.get("network", None)
+            if network:
+                tem = network.template
+                if (tem.visibility == "private") and (tem.translation=="none") and ("management" in tem.name):
+#MCORD
+#                    if len(result)!=1:
+#                        raise Exception("Management network needs to be inserted in slot 1, but there are %d private nics" % len(result))
+                    result.append(nic)
+                    nics.remove(nic)
+
+        # add everything else. For VTN there probably shouldn't be any more.
+        result.extend(nics)
+
+        return result
+
+    def map_sync_inputs(self, instance):
+        inputs = {}
+        metadata_update = {}
+        if (instance.numberCores):
+            metadata_update["cpu_cores"] = str(instance.numberCores)
+
+        for tag in instance.slice.tags.all():
+            if tag.name.startswith("sysctl-"):
+                metadata_update[tag.name] = tag.value
+
+        slice_memberships = SlicePrivilege.objects.filter(slice=instance.slice)
+        pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
+        if instance.creator.public_key:
+            pubkeys.add(instance.creator.public_key)
+
+        if instance.slice.creator.public_key:
+            pubkeys.add(instance.slice.creator.public_key)
+
+        if instance.slice.service and instance.slice.service.public_key:
+            pubkeys.add(instance.slice.service.public_key)
+
+        nics=[]
+
+        # handle ports that were created by the user
+        port_ids=[]
+        for port in Port.objects.filter(instance=instance):
+            if not port.port_id:
+                raise DeferredException("Instance %s waiting on port %s" % (instance, port))
+            nics.append({"kind": "port", "value": port.port_id, "network": port.network})
+
+        # we want to exclude from 'nics' any network that already has a Port
+        existing_port_networks = [port.network for port in Port.objects.filter(instance=instance)]
+
+        networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice) if ns.network not in existing_port_networks]
+        controller_networks = ControllerNetwork.objects.filter(network__in=networks,
+                                                                controller=instance.node.site_deployment.controller)
+
+        #controller_networks = self.sort_controller_networks(controller_networks)
+        for controller_network in controller_networks:
+            # Lenient exception - causes slow backoff
+            if controller_network.network.template.visibility == 'private' and \
+               controller_network.network.template.translation == 'none':
+                if not controller_network.net_id:
+                    raise DeferredException("Instance %s Private Network %s has no id; Try again later" % (instance, controller_network.network.name))
+                nics.append({"kind": "net", "value": controller_network.net_id, "network": controller_network.network})
+
+        # now include network template
+        network_templates = [network.template.shared_network_name for network in networks \
+                             if network.template.shared_network_name]
+
+        #driver = self.driver.client_driver(caller=instance.creator, tenant=instance.slice.name, controller=instance.controllerNetwork)
+        driver = self.driver.admin_driver(tenant='admin', controller=instance.node.site_deployment.controller)
+        nets = driver.shell.neutron.list_networks()['networks']
+        for net in nets:
+            if net['name'] in network_templates:
+                nics.append({"kind": "net", "value": net['id'], "network": None})
+
+        if (not nics):
+            for net in nets:
+                if net['name']=='public':
+                    nics.append({"kind": "net", "value": net['id'], "network": None})
+
+        nics = self.sort_nics(nics)
+
+        image_name = None
+        controller_images = instance.image.controllerimages.filter(controller=instance.node.site_deployment.controller)
+        if controller_images:
+            image_name = controller_images[0].image.name
+            logger.info("using image from ControllerImage object: " + str(image_name))
+
+        if image_name is None:
+            controller_driver = self.driver.admin_driver(controller=instance.node.site_deployment.controller)
+            images = controller_driver.shell.glanceclient.images.list()
+            for image in images:
+                if image.name == instance.image.name or not image_name:
+                    image_name = image.name
+                    logger.info("using image from glance: " + str(image_name))
+
+        try:
+            legacy = Config().observer_legacy
+        except:
+            legacy = False
+
+        if (legacy):
+            host_filter = instance.node.name.split('.',1)[0]
+        else:
+            host_filter = instance.node.name.strip()
+
+        availability_zone_filter = 'nova:%s'%host_filter
+        instance_name = '%s-%d'%(instance.slice.name,instance.id)
+        self.instance_name = instance_name
+
+        userData = self.get_userdata(instance, pubkeys)
+        if instance.userData:
+            userData += instance.userData
+
+        controller = instance.node.site_deployment.controller
+        fields = {'endpoint':controller.auth_url,
+                     'endpoint_v3': controller.auth_url_v3,
+                     'domain': controller.domain,
+                     'admin_user': instance.creator.email,
+                     'admin_password': instance.creator.remote_password,
+                     'project_name': instance.slice.name,
+                     'tenant': instance.slice.name,
+                     'tenant_description': instance.slice.description,
+                     'name':instance_name,
+                     'ansible_tag':instance_name,
+                     'availability_zone': availability_zone_filter,
+                     'image_name':image_name,
+                     'flavor_name':instance.flavor.name,
+                     'nics':nics,
+                     'meta':metadata_update,
+                     'user_data':r'%s'%escape(userData)}
+        return fields
+
+
+    def map_sync_outputs(self, instance, res):
+        instance_id = res[0]['openstack']['OS-EXT-SRV-ATTR:instance_name']
+        instance_uuid = res[0]['id']
+
+        try:
+            hostname = res[0]['openstack']['OS-EXT-SRV-ATTR:hypervisor_hostname']
+            ip = socket.gethostbyname(hostname)
+            instance.ip = ip
+        except:
+            pass
+
+        instance.instance_id = instance_id
+        instance.instance_uuid = instance_uuid
+        instance.instance_name = self.instance_name
+        instance.save()
+
+
+    def map_delete_inputs(self, instance):
+        controller_register = json.loads(instance.node.site_deployment.controller.backend_register)
+
+        if (controller_register.get('disabled',False)):
+            raise InnocuousException('Controller %s is disabled'%instance.node.site_deployment.controller.name)
+
+        instance_name = '%s-%d'%(instance.slice.name,instance.id)
+        controller = instance.node.site_deployment.controller
+        input = {'endpoint':controller.auth_url,
+                     'admin_user': instance.creator.email,
+                     'admin_password': instance.creator.remote_password,
+                     'admin_tenant': instance.slice.name,
+                     'tenant': instance.slice.name,
+                     'tenant_description': instance.slice.description,
+                     'name':instance_name,
+                     'ansible_tag':instance_name,
+                     'delete': True}
+        return input
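For orientation, a sketch (hypothetical values throughout) of the shape of the fields dict that map_sync_inputs() returns; these keys are the variables consumed by sync_instances.yaml below.

```python
# Illustration only: placeholder values showing the dict shape produced by
# map_sync_inputs() and consumed by the sync_instances.yaml template.
fields = {
    'endpoint': 'http://controller:5000/v2.0/',        # placeholder URL
    'endpoint_v3': 'http://controller:5000/v3/',       # placeholder URL
    'domain': 'Default',
    'admin_user': 'creator@example.com',
    'admin_password': 'not-a-real-password',
    'project_name': 'mysite_myslice',
    'tenant': 'mysite_myslice',
    'tenant_description': 'my slice',
    'name': 'mysite_myslice-3',
    'ansible_tag': 'mysite_myslice-3',
    'availability_zone': 'nova:node1.example.com',
    'image_name': 'trusty-server-multi-nic',
    'flavor_name': 'm1.small',
    'nics': [{'kind': 'net', 'value': 'NET_UUID', 'network': None}],
    'meta': {'cpu_cores': '2'},
    'user_data': '#cloud-config\n...',
}
```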
diff --git a/xos/synchronizer/steps/sync_instances.yaml b/xos/synchronizer/steps/sync_instances.yaml
new file mode 100644
index 0000000..476890f
--- /dev/null
+++ b/xos/synchronizer/steps/sync_instances.yaml
@@ -0,0 +1,35 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - os_server:
+      name: {{ name }}
+      auth:
+        auth_url: {{ endpoint }}
+        username: {{ admin_user }}
+        password: {{ admin_password }}
+        project_name: {{ project_name }}
+      {% if delete -%}
+      state: absent
+      {% else -%}
+      state: present
+      availability_zone: "{{ availability_zone }}"
+      image: {{ image_name }}
+      flavor: {{ flavor_name }}
+      timeout: 200
+      userdata: "{{ user_data }}"
+      config_drive: yes
+      auto_ip: no
+      nics:
+      {% for nic in nics %}
+          - {{ nic.kind }}-id: {{ nic.value }}
+      {% endfor %}
+
+      {% if meta %}
+      meta:
+      {% for k,v in meta.items() %}
+          {{ k }}: "{{ v }}"
+      {% endfor %}
+      {% endif %}
+      {% endif %}
+
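A minimal sketch (assuming jinja2 is available; the real synchronizer uses its own template runner) of how the nics loop above expands each entry into a net-id or port-id argument for os_server:

```python
# Minimal sketch: render just the nics fragment of sync_instances.yaml.
from jinja2 import Template

snippet = Template(
    "nics:\n"
    "{% for nic in nics %}"
    "    - {{ nic.kind }}-id: {{ nic.value }}\n"
    "{% endfor %}"
)
print(snippet.render(nics=[{"kind": "net", "value": "NET_UUID"},
                           {"kind": "port", "value": "PORT_UUID"}]))
# nics:
#     - net-id: NET_UUID
#     - port-id: PORT_UUID
```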
diff --git a/xos/synchronizer/steps/sync_object.py b/xos/synchronizer/steps/sync_object.py
new file mode 100644
index 0000000..aaf2f25
--- /dev/null
+++ b/xos/synchronizer/steps/sync_object.py
@@ -0,0 +1,20 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from synchronizers.base.syncstep import *
+from core.models import *
+from synchronizers.base.ansible import *
+from openstack_xos.driver import OpenStackDriver
+from xos.logger import observer_logger as logger
+import json
+
+class SyncObject(OpenStackSyncStep):
+    provides=[] # Caller fills this in
+    requested_interval=0
+    observes=[] # Caller fills this in
+
+    def sync_record(self, r):
+        raise DeferredException('Waiting for Service dependency: %r'%r)
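SyncObject is a placeholder step whose sync_record always defers; a hypothetical specialization (illustrative only, the class name and model choices are made up) would just fill in provides and observes:

```python
# Illustration only: a made-up specialization; real callers substitute
# their own model classes for provides/observes.
class SyncMyServiceObject(SyncObject):
    provides = [Instance]   # models this step claims to provide
    observes = [Instance]   # models whose pending changes trigger the step
```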
diff --git a/xos/synchronizer/steps/sync_ports.py b/xos/synchronizer/steps/sync_ports.py
new file mode 100644
index 0000000..5e0ff04
--- /dev/null
+++ b/xos/synchronizer/steps/sync_ports.py
@@ -0,0 +1,230 @@
+import os
+import base64
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models import Controller
+from core.models.network import *
+from xos.logger import observer_logger as logger
+
+class SyncPorts(OpenStackSyncStep):
+    requested_interval = 0 # 3600
+    provides=[Port]
+    observes=Port
+
+    #     The way this step works is to enumerate all of the ports that neutron
+    #     has, and then work backward from each port's network-id to determine
+    #     which Network the port is associated with.
+
+    def call(self, failed=[], deletion=False):
+        if deletion:
+            self.delete_ports()
+        else:
+            self.sync_ports()
+
+    def get_driver(self, port):
+        # We need to use a client driver that specifies the tenant
+        # of the destination instance. Nova-compute will not connect
+        # ports to instances if the port's tenant does not match
+        # the instance's tenant.
+
+        # A bunch of stuff to compensate for OpenStackDriver.client_driver()
+        # not being in working condition.
+        from openstack_xos.client import OpenStackClient
+        from openstack_xos.driver import OpenStackDriver
+        controller = port.instance.node.site_deployment.controller
+        slice = port.instance.slice
+        caller = port.network.owner.creator
+        auth = {'username': caller.email,
+                'password': caller.remote_password,
+                'tenant': slice.name}
+        client = OpenStackClient(controller=controller, **auth)
+        driver = OpenStackDriver(client=client)
+
+        return driver
+
+    def sync_ports(self):
+        logger.info("sync'ing Ports [delete=False]")
+
+        ports = Port.objects.all()
+        ports_by_id = {}
+        ports_by_neutron_port = {}
+        for port in ports:
+            ports_by_id[port.id] = port
+            ports_by_neutron_port[port.port_id] = port
+
+        networks = Network.objects.all()
+        networks_by_id = {}
+        for network in networks:
+            for nd in network.controllernetworks.all():
+                networks_by_id[nd.net_id] = network
+
+        #logger.info("networks_by_id = ")
+        #for (network_id, network) in networks_by_id.items():
+        #    logger.info("   %s: %s" % (network_id, network.name))
+
+        instances = Instance.objects.all()
+        instances_by_instance_uuid = {}
+        for instance in instances:
+            instances_by_instance_uuid[instance.instance_uuid] = instance
+
+        # Get all ports in all controllers
+
+        ports_by_id = {}
+        templates_by_id = {}
+        for controller in Controller.objects.all():
+            if not controller.admin_tenant:
+                logger.info("controller %s has no admin_tenant" % controller)
+                continue
+            try:
+                driver = self.driver.admin_driver(controller = controller)
+                ports = driver.shell.neutron.list_ports()["ports"]
+            except:
+                logger.log_exc("failed to get ports from controller %s" % controller)
+                continue
+
+            for port in ports:
+                ports_by_id[port["id"]] = port
+
+            # public-nat and public-dedicated networks don't have a net-id anywhere
+            # in the data model, so build up a list of which ids map to which network
+            # templates.
+            try:
+                neutron_networks = driver.shell.neutron.list_networks()["networks"]
+            except:
+                logger.log_exc("failed to get networks from controller %s" % controller)
+                continue
+            for network in neutron_networks:
+                for template in NetworkTemplate.objects.all():
+                    if template.shared_network_name == network["name"]:
+                        templates_by_id[network["id"]] = template
+
+        for port in ports_by_id.values():
+            #logger.info("port %s" % str(port))
+            if port["id"] in ports_by_neutron_port:
+                # we already have it
+                #logger.info("already accounted for port %s" % port["id"])
+                continue
+
+            if port["device_owner"] != "compute:nova":
+                # we only want the ports that connect to instances
+                #logger.info("port %s is not a compute port, it is a %s" % (port["id"], port["device_owner"]))
+                continue
+
+            instance = instances_by_instance_uuid.get(port['device_id'], None)
+            if not instance:
+                logger.info("no instance for port %s device_id %s" % (port["id"], port['device_id']))
+                continue
+
+            network = networks_by_id.get(port['network_id'], None)
+            if not network:
+                # maybe it's public-nat or public-dedicated. Search the templates for
+                # the id, then see if the instance's slice has some network that uses
+                # that template
+                template = templates_by_id.get(port['network_id'], None)
+                if template and instance.slice:
+                    for candidate_network in instance.slice.networks.all():
+                        if candidate_network.template == template:
+                            network = candidate_network
+            if not network:
+                logger.info("no network for port %s network %s" % (port["id"], port["network_id"]))
+
+                # we know it's associated with a instance, but we don't know
+                # which network it is part of.
+
+                continue
+
+            if network.template.shared_network_name:
+                # If it's a shared network template, then more than one network
+                # object maps to the neutron network. We have to do a whole bunch
+                # of extra work to find the right one.
+                networks = network.template.network_set.all()
+                network = None
+                for candidate_network in networks:
+                    if (candidate_network.owner == instance.slice):
+                        logger.info("found network %s" % candidate_network)
+                        network = candidate_network
+
+                if not network:
+                    logger.info("failed to find the correct network for a shared template for port %s network %s" % (port["id"], port["network_id"]))
+                    continue
+
+            if not port["fixed_ips"]:
+                logger.info("port %s has no fixed_ips" % port["id"])
+                continue
+
+            ip=port["fixed_ips"][0]["ip_address"]
+            mac=port["mac_address"]
+            logger.info("creating Port (%s, %s, %s, %s)" % (str(network), str(instance), ip, str(port["id"])))
+
+            ns = Port(network=network,
+                               instance=instance,
+                               ip=ip,
+                               mac=mac,
+                               port_id=port["id"])
+
+            try:
+                ns.save()
+            except:
+                logger.log_exc("failed to save port %s" % str(ns))
+                continue
+
+        # For ports that were created by the user, find the ones
+        # that don't have neutron ports yet, and create them.
+        for port in Port.objects.filter(Q(port_id__isnull=True), Q(instance__isnull=False) ):
+            logger.info("XXX working on port %s" % port)
+            controller = port.instance.node.site_deployment.controller
+            slice = port.instance.slice
+
+            if controller:
+                cn=port.network.controllernetworks.filter(controller=controller)
+                if not cn:
+                    logger.log_exc("no controllernetwork for %s" % port)
+                    continue
+                cn=cn[0]
+                if cn.lazy_blocked:
+                    cn.lazy_blocked=False
+                    cn.save()
+                    logger.info("deferring port %s because controllerNetwork was lazy-blocked" % port)
+                    continue
+                if not cn.net_id:
+                    logger.info("deferring port %s because controllerNetwork does not have a port-id yet" % port)
+                    continue
+                try:
+                    driver = self.get_driver(port)
+
+                    args = {"network_id": cn.net_id}
+                    neutron_port_name = port.get_parameters().get("neutron_port_name", None)
+                    if neutron_port_name:
+                        args["name"] = neutron_port_name
+
+                    neutron_port = driver.shell.neutron.create_port({"port": args})["port"]
+                    port.port_id = neutron_port["id"]
+                    if neutron_port["fixed_ips"]:
+                        port.ip = neutron_port["fixed_ips"][0]["ip_address"]
+                    port.mac = neutron_port["mac_address"]
+                    port.xos_created = True
+                    logger.info("created neutron port %s for %s" % (port.port_id, port))
+                except:
+                    logger.log_exc("failed to create neutron port for %s" % port)
+                    continue
+                port.save()
+
+    def delete_ports(self):
+        logger.info("sync'ing Ports [delete=True]")
+        for port in Port.deleted_objects.all():
+            self.delete_record(port)
+
+    def delete_record(self, port):
+        if port.xos_created and port.port_id:
+            logger.info("calling openstack to destroy port %s" % port.port_id)
+            try:
+                driver = self.get_driver(port)
+                driver.shell.neutron.delete_port(port.port_id)
+            except:
+                logger.log_exc("failed to delete port %s from neutron" % port.port_id)
+                return
+
+        logger.info("Purging port %s" % port)
+        port.delete(purge=True)
+
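To make the reverse lookup in sync_ports() concrete, here is a minimal sketch with hypothetical ids: a neutron port's device_id resolves to an Instance and its network_id to a Network, and a Port record is only created when both lookups succeed.

```python
# Minimal sketch of the reverse lookup, with hypothetical ids and
# placeholder strings standing in for XOS model objects.
neutron_port = {"id": "P1", "device_id": "I1", "network_id": "N1",
                "device_owner": "compute:nova",
                "fixed_ips": [{"ip_address": "10.0.0.5"}],
                "mac_address": "fa:16:3e:00:00:01"}

instances_by_instance_uuid = {"I1": "<Instance mysite_myslice-3>"}
networks_by_id = {"N1": "<Network mysite_access>"}

instance = instances_by_instance_uuid.get(neutron_port["device_id"])
network = networks_by_id.get(neutron_port["network_id"])
# Only when both instance and network are found is a Port object saved.
```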
diff --git a/xos/synchronizer/steps/sync_roles.py b/xos/synchronizer/steps/sync_roles.py
new file mode 100644
index 0000000..e859316
--- /dev/null
+++ b/xos/synchronizer/steps/sync_roles.py
@@ -0,0 +1,23 @@
+import os
+import base64
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.openstacksyncstep import OpenStackSyncStep
+from core.models.role import Role
+from core.models.site import SiteRole, Controller, ControllerRole
+from core.models.slice import SliceRole
+from xos.logger import observer_logger as logger
+
+class SyncRoles(OpenStackSyncStep):
+    provides=[Role]
+    requested_interval=0
+    observes=[SiteRole,SliceRole,ControllerRole]
+
+    def sync_record(self, role):
+        if not role.enacted:
+            controllers = Controller.objects.all()
+            for controller in controllers:
+                driver = self.driver.admin_driver(controller=controller)
+                driver.create_role(role.role)
+            role.save()
+
diff --git a/xos/synchronizer/steps/teardown_container.yaml b/xos/synchronizer/steps/teardown_container.yaml
new file mode 100644
index 0000000..5cabc78
--- /dev/null
+++ b/xos/synchronizer/steps/teardown_container.yaml
@@ -0,0 +1,33 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: {{ username }}
+  sudo: yes
+
+  vars:
+    container_name: {{ container_name }}
+    docker_image: {{ docker_image }}
+    ports:
+    {% for port in ports %}
+       - device: {{ port.device }}
+         xos_network_id: {{ port.xos_network_id }}
+         mac: {{ port.mac|default("") }}
+         ip: {{ port.ip }}
+         snoop_instance_mac: {{ port.snoop_instance_mac }}
+         snoop_instance_id: {{ port.snoop_instance_id }}
+         parent_mac: {{ port.parent_mac|default("") }}
+         s_tag: {{ port.s_tag|default("") }}
+         c_tag: {{ port.c_tag|default("") }}
+         next_hop: {{ port.next_hop|default("") }}
+         bridge: {{ port.bridge }}
+    {% endfor %}
+    volumes:
+    {% for volume in volumes %}
+       - {{ volume }}
+    {% endfor %}
+
+  tasks:
+  - name: Make sure container is stopped
+    service: name=container-{{ container_name }} state=stopped
+
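For orientation, a sketch (hypothetical values throughout) of the variables a sync step would supply when rendering teardown_container.yaml; the keys mirror the template variables above.

```python
# Illustration only: placeholder values for the variables used by
# teardown_container.yaml.
playbook_vars = {
    "instance_name": "mysite_myslice-3",
    "username": "ubuntu",
    "container_name": "container-xyz",
    "docker_image": "example/some-image",    # placeholder image name
    "ports": [{"device": "eth0", "xos_network_id": "42", "mac": "",
               "ip": "10.0.0.5", "snoop_instance_mac": "", "snoop_instance_id": "",
               "parent_mac": "", "s_tag": "", "c_tag": "", "next_hop": "",
               "bridge": "br-int"}],
    "volumes": ["/mnt/data"],
}
```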