Merge branch 'master' of https://github.com/open-cloud/xos into AddHelloWorldService
diff --git a/xos/ceilometer/models.py b/xos/ceilometer/models.py
index 3da29dd..ae73b3e 100644
--- a/xos/ceilometer/models.py
+++ b/xos/ceilometer/models.py
@@ -39,6 +39,10 @@
         super(MonitoringChannel, self).__init__(*args, **kwargs)
         self.set_attribute("use_same_instance_for_multiple_tenants", True)
 
+    def can_update(self, user):
+        # Allow non-admin users to create and update instances of this model
+        return True
+
     def save(self, *args, **kwargs):
         if not self.creator:
             if not getattr(self, "caller", None):
diff --git a/xos/configurations/bash/Makefile.inside b/xos/configurations/bash/Makefile.inside
index 30bf6d6..176ef47 100644
--- a/xos/configurations/bash/Makefile.inside
+++ b/xos/configurations/bash/Makefile.inside
@@ -2,5 +2,6 @@
 
 setup_xos:
 	bash /opt/xos/scripts/docker_setup_xos
+	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/fixtures.yaml
 	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/cloudlab.yaml
 	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/cloudlab-nodes.yaml
diff --git a/xos/configurations/common/fixtures.yaml b/xos/configurations/common/fixtures.yaml
index 0d77ca2..c5e9dd1 100644
--- a/xos/configurations/common/fixtures.yaml
+++ b/xos/configurations/common/fixtures.yaml
@@ -18,3 +18,6 @@
 
     device:
       type: tosca.nodes.NetworkParameterType
+
+    bridge:
+      type: tosca.nodes.NetworkParameterType
diff --git a/xos/configurations/cord/README.md b/xos/configurations/cord/README.md
index 4d64953..f0a0a9b 100644
--- a/xos/configurations/cord/README.md
+++ b/xos/configurations/cord/README.md
@@ -47,9 +47,9 @@
 * Wait until you get an email from CloudLab with the subject "OpenStack Instance Finished Setting Up".
 * Log in to the *ctl* node of your experiment and run:
 ```
-$ git clone https://github.com/open-cloud/xos.git
-$ cd xos/xos/configurations/cord/
-$ make
+ctl:~$ git clone https://github.com/open-cloud/xos.git
+ctl:~$ cd xos/xos/configurations/cord/
+ctl:~/xos/xos/configurations/cord$ make
 ```
 
 Running `make` in this directory creates the XOS Docker container and runs the TOSCA engine with `cord.yaml` to
@@ -66,6 +66,37 @@
 ONOS app for the vCPE. To verify that it has received an IP address mapping, look at the **Routeable subnet:** field in 
 the appropriate *Vbng tenant* object in XOS.  It should contain an IP address in the 10.254.0.0/24 subnet.
 
+After launching the ONOS apps, it is necessary to configure software switches along the dataplane so that ONOS can control
+them.  To do this, from the `cord` configuration directory:
+```
+ctl:~/xos/xos/configurations/cord$ cd dataplane/
+ctl:~/xos/xos/configurations/cord/dataplane$ ./gen-inventory.sh > hosts
+ctl:~/xos/xos/configurations/cord/dataplane$ ansible-playbook -i hosts dataplane.yaml
+```
+
+Currently the vOLT switch does not forward ARP, so it is necessary to set up static ARP mappings between the client
+and the vCPE.  Log into the client and add an ARP entry for the vCPE:
+```
+client:$ sudo arp -s 192.168.0.1 <mac-of-eth1-in-vCPE-container>
+```
+Inside the vCPE container add a similar entry for the client:
+```
+vcpe:$ arp -s 192.168.0.2 <mac-of-br-sub-on-client>
+```
+
+Now SSH into ONOS running the OLT app (see below) and activate the subscriber:
+```
+onos> add-subscriber-access of:0000000000000001 1 432
+```
+
+At this point you should be able to ping 192.168.0.1 from the client.  The final step is to set the 
+vCPE as the gateway on the client:
+```
+client:$ sudo route del default gw 10.11.10.5
+client:$ sudo route add default gw 192.168.0.1
+```
+The client should now be able to surf the Internet through the dataplane.
+
 ## How to log into ONOS
 
 The ONOS Docker container runs in the VMs belonging to the *mysite_onos* slice.  All ports exposed by the ONOS container are forwarded to the outside, and can be accessed from the *ctl* node using the `flat-lan-1-net` address of the hosting VM.  For example, if the IP address of the VM is 10.11.10.30, then it is possible to SSH to ONOS as follows (password is *karaf*):
diff --git a/xos/configurations/cord/dataplane/dataplane-bm.yaml b/xos/configurations/cord/dataplane/dataplane-bm.yaml
new file mode 100644
index 0000000..2fbbf6a
--- /dev/null
+++ b/xos/configurations/cord/dataplane/dataplane-bm.yaml
@@ -0,0 +1,32 @@
+---
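+# Set up GRE tunnels between the vOLT switch and each bare-metal node, so that
+# VLAN-tagged packets are not swallowed by the underlying network (see the
+# corresponding note in dataplane.yaml).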
+- hosts: switch_volt
+  sudo: yes
+  tasks:
+  - name: Create tunnel port on br-lan
+    openvswitch_port:
+      bridge=br-lan
+      port={{ grename }}
+      state=present
+
+  - name: Set up GRE tunnel to vCPE
+    shell: ovs-vsctl set Interface {{ grename }} type=gre options:remote_ip={{ bm_addr }}
+
+- hosts: baremetal
+  user: root
+  sudo: no
+  tasks:
+  - name: Create br-lan
+    openvswitch_bridge:
+      bridge=br-lan
+      state=present
+
+  - name: Create tunnel port
+    openvswitch_port:
+      bridge=br-lan
+      port={{ grename }}
+      state=present
+
+  - name: Configure GRE tunnel to vOLT switch
+    shell: ovs-vsctl set Interface {{ grename }} type=gre options:remote_ip={{ volt_addr }}
+
diff --git a/xos/configurations/cord/dataplane/dataplane.yaml b/xos/configurations/cord/dataplane/dataplane.yaml
index 026ec89..f43e4d7 100644
--- a/xos/configurations/cord/dataplane/dataplane.yaml
+++ b/xos/configurations/cord/dataplane/dataplane.yaml
@@ -47,6 +47,9 @@
       port={{ public_net.stdout }}
       state=present
 
+  - name: Remove IP address on public_network
+    command: /sbin/ifconfig {{ public_net.stdout }} 0.0.0.0
+
   - name: Change datapath ID of bridge to match config file
     command: /usr/bin/ovs-vsctl set bridge br-vbng other-config:datapath-id={{ ovs_dpid }}
 
@@ -198,13 +201,18 @@
       port={{ client_net.stdout }}
       state=present
 
+  - name: Configure br-sub (set address and MTU, disable offloads)
+    shell: "{{ item }}"
+    with_items:
+    - ifconfig br-sub 192.168.0.2 mtu 1400 up
+    - ethtool -K br-sub tso off
+    - ethtool -K br-sub tx off
+
   # Run dhclient on br-sub internal interface to issue DHCP request to vCPE
 
 #
-# This play is just for testing.  The vCPE configuration below will be
-# integrated with the vCPE Synchronizer.
-#
-# Need to change the data model to store both s-tag and c-tag
+# Need to set up a tunnel between vCPE and vOLT to keep VLAN-tagged
+# packets from being swallowed by the network.
 #
 - hosts: vcpe
   sudo: yes
diff --git a/xos/configurations/cord/dataplane/gen-etc-hosts.sh b/xos/configurations/cord/dataplane/gen-etc-hosts.sh
new file mode 100755
index 0000000..ce98731
--- /dev/null
+++ b/xos/configurations/cord/dataplane/gen-etc-hosts.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# set -x
+
+source ../../common/admin-openrc.sh
+
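+# Print the IP address of the instance whose name matches $1, on network $2,
+# parsed out of "nova list" output.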
+get_ip () {
+    LABEL=$1
+    NETWORK=$2
+    nova list --all-tenants|grep $LABEL|sed "s/^.*$NETWORK=//g"|sed 's/; .*$//g'|awk '{print $1}'
+}
+
+cat <<EOF
+$( get_ip mysite_onos_vbng flat-lan-1-net) onos_vbng
+$( get_ip mysite_vbng flat-lan-1-net) switch_vbng
+$( get_ip mysite_onos_volt flat-lan-1-net) onos_volt
+$( get_ip mysite_volt flat-lan-1-net) switch_volt
+$( get_ip mysite_clients flat-lan-1-net) client
+$( get_ip mysite_vcpe flat-lan-1-net) vcpe
+EOF
diff --git a/xos/configurations/cord/dataplane/generate.sh b/xos/configurations/cord/dataplane/gen-inventory.sh
similarity index 91%
rename from xos/configurations/cord/dataplane/generate.sh
rename to xos/configurations/cord/dataplane/gen-inventory.sh
index 49a12ef..590376d 100755
--- a/xos/configurations/cord/dataplane/generate.sh
+++ b/xos/configurations/cord/dataplane/gen-inventory.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 # set -x
 
-source ~/admin-openrc.sh
+source ../../common/admin-openrc.sh
 
 get_ip () {
     LABEL=$1
@@ -14,7 +14,7 @@
 switch_vbng  ansible_ssh_host=$( get_ip mysite_vbng flat-lan-1-net) wan_ip=$( get_ip mysite_vbng wan_network) public_ip=$( get_ip mysite_vbng tun0-net )
 
 onos_volt    ansible_ssh_host=$( get_ip mysite_onos_volt flat-lan-1-net)
-switch_volt  ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) subscriber_ip=$( get_ip mysite_volt subscriber_network) lan_ip=$( get_ip mysite_volt lan_network) 
+switch_volt  ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) subscriber_ip=$( get_ip mysite_volt subscriber_network) lan_ip=$( get_ip mysite_volt lan_network)
 
 client       ansible_ssh_host=$( get_ip mysite_clients flat-lan-1-net) subscriber_ip=$( get_ip mysite_clients subscriber_network)
 vcpe         ansible_ssh_host=$( get_ip mysite_vcpe flat-lan-1-net) lan_ip=$( get_ip mysite_vcpe lan_network)
diff --git a/xos/configurations/cord/dataplane/generate-bm.sh b/xos/configurations/cord/dataplane/generate-bm.sh
new file mode 100755
index 0000000..1a3ec78
--- /dev/null
+++ b/xos/configurations/cord/dataplane/generate-bm.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+source ~/admin-openrc.sh
+
+# Print the IP address of the instance whose name matches $1, on network $2,
+# parsed out of "nova list" output.
+get_ip () {
+    LABEL=$1
+    NETWORK=$2
+    nova list --all-tenants|grep $LABEL|sed "s/^.*$NETWORK=//g"|sed 's/; .*$//g'|awk '{print $1}'
+}
+
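+# Enumerate the bare-metal (CloudLab hypervisor) nodes known to nova; for each
+# one, pair it with the vOLT switch under a unique GRE tunnel name.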
+NODES=`sudo bash -c "source /root/setup/admin-openrc.sh ; nova hypervisor-list" |grep cloudlab|awk '{print $4}'`
+I=1
+for NODE in $NODES; do
+    IP=`getent hosts $NODE | awk '{ print $1 }'`
+    echo switch_volt$I    ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) grename=gre-bm-$I bm_addr=$IP
+    echo bm$I           ansible_ssh_host=$IP grename=gre-bm-$I volt_addr=$( get_ip mysite_volt lan_network)  ansible_ssh_private_key_file=/root/.ssh/id_rsa
+    I=$(( I+1 ))
+done
+
+# a kludge for now -- just rerun the onos_volt step for each baremetal machine
+
+echo "[switch_volt]"
+I=1
+for NODE in $NODES; do
+    echo switch_volt$I
+    I=$((I+1))
+done
+
+echo "[baremetal]"
+I=1
+for NODE in $NODES; do
+    echo bm$I
+    I=$((I+1))
+done
diff --git a/xos/cord/models.py b/xos/cord/models.py
index a4e7501..daac40c 100644
--- a/xos/cord/models.py
+++ b/xos/cord/models.py
@@ -700,6 +700,7 @@
                 port.set_parameter("c_tag", self.volt.c_tag)
                 port.set_parameter("s_tag", self.volt.s_tag)
                 port.set_parameter("device", "eth1")
+                port.set_parameter("bridge", "br-lan")
 
                 wan_networks = [x for x in instance.slice.networks.all() if "wan" in x.name]
                 if not wan_networks:
diff --git a/xos/core/models/service.py b/xos/core/models/service.py
index 49c534a..ba54a33 100644
--- a/xos/core/models/service.py
+++ b/xos/core/models/service.py
@@ -337,6 +337,106 @@
             return None
         return sorted(st, key=attrgetter('id'))[0]
 
+class Scheduler(object):
+    # XOS Scheduler Abstract Base Class
+    # Used to implement schedulers that pick which node to put instances on
+
+    def __init__(self, slice):
+        self.slice = slice
+
+    def pick(self):
+        # this method should return a tuple (node, parent)
+        #    node is the node to instantiate on
+        #    parent is for container_vm instances only, and is the VM that will
+        #      hold the container
+
+        raise NotImplementedError("Scheduler is an abstract base class")
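+
+    # Minimal usage sketch: callers unpack the (node, parent) pair and pass
+    # both to the Instance() constructor, as TenantWithContainer does below:
+    #     (node, parent) = LeastLoadedNodeScheduler(slice).pick()
+    #     instance = Instance(slice=slice, node=node, parent=parent, ...)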
+
+class LeastLoadedNodeScheduler(Scheduler):
+    # This scheduler always returns the node with the fewest instances.
+
+    def __init__(self, slice):
+        super(LeastLoadedNodeScheduler, self).__init__(slice)
+
+    def pick(self):
+        from core.models import Node
+        nodes = list(Node.objects.all())
+        # TODO: logic to filter nodes by which nodes are up, and which
+        #   nodes the slice can instantiate on.
+        nodes = sorted(nodes, key=lambda node: node.instances.all().count())
+        return (nodes[0], None)
+
+class ContainerVmScheduler(Scheduler):
+    # This scheduler picks a VM in the slice with the fewest containers inside
+    # of it. If no VMs are suitable, then it creates a VM.
+
+    # this is a hack and should be replaced by something smarter...
+    LOOK_FOR_IMAGES=["ubuntu-vcpe4",        # ONOS demo machine -- preferred vcpe image
+                     "Ubuntu 14.04 LTS",    # portal
+                     "Ubuntu-14.04-LTS",    # ONOS demo machine
+                     "trusty-server-multi-nic", # CloudLab
+                    ]
+
+    MAX_CONTAINERS_PER_VM = 10
+
+    def __init__(self, slice):
+        super(ContainerVmScheduler, self).__init__(slice)
+
+    @property
+    def image(self):
+        from core.models import Image
+
+        look_for_images = self.LOOK_FOR_IMAGES
+        for image_name in look_for_images:
+            images = Image.objects.filter(name = image_name)
+            if images:
+                return images[0]
+
+        raise XOSProgrammingError("No ContainerVM image (looked for %s)" % str(look_for_images))
+
+    def make_new_instance(self):
+        from core.models import Instance, Flavor
+
+        flavors = Flavor.objects.filter(name="m1.small")
+        if not flavors:
+            raise XOSConfigurationError("No m1.small flavor")
+
+        (node,parent) = LeastLoadedNodeScheduler(self.slice).pick()
+
+        instance = Instance(slice = self.slice,
+                        node = node,
+                        image = self.image,
+                        creator = self.slice.creator,
+                        deployment = node.site_deployment.deployment,
+                        flavor = flavors[0],
+                        isolation = "vm",
+                        parent = parent)
+        instance.save()
+        # We rely on a special naming convention to identify the VMs that will
+        # hold containers.
+        instance.name = "%s-outer-%s" % (instance.slice.name, instance.id)
+        instance.save()
+        return instance
+
+    def pick(self):
+        from core.models import Instance
+
+        # collect the slice's container-holding VMs that still have room
+        avail_vms = []
+        for vm in self.slice.instances.filter(isolation="vm"):
+            if vm.name.startswith("%s-outer-" % self.slice.name):
+                container_count = Instance.objects.filter(parent=vm).count()
+                if container_count < self.MAX_CONTAINERS_PER_VM:
+                    avail_vms.append( (vm, container_count) )
+
+        if avail_vms:
+            # pick the VM with the fewest containers
+            avail_vms = sorted(avail_vms, key=lambda x: x[1])
+            instance = avail_vms[0][0]
+            return (instance.node, instance)
+
+        instance = self.make_new_instance()
+        return (instance.node, instance)
+
 class TenantWithContainer(Tenant):
     """ A tenant that manages a container """
 
@@ -357,7 +457,6 @@
         self.cached_instance=None
         self.orig_instance_id = self.get_initial_attribute("instance_id")
 
-
     @property
     def instance(self):
         from core.models import Instance
@@ -426,7 +525,7 @@
             if images:
                 return images[0]
 
-        raise XOSProgrammingError("No VPCE image (looked for %s)" % str(self.look_for_images))
+        raise XOSProgrammingError("No vCPE image (looked for %s)" % str(look_for_images))
 
     @creator.setter
     def creator(self, value):
@@ -436,23 +535,11 @@
             self.cached_creator=None
         self.set_attribute("creator_id", value)
 
-    def pick_node_for_instance(self):
-        from core.models import Node
-        nodes = list(Node.objects.all())
-        # TODO: logic to filter nodes by which nodes are up, and which
-        #   nodes the slice can instantiate on.
-        nodes = sorted(nodes, key=lambda node: node.instances.all().count())
-        return nodes[0]
-
     def save_instance(self, instance):
         # Override this function to do custom pre-save or post-save processing,
         # such as creating ports for containers.
         instance.save()
 
-    def pick_vm(self):
-        # for container-in-VM, pick a VM
-        raise "Not Implemented"
-
     def pick_least_loaded_instance_in_slice(self, slices):
         for slice in slices:
             if slice.instances.all().count() > 0:
@@ -498,13 +585,12 @@
                 if not flavors:
                     raise XOSConfigurationError("No m1.small flavor")
 
-                node =self.pick_node_for_instance()
                 slice = self.provider_service.slices.all()[0]
 
                 if slice.default_isolation == "container_vm":
-                    parent = self.pick_vm()
+                    (node, parent) = ContainerVmScheduler(slice).pick()
                 else:
-                    parent = None
+                    (node, parent) = LeastLoadedNodeScheduler(slice).pick()
 
                 instance = Instance(slice = slice,
                                 node = node,
diff --git a/xos/model_policies/model_policy_Instance.py b/xos/model_policies/model_policy_Instance.py
index 40f52bb..ffc9847 100644
--- a/xos/model_policies/model_policy_Instance.py
+++ b/xos/model_policies/model_policy_Instance.py
@@ -1,35 +1,44 @@
 def handle_container_on_metal(instance):
         from core.models import Instance, Flavor, Port, Image
 
+        print "MODEL POLICY: instance", instance, "handle container_on_metal"
+
         if instance.deleted:
             return
 
-        # Our current docker network strategy requires that there be some
-        # VM on the server that connects to the networks, so that
-        # the containers can piggyback off of that configuration.
-        if not Instance.objects.filter(slice=instance.slice, node=instance.node, isolation="vm").exists():
-            flavors = Flavor.objects.filter(name="m1.small")
-            if not flavors:
-                raise XOSConfigurationError("No m1.small flavor")
+        if (instance.isolation in ["container"]):
+            # Our current docker-on-metal network strategy requires that there be some
+            # VM on the server that connects to the networks, so that
+            # the containers can piggyback off of that configuration.
+            if not Instance.objects.filter(slice=instance.slice, node=instance.node, isolation="vm").exists():
+                flavors = Flavor.objects.filter(name="m1.small")
+                if not flavors:
+                    raise XOSConfigurationError("No m1.small flavor")
 
-            images = Image.objects.filter(kind="vm")
+                images = Image.objects.filter(kind="vm")
 
-            companion_instance = Instance(slice = instance.slice,
-                            node = instance.node,
-                            image = images[0],
-                            creator = instance.creator,
-                            deployment = instance.node.site_deployment.deployment,
-                            flavor = flavors[0])
-            companion_instance.save()
+                companion_instance = Instance(slice = instance.slice,
+                                node = instance.node,
+                                image = images[0],
+                                creator = instance.creator,
+                                deployment = instance.node.site_deployment.deployment,
+                                flavor = flavors[0])
+                companion_instance.save()
+
+                print "MODEL POLICY: instance", instance, "created companion", companion_instance
 
         # Add the ports for the container
         for network in instance.slice.networks.all():
+            # hmmm... The NAT ports never become ready, because sync_ports never
+            # instantiates them. Need to think about this.
+            print "MODEL POLICY: instance", instance, "handling network", network
             if (network.name.endswith("-nat")):
                 continue
 
             if not Port.objects.filter(network=network, instance=instance).exists():
                 port = Port(network = network, instance=instance)
                 port.save()
+                print "MODEL POLICY: instance", instance, "created port", port
 
 def handle(instance):
     from core.models import Controller, ControllerSlice, ControllerNetwork, NetworkSlice
@@ -40,6 +49,7 @@
 
     for cn in controller_networks:
         if (cn.lazy_blocked):
+                print "MODEL POLICY: instance", instance, "unblocking network", cn.network
 		cn.lazy_blocked=False
 		cn.backend_register = '{}'
 		cn.save()
diff --git a/xos/model_policy.py b/xos/model_policy.py
index ced785e..9462b35 100644
--- a/xos/model_policy.py
+++ b/xos/model_policy.py
@@ -105,7 +105,7 @@
 
 def run_policy_once():
         from core.models import Instance,Slice,Controller,Network,User,SlicePrivilege,Site,SitePrivilege,Image,ControllerSlice,ControllerUser,ControllerSite
-        models = [Instance,Slice, Controller, Network, User, SlicePrivilege, Site, SitePrivilege, Image, ControllerSlice, ControllerSite, ControllerUser]
+        models = [Controller, Site, SitePrivilege, Image, ControllerSlice, ControllerSite, ControllerUser, User, Slice, Network, Instance, SlicePrivilege]
         objects = []
         deleted_objects = []
 
diff --git a/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2 b/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2
index cba6f2a..4c712f1 100644
--- a/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2
+++ b/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2
@@ -9,6 +9,8 @@
 admin_password={{ admin_password }}
 
 [allowed_tenants]
+{% if allowed_tenant_ids %}
 {% for tenant_id in allowed_tenant_ids %}
 {{ tenant_id }}
 {% endfor %}
+{% endif %}
diff --git a/xos/openstack_observer/steps/sync_container.py b/xos/openstack_observer/steps/sync_container.py
index fefd415..b9b41c5 100644
--- a/xos/openstack_observer/steps/sync_container.py
+++ b/xos/openstack_observer/steps/sync_container.py
@@ -39,6 +39,16 @@
                 return p
         return None
 
+    def get_parent_port_mac(self, instance, port):
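+        # Return the MAC address of the parent VM's port on the same network as
+        # the container's port; start-container.sh.j2 later maps this MAC back
+        # to a device name (see mac_to_iface) when attaching the container.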
+        if not instance.parent:
+            raise Exception("instance has no parent")
+        for parent_port in instance.parent.ports.all():
+            if parent_port.network == port.network:
+                if not parent_port.mac:
+                    raise Exception("parent port on network %s does not have mac yet" % parent_port.network.name)
+                return parent_port.mac
+        raise Exception("failed to find corresponding parent port for network %s" % port.network.name)
+
     def get_ports(self, o):
         i=0
         ports = []
@@ -62,11 +72,13 @@
                 pd["snoop_instance_mac"] = instance_port.mac
                 pd["snoop_instance_id"] = instance_port.instance.instance_id
                 pd["src_device"] = ""
+                pd["bridge"] = "br-int"
             else:
                 # container in VM
                 pd["snoop_instance_mac"] = ""
                 pd["snoop_instance_id"] = ""
-                pd["src_device"] = "eth%d" % i
+                pd["parent_mac"] = self.get_parent_port_mac(o, port)
+                pd["bridge"] = ""
 
             for (k,v) in port.get_parameters().items():
                 pd[k] = v
diff --git a/xos/openstack_observer/steps/sync_container.yaml b/xos/openstack_observer/steps/sync_container.yaml
index 7fb6331..56edaea 100644
--- a/xos/openstack_observer/steps/sync_container.yaml
+++ b/xos/openstack_observer/steps/sync_container.yaml
@@ -16,10 +16,11 @@
          ip: {{ port.ip }}
          snoop_instance_mac: {{ port.snoop_instance_mac }}
          snoop_instance_id: {{ port.snoop_instance_id }}
-         src_device: {{ port.src_device }}
+         parent_mac: {{ port.parent_mac|default("") }}
          s_tag: {{ port.s_tag|default("")  }}
          c_tag: {{ port.c_tag|default("") }}
          next_hop: {{ port.next_hop|default("") }}
+         bridge: {{ port.bridge }}
     {% endfor %}
     volumes:
     {% for volume in volumes %}
diff --git a/xos/openstack_observer/steps/sync_instances.py b/xos/openstack_observer/steps/sync_instances.py
index 1130c24..815c83e 100644
--- a/xos/openstack_observer/steps/sync_instances.py
+++ b/xos/openstack_observer/steps/sync_instances.py
@@ -66,7 +66,7 @@
             if controller_network.network.template.visibility == 'private' and \
                controller_network.network.template.translation == 'none':
                    if not controller_network.net_id:
-                        raise DeferredException("Private Network %s has no id; Try again later" % controller_network.network.name)
+                        raise DeferredException("Instance %s Private Network %s has no id; Try again later" % (instance, controller_network.network.name))
                    nics.append(controller_network.net_id)
 
         # now include network template
diff --git a/xos/openstack_observer/steps/teardown_container.yaml b/xos/openstack_observer/steps/teardown_container.yaml
new file mode 100644
index 0000000..5cabc78
--- /dev/null
+++ b/xos/openstack_observer/steps/teardown_container.yaml
@@ -0,0 +1,33 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: {{ username }}
+  sudo: yes
+
+  vars:
+    container_name: {{ container_name }}
+    docker_image: {{ docker_image }}
+    ports:
+    {% for port in ports %}
+       - device: {{ port.device }}
+         xos_network_id: {{ port.xos_network_id }}
+         mac: {{ port.mac|default("") }}
+         ip: {{ port.ip }}
+         snoop_instance_mac: {{ port.snoop_instance_mac }}
+         snoop_instance_id: {{ port.snoop_instance_id }}
+         parent_mac: {{ port.parent_mac|default("") }}
+         s_tag: {{ port.s_tag|default("")  }}
+         c_tag: {{ port.c_tag|default("") }}
+         next_hop: {{ port.next_hop|default("") }}
+         bridge: {{ port.bridge }}
+    {% endfor %}
+    volumes:
+    {% for volume in volumes %}
+       - {{ volume }}
+    {% endfor %}
+
+  tasks:
+  - name: Make sure container is stopped
+    service: name=container-{{ container_name }} state=stopped
+
diff --git a/xos/openstack_observer/templates/start-container.sh.j2 b/xos/openstack_observer/templates/start-container.sh.j2
index 42d93a2..260666c 100644
--- a/xos/openstack_observer/templates/start-container.sh.j2
+++ b/xos/openstack_observer/templates/start-container.sh.j2
@@ -6,6 +6,32 @@
 CONTAINER={{ container_name }}
 IMAGE={{ docker_image }}
 
+function mac_to_iface {
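+    # Print the name of the interface that owns MAC address $1, by scanning
+    # ifconfig output and skipping VLAN subinterfaces (names containing '.').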
+    PARENT_MAC=$1
+    ifconfig|grep $PARENT_MAC| awk '{print $1}'|grep -v '\.'
+}
+
+function encapsulate_stag {
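+    # Ensure the VLAN subinterface $1.$2 exists and is up, so that S-TAG $2 is
+    # pushed/popped on traffic through it; leaves the name in STAG_IFACE.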
+    LAN_IFACE=$1
+    STAG=$2
+    if ifconfig $LAN_IFACE > /dev/null; then
+        STAG_IFACE=$LAN_IFACE.$STAG
+        ifconfig $LAN_IFACE up
+        if ifconfig $STAG_IFACE > /dev/null 2>&1; then
+            echo $STAG_IFACE is already created
+        else
+            ip link add link $LAN_IFACE name $STAG_IFACE type vlan id $STAG
+        fi
+        ifconfig $STAG_IFACE up
+    else
+        echo There is no $LAN_IFACE. Aborting.
+        exit 1
+    fi
+}
+
+
 {% if volumes %}
 {% for volume in volumes %}
 DEST_DIR=/var/container_volumes/$CONTAINER/{{ volume }}
@@ -38,11 +64,10 @@
 CTAG_ARG=""
 {% endif %}
 
-{% if port.src_device %}
+{% if port.parent_mac %}
 # container-in-VM
-VLAN_ARG=""
-NEXTHOP_ARG=""
-CMD="docker exec $CONTAINER ifconfig {{ port.src_device }} >> /dev/null || pipework {{ port.src_device }} -i {{ port.device }} $CONTAINER {{ port.ip }}/24$NEXTHOP_ARG {{ port.mac }} $CTAG_ARG"
+SRC_DEV=$( mac_to_iface "{{ port.parent_mac }}" )
+CMD="docker exec $CONTAINER ifconfig $SRC_DEV >> /dev/null || pipework $SRC_DEV -i {{ port.device }} $CONTAINER {{ port.ip }}/24$NEXTHOP_ARG {{ port.mac }} $CTAG_ARG"
 echo $CMD
 eval $CMD
 
@@ -56,6 +81,17 @@
 {% endif %}
 
 DEVICE="{{ port.device }}"
+BRIDGE="{{ port.bridge }}"
+{% if port.s_tag %}
+# This is intended for lan_network. Assume that BRIDGE is set to br-lan. We
+# create a device that strips off the S-TAG.
+STAG="{{ port.s_tag }}"
+encapsulate_stag $BRIDGE $STAG
+SRC_DEV=$STAG_IFACE
+{% else %}
+# This is for a standard neutron private network. We use a donor VM to set up
+# openvswitch for us, and we snoop at its devices and create a tap using the
+# same settings.
 XOS_NETWORK_ID="{{ port.xos_network_id }}"
 INSTANCE_MAC="{{ port.snoop_instance_mac }}"
 INSTANCE_ID="{{ port.snoop_instance_id }}"
@@ -71,12 +107,14 @@
 ovs-vsctl show | grep -i $TAP
 if [[ $? == 1 ]]; then
     echo creating tap
-    ovs-vsctl add-port br-int $TAP tag=$VLAN_ID -- set interface $TAP type=internal
+    ovs-vsctl add-port $BRIDGE $TAP tag=$VLAN_ID -- set interface $TAP type=internal
 else
     echo tap exists
 fi
+SRC_DEV=$TAP
+{% endif %}
 
-CMD="docker exec $CONTAINER ifconfig $DEVICE >> /dev/null || pipework $TAP -i $DEVICE $CONTAINER $IP/24$NEXTHOP_ARG $MAC $CTAG_ARG"
+CMD="docker exec $CONTAINER ifconfig $DEVICE >> /dev/null || pipework $SRC_DEV -i $DEVICE $CONTAINER $IP/24$NEXTHOP_ARG $MAC $CTAG_ARG"
 echo $CMD
 eval $CMD
 {% endif %}
diff --git a/xos/tosca/tests/allObserverTests.py b/xos/tosca/tests/allObserverTests.py
index d06daba..6a566a9 100644
--- a/xos/tosca/tests/allObserverTests.py
+++ b/xos/tosca/tests/allObserverTests.py
@@ -1,11 +1,13 @@
-from observerComputeTest import ObserverComputeTest
+from observerVMTest import ObserverVMTest
+from observerContainerTest import ObserverContainerTest
 from observerImageTest import ObserverImageTest
 from observerUserTest import ObserverUserTest
 from observerSiteTest import ObserverSiteTest
 from observerSliceTest import ObserverSliceTest
 
 if __name__ == "__main__":
-    ObserverComputeTest()
+    ObserverVMTest()
+    ObserverContainerTest()
     ObserverImageTest()
     ObserverSiteTest()
     ObserverUserTest()
diff --git a/xos/tosca/tests/basetest.py b/xos/tosca/tests/basetest.py
index f7f04eb..d9701d7 100644
--- a/xos/tosca/tests/basetest.py
+++ b/xos/tosca/tests/basetest.py
@@ -67,15 +67,23 @@
 
         return yml
 
-    def make_compute(self, slice, name, caps={}, props={}, reqs=[], num_cpus="1", disk_size="10 GB", mem_size="4 MB"):
+    def make_compute(self, slice, name, caps={}, props={}, reqs=[], num_cpus="1", disk_size="10 GB", mem_size="4 MB", isolation="vm"):
         reqs = reqs[:]
+        props = props.copy()
         caps = caps.copy()
 
+        if isolation=="container":
+            type = "tosca.nodes.Compute.Container"
+        elif isolation=="container_vm":
+            type = "tosca.nodes.Compute.ContainerVM"
+        else:
+            type = "tosca.nodes.Compute"
+
         caps.update( {"host": {"num_cpus": num_cpus, "disk_size": disk_size, "mem_size": mem_size},
                       "os": {"architecture": "x86_64", "type": "linux", "distribution": "rhel", "version": "6.5"}} )
         reqs.append( (slice, "tosca.relationships.MemberOfSlice") )
 
-        return self.make_nodetemplate(name, "tosca.nodes.Compute",
+        return self.make_nodetemplate(name, type,
                                       caps= caps,
                                       props = props,
                                       reqs= reqs)
diff --git a/xos/tosca/tests/observerContainerTest.py b/xos/tosca/tests/observerContainerTest.py
new file mode 100644
index 0000000..a31b866
--- /dev/null
+++ b/xos/tosca/tests/observerContainerTest.py
@@ -0,0 +1,95 @@
+from observertest import BaseObserverToscaTest
+
+from core.models import Instance, Site
+
+# Note that as a side effect, these tests will also create a Site
+
+class ObserverContainerTest(BaseObserverToscaTest):
+    tests = ["create_container"]
+    # hide_observer_output = False # uncomment to display lots of stuff to screen
+
+    def cleanup(self):
+        # We don't want to leak resources, so we make sure to let the observer
+        # attempt to delete these objects.
+        self.try_to_delete(Instance, purge=False, name="test_compute1")
+        self.try_to_delete(Site, purge=False, name="testsite")
+        self.run_observer()
+        # The site objects don't seem to go away nicely, they linger about and
+        # cause an IntegrityError due to a duplicate login_base
+        self.try_to_delete(Site, purge=True, name="testsite")
+
+    def get_base_templates(self):
+        return self.make_nodetemplate("testsite", "tosca.nodes.Site") + \
+               self.make_nodetemplate("testsite_slice1", "tosca.nodes.Slice", reqs=[("testsite", "tosca.relationships.MemberOfSite")]) + \
+               self.make_nodetemplate("andybavier/docker-vcpe", "tosca.nodes.Image", props={"kind": "container", "container_format": "na", "disk_format": "na"})
+
+    def create_container(self):
+        self.assert_noobj(Instance, "test_compute1")
+        self.execute(self.get_base_templates() +
+                     self.make_compute("testsite_slice1", "test_compute1", disk_size="1 GB", mem_size="513 MB", isolation="container",
+                                       reqs=[("andybavier/docker-vcpe", "tosca.relationships.UsesImage")],
+                                       ))
+        instance = self.assert_obj(Instance, "test_compute1")
+        assert(instance.flavor.name == "m1.small")
+
+        # first pass makes the Networks
+        self.run_model_policy(save_output="/tmp/instancetest:create_container:model_policy_first")
+
+        # XXX deal with bug where
+        instance = self.assert_obj(Instance, "test_compute1")
+        instance.save()
+
+        # second pass makes the NetworkControllers
+        self.run_model_policy(save_output="/tmp/instancetest:create_container:model_policy_second")
+
+        # first observer pass should make any necessary networks or ports
+        self.run_observer(save_output="/tmp/instancetest:create_container:observer_first")
+
+        # reset the exponential backoff
+        instance = self.assert_obj(Instance, "test_compute1")
+        instance.backend_register="{}"
+        instance.save()
+
+        # we need to reset the companion instance's exponential backoff too
+        companion_instance = Instance.objects.filter(slice=instance.slice, isolation="vm")
+        assert(companion_instance)
+        companion_instance = companion_instance[0]
+        companion_instance.backend_register="{}"
+        companion_instance.save()
+
+        # third pass reset lazy_blocked
+        self.run_model_policy(save_output="/tmp/instancetest:create_container:model_policy_third")
+
+        # second observer pass should instantiate the controller networks
+        #    (might instantiate the instance, too)
+        self.run_observer(save_output="/tmp/instancetest:create_container:observer_second")
+
+        # reset the exponential backoff
+        instance = self.assert_obj(Instance, "test_compute1")
+        instance.backend_register="{}"
+        instance.save()
+
+        # we need to reset the companion instance's exponential backoff too
+        companion_instance = Instance.objects.filter(slice=instance.slice, isolation="vm")
+        assert(companion_instance)
+        companion_instance = companion_instance[0]
+        companion_instance.backend_register="{}"
+        companion_instance.save()
+
+        # third observer pass should instantiate the companion instance
+        self.run_observer(save_output="/tmp/instancetest:create_container:observer_third")
+
+        # third observer pass should instantiate the instance
+        self.run_observer(save_output="/tmp/instancetest:create_container:observer_fourth")
+
+        instance = self.assert_obj(Instance, "test_compute1")
+
+        assert(instance.instance_id is not None)
+        assert(instance.instance_name is not None)
+
+        # there should be one port on the private network
+        assert(instance.ports.count() == 1)
+
+if __name__ == "__main__":
+    ObserverContainerTest()
+
diff --git a/xos/tosca/tests/observerComputeTest.py b/xos/tosca/tests/observerVMTest.py
similarity index 86%
rename from xos/tosca/tests/observerComputeTest.py
rename to xos/tosca/tests/observerVMTest.py
index 972b62c..65cbde5 100644
--- a/xos/tosca/tests/observerComputeTest.py
+++ b/xos/tosca/tests/observerVMTest.py
@@ -4,8 +4,8 @@
 
 # Note that as a side effect, these tests will also create a Site
 
-class ObserverComputeTest(BaseObserverToscaTest):
-    tests = ["create_instance"]
+class ObserverVMTest(BaseObserverToscaTest):
+    tests = ["create_vm"]
     # hide_observer_output = False # uncomment to display lots of stuff to screen
 
     def cleanup(self):
@@ -22,7 +22,7 @@
         return self.make_nodetemplate("testsite", "tosca.nodes.Site") + \
                self.make_nodetemplate("testsite_slice1", "tosca.nodes.Slice", reqs=[("testsite", "tosca.relationships.MemberOfSite")])
 
-    def create_instance(self):
+    def create_vm(self):
         self.assert_noobj(Instance, "test_compute1")
         self.execute(self.get_base_templates() +
                      self.make_compute("testsite_slice1", "test_compute1", disk_size="1 GB", mem_size="513 MB"))
@@ -30,13 +30,13 @@
         assert(instance.flavor.name == "m1.small")
 
         # first pass makes the Networks
-        self.run_model_policy(save_output="/tmp/instancetest:create_instance:model_policy_first")
+        self.run_model_policy(save_output="/tmp/instancetest:create_vm:model_policy_first")
 
         # second pass makes the NetworkControllers
-        self.run_model_policy(save_output="/tmp/instancetest:create_instance:model_policy_second")
+        self.run_model_policy(save_output="/tmp/instancetest:create_vm:model_policy_second")
 
         # first observer pass should make any necessary networks or ports
-        self.run_observer(save_output="/tmp/instancetest:create_instance:observer_first")
+        self.run_observer(save_output="/tmp/instancetest:create_vm:observer_first")
 
         # reset the exponential backoff
         instance = self.assert_obj(Instance, "test_compute1")
@@ -44,11 +44,11 @@
         instance.save()
 
         # third pass reset lazy_blocked
-        self.run_model_policy(save_output="/tmp/instancetest:create_instance:model_policy_third")
+        self.run_model_policy(save_output="/tmp/instancetest:create_vm:model_policy_third")
 
         # second observer pass should instantiate the controller networks
         #    (might instantiate the instance, too)
-        self.run_observer(save_output="/tmp/instancetest:create_instance:observer_second")
+        self.run_observer(save_output="/tmp/instancetest:create_vm:observer_second")
 
         # reset the exponential backoff
         instance = self.assert_obj(Instance, "test_compute1")
@@ -56,13 +56,16 @@
         instance.save()
 
         # third observer pass should instantiate the instance
-        self.run_observer(save_output="/tmp/instancetest:create_instance:observer_third")
+        self.run_observer(save_output="/tmp/instancetest:create_vm:observer_third")
 
         instance = self.assert_obj(Instance, "test_compute1")
 
         assert(instance.instance_id is not None)
         assert(instance.instance_name is not None)
 
+        # there should be a port on the private network and a port on nat-net
+        assert(instance.ports.count() == 2)
+
 if __name__ == "__main__":
-    ObserverComputeTest()
+    ObserverVMTest()