Merge branch 'master' of https://github.com/open-cloud/xos into feature/bootstrap
diff --git a/containers/README b/containers/README
new file mode 100644
index 0000000..9891efe
--- /dev/null
+++ b/containers/README
@@ -0,0 +1,51 @@
+
+1. Introduction
+
+  XOS comprises three core services:
+
+  * A database backend (postgres)
+  * A webserver front end (django)
+  * A synchronizer daemon that interacts with the OpenStack backend.
+
+  We have created separate dockerfiles for each of these services, making it easier to
+  build the services independently and to deploy and run them in isolated environments.
+
+2. Database Container
+  
+  To build and run the database container:
+  
+  $ cd postgres; make build && make run;
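+
+  A quick way to confirm the container came up (a convenience check, not a
+  Makefile target):
+
+  $ docker ps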
+
+3. XOS container
+  
+  To build and run the xos webserver container:
+
+  $ cd xos; make build && make run;
+
+  You should now be able to access the login page by visiting http://localhost:80 and
+  log in using the default padmin account. It may be helpful to bootstrap xos with
+  some sample data: deployments, controllers, sites, slices, etc. You can get started by
+  loading the tosca configuration for the opencloud demo dataset:
+
+  $ cd xos; make runtosca;
+
+  Or you can create your own tosca configuration file and customize the dataset however you
+  want. You can also load your own tosca configuration by setting the TOSCA_CONFIG_PATH
+  environment variable before executing the make command:
+
+  $ cd xos; TOSCA_CONFIG_PATH=/path/to/tosca/config.yaml make runtosca
+
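+  As a sketch, a minimal custom tosca file follows the same shape as the bundled
+  datasets; the site below is illustrative, not part of any shipped dataset (see
+  custom_types/xos.yaml for the available node types and properties):
+
+    tosca_definitions_version: tosca_simple_yaml_1_0
+    description: My custom dataset
+    imports:
+       - custom_types/xos.yaml
+    topology_template:
+      node_templates:
+        mysite:
+          type: tosca.nodes.Site
+          properties:
+            display_name: My Site
+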
+4. Synchronizer container
+
+  The synchronizer shares many of the same dependencies as the xos container. The synchronizer
+  container takes advantage of this by building itself on top of the xos image. This means
+  you must build the xos image before building the synchronizer image. The XOS and
+  synchronizer containers can run on separate hosts, but you must build the xos image
+  on the host where you plan to run the synchronizer container. Assuming you have already
+  built the xos container, executing the following will build and run the synchronizer container:
+
+  $ cd synchronizer; make build && make run
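+
+  To check on the synchronizer afterwards you can tail its logs; the container
+  name, synchronizer-server, comes from CONTAINER_NAME in the Makefile:
+
+  $ docker logs synchronizer-server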
+
+  
+
+
diff --git a/containers/observer/Makefile b/containers/observer/Makefile
deleted file mode 100644
index e7fedf5..0000000
--- a/containers/observer/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-CONTAINER_NAME:=observer-server
-
-.PHONY: build
-build: ; docker build --rm -t observer .
-
-.PHONY: run
-run: ; docker run -d --name ${CONTAINER_NAME} observer
-
-.PHONY: stop
-stop: ; docker stop ${CONTAINER_NAME}
-
-.PHONY: rm
-rm: ; docker rm ${CONTAINER_NAME}
diff --git a/containers/observer/Dockerfile b/containers/synchronizer/Dockerfile
similarity index 87%
rename from containers/observer/Dockerfile
rename to containers/synchronizer/Dockerfile
index 7ec6592..44b058e 100644
--- a/containers/observer/Dockerfile
+++ b/containers/synchronizer/Dockerfile
@@ -25,7 +25,7 @@
 # For Observer
 RUN mkdir -p /usr/local/share /bin /etc/ansible
 
-RUN cp /tmp/xos/containers/observer/conf/ansible-hosts /etc/ansible/hosts
+RUN cp /tmp/xos/containers/synchronizer/conf/ansible-hosts /etc/ansible/hosts
 
 ADD http://phantomjs.googlecode.com/files/phantomjs-1.7.0-linux-x86_64.tar.bz2 /usr/local/share/
 
@@ -39,6 +39,6 @@
 
 
 # Supervisor
-RUN cp /tmp/xos/containers/observer/conf/observer.conf /etc/supervisor/conf.d/
+RUN cp /tmp/xos/containers/synchronizer/conf/synchronizer.conf /etc/supervisor/conf.d/
 
-CMD /usr/bin/supervisord -c /etc/supervisor/conf.d/observer.conf
+CMD /usr/bin/supervisord -c /etc/supervisor/conf.d/synchronizer.conf
diff --git a/containers/synchronizer/Makefile b/containers/synchronizer/Makefile
new file mode 100644
index 0000000..14520d9
--- /dev/null
+++ b/containers/synchronizer/Makefile
@@ -0,0 +1,13 @@
+CONTAINER_NAME:=synchronizer-server
+
+.PHONY: build
+build: ; docker build --rm -t synchronizer .
+
+.PHONY: run
+run: ; docker run -d --name ${CONTAINER_NAME} synchronizer
+
+.PHONY: stop
+stop: ; docker stop ${CONTAINER_NAME}
+
+.PHONY: rm
+rm: ; docker rm ${CONTAINER_NAME}
diff --git a/containers/observer/conf/ansible-hosts b/containers/synchronizer/conf/ansible-hosts
similarity index 100%
rename from containers/observer/conf/ansible-hosts
rename to containers/synchronizer/conf/ansible-hosts
diff --git a/containers/observer/conf/observer.conf b/containers/synchronizer/conf/synchronizer.conf
similarity index 62%
rename from containers/observer/conf/observer.conf
rename to containers/synchronizer/conf/synchronizer.conf
index 48f61dd..cda6716 100644
--- a/containers/observer/conf/observer.conf
+++ b/containers/synchronizer/conf/synchronizer.conf
@@ -3,7 +3,7 @@
 pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
 nodaemon=true
 
-[program:observer]
+[program:synchronizer]
 command=python /opt/xos/xos-observer.py
-stderr_logfile=/var/log/supervisor/observer.err.log
-stdout_logfile=/var/log/supervisor/observer.out.log
+stderr_logfile=/var/log/supervisor/synchronizer.err.log
+stdout_logfile=/var/log/supervisor/synchronizer.out.log
diff --git a/xos/ceilometer/admin.py b/xos/ceilometer/admin.py
index 987877c..3ed70b2 100644
--- a/xos/ceilometer/admin.py
+++ b/xos/ceilometer/admin.py
@@ -81,6 +81,18 @@
     form = MonitoringChannelForm
 
     suit_form_tabs = (('general','Details'),)
+    actions=['delete_selected_objects']
+
+    def get_actions(self, request):
+        actions = super(MonitoringChannelAdmin, self).get_actions(request)
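+        # Django's stock 'delete_selected' bulk-deletes through the queryset and
+        # bypasses each object's delete() method, so drop it in favor of the
+        # per-object action below.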
+        if 'delete_selected' in actions:
+            del actions['delete_selected']
+        return actions
+
+    def delete_selected_objects(self, request, queryset):
+        for obj in queryset:
+            obj.delete()
+    delete_selected_objects.short_description = "Delete Selected MonitoringChannel Objects"
 
     def queryset(self, request):
         return MonitoringChannel.get_tenant_objects_by_user(request.user)
diff --git a/xos/ceilometer/models.py b/xos/ceilometer/models.py
index a838c4e..e149eb5 100644
--- a/xos/ceilometer/models.py
+++ b/xos/ceilometer/models.py
@@ -29,7 +29,7 @@
 
     sync_attributes = ("private_ip", "private_mac",
                        "ceilometer_ip", "ceilometer_mac",
-                       "nat_ip", "nat_mac",)
+                       "nat_ip", "nat_mac", "ceilometer_port",)
 
     default_attributes = {}
     def __init__(self, *args, **kwargs):
@@ -37,6 +37,11 @@
         if ceilometer_services:
             self._meta.get_field("provider_service").default = ceilometer_services[0].id
         super(MonitoringChannel, self).__init__(*args, **kwargs)
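+        # Share one backing instance across MonitoringChannel tenants instead of
+        # creating a VM per tenant (assumption: this attribute is honored by
+        # TenantWithContainer's instance-picking logic).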
+        self.set_attribute("use_same_instance_for_multiple_tenants", True)
+
+    def can_update(self, user):
+        #Allow creation of this model instances for non-admin users also
+        return True
 
     def save(self, *args, **kwargs):
         if not self.creator:
@@ -62,7 +67,7 @@
 
     @property
     def addresses(self):
-        if not self.instance:
+        if (not self.id) or (not self.instance):
             return {}
 
         addresses = {}
@@ -121,6 +126,12 @@
             for cs in slice.controllerslices.all():
                 if cs.tenant_id:
                     tenant_ids.add(cs.tenant_id)
+        if self.creator.is_admin:
+            #TODO: Ceilometer publishes the SDN meters without associating them with any tenant ID.
+            #For now, the ceilometer code is changed to publish all such meters with tenant
+            #id "default_admin_tenant". Here we add that default tenant as an authorized tenant_id
+            #for all admin users.
+            tenant_ids.add("default_admin_tenant")
         return tenant_ids
 
     @property
@@ -132,10 +143,17 @@
         return ", ".join(self.tenant_list)
 
     @property
+    def ceilometer_port(self):
+        # TODO: Find better logic for choosing a unique ceilometer port number for each instance
+        if not self.id:
+            return None
+        return 8888+self.id
+
+    @property
     def ceilometer_url(self):
         if not self.ceilometer_ip:
             return None
-        return "http://" + self.private_ip + ":8888/"
+        return "http://" + self.private_ip + ":" + str(self.ceilometer_port) + "/"
 
 def model_policy_monitoring_channel(pk):
     # TODO: this should be made in to a real model_policy
diff --git a/xos/configurations/bash/Makefile.inside b/xos/configurations/bash/Makefile.inside
index 30bf6d6..176ef47 100644
--- a/xos/configurations/bash/Makefile.inside
+++ b/xos/configurations/bash/Makefile.inside
@@ -2,5 +2,6 @@
 
 setup_xos:
 	bash /opt/xos/scripts/docker_setup_xos
+	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/fixtures.yaml
 	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/cloudlab.yaml
 	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/cloudlab-nodes.yaml
diff --git a/xos/configurations/common/Dockerfile.common b/xos/configurations/common/Dockerfile.common
index ede0cb3..d78585a 100644
--- a/xos/configurations/common/Dockerfile.common
+++ b/xos/configurations/common/Dockerfile.common
@@ -60,7 +60,6 @@
 RUN pip install django_rest_swagger
 
 RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-setuptools
-RUN easy_install django_evolution
 RUN easy_install python_gflags
 RUN easy_install --upgrade httplib2
 RUN easy_install google_api_python_client
diff --git a/xos/configurations/common/Makefile.cloudlab b/xos/configurations/common/Makefile.cloudlab
index 43e7497..6e609d7 100644
--- a/xos/configurations/common/Makefile.cloudlab
+++ b/xos/configurations/common/Makefile.cloudlab
@@ -13,6 +13,7 @@
 
 flat_name:
 	sudo bash -c "source /root/setup/admin-openrc.sh ; neutron net-list" |grep flat|awk '{printf "%s",$$4}' > flat_net_name
+	[ -s flat_net_name ] # throw error if flat_net_name is empty
 
 nodes_yaml:
 	bash ./make-cloudlab-nodes-yaml.sh
diff --git a/xos/configurations/common/fixtures.yaml b/xos/configurations/common/fixtures.yaml
new file mode 100644
index 0000000..c5e9dd1
--- /dev/null
+++ b/xos/configurations/common/fixtures.yaml
@@ -0,0 +1,23 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Some basic fixtures
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    s_tag:
+      type: tosca.nodes.NetworkParameterType
+
+    c_tag:
+      type: tosca.nodes.NetworkParameterType
+
+    next_hop:
+      type: tosca.nodes.NetworkParameterType
+
+    device:
+      type: tosca.nodes.NetworkParameterType
+
+    bridge:
+      type: tosca.nodes.NetworkParameterType
diff --git a/xos/configurations/cord/Makefile b/xos/configurations/cord/Makefile
index 4c2e423..86b4ca1 100644
--- a/xos/configurations/cord/Makefile
+++ b/xos/configurations/cord/Makefile
@@ -2,7 +2,7 @@
 RUNNING_CONTAINER:=$(shell sudo docker ps|grep "xos"|awk '{print $$NF}')
 LAST_CONTAINER=$(shell sudo docker ps -l -q)
 
-test: common_cloudlab ceilometer_dashboard
+cord: common_cloudlab ceilometer_dashboard
 	echo "# Autogenerated -- do not edit" > Dockerfile
 	cat ../common/Dockerfile.common Dockerfile.cord >> Dockerfile
 	cd ../../..; sudo docker build -t xos -f xos/configurations/cord/Dockerfile .
diff --git a/xos/configurations/cord/Makefile.inside b/xos/configurations/cord/Makefile.inside
index c412f25..b8e23f8 100644
--- a/xos/configurations/cord/Makefile.inside
+++ b/xos/configurations/cord/Makefile.inside
@@ -2,6 +2,7 @@
 
 setup_xos:
 	bash /opt/xos/scripts/docker_setup_xos
+	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/fixtures.yaml
 	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/cloudlab.yaml
 	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/cloudlab-nodes.yaml
 	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord/cord.yaml
diff --git a/xos/configurations/cord/README.md b/xos/configurations/cord/README.md
index 4d64953..e1b2d2e 100644
--- a/xos/configurations/cord/README.md
+++ b/xos/configurations/cord/README.md
@@ -43,13 +43,17 @@
 
 To get started on CloudLab:
 * Create an experiment using the *OpenStack-CORD* profile.  (You can also use the *OpenStack* profile, but choose *Kilo*
-and disable security groups.)
+with two compute nodes and disable security groups.)
 * Wait until you get an email from CloudLab with title "OpenStack Instance Finished Setting Up".
 * Login to the *ctl* node of your experiment and run:
 ```
-$ git clone https://github.com/open-cloud/xos.git
-$ cd xos/xos/configurations/cord/
-$ make
+ctl:~$ git clone https://github.com/open-cloud/xos.git
+ctl:~$ cd xos/xos/configurations/cord/
+```
+Edit `cord.yaml` in this directory.  Change the hostnames `cp-1.devel.xos-pg0.clemson.cloudlab.us` and
+`cp-2.devel.xos-pg0.clemson.cloudlab.us` to the names of the compute nodes in your experiment.  Now run:
+```
+ctl:~/xos/xos/configurations/cord$ make
 ```
 
 Running `make` in this directory creates the XOS Docker container and runs the TOSCA engine with `cord.yaml` to
@@ -66,12 +70,67 @@
 ONOS app for the vCPE. To verify that it has received an IP address mapping, look at the **Routeable subnet:** field in 
 the appropriate *Vbng tenant* object in XOS.  It should contain an IP address in the 10.254.0.0/24 subnet.
 
+After launching the ONOS apps, it is necessary to configure software switches along the dataplane so that ONOS can control
+them.  To do this, from the `cord` configuration directory:
+```
+ctl:~/xos/xos/configurations/cord$ cd dataplane/
+ctl:~/xos/xos/configurations/cord/dataplane$ ./gen-inventory.sh > hosts
+ctl:~/xos/xos/configurations/cord/dataplane$ ansible-playbook -i hosts dataplane.yaml
+```
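+
+The generated `hosts` inventory looks roughly like the following (the addresses are
+illustrative; `gen-inventory.sh` fills in the real ones from `nova list`):
+```
+onos_volt    ansible_ssh_host=10.11.10.24
+switch_volt  ansible_ssh_host=10.11.10.25 subscriber_ip=10.0.2.3 lan_ip=10.0.3.3
+client       ansible_ssh_host=10.11.10.26 subscriber_ip=10.0.2.4
+vcpe         ansible_ssh_host=10.11.10.27 lan_ip=10.0.3.4
+```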
+
+To set up the dataplane for containers on bare metal, perform these steps in addition to the above (note: make sure to use sudo when running the playbook):
+```
+ctl:~/xos/xos/configurations/cord/dataplane$ ./generate-bm.sh > hosts-bm   
+ctl:~/xos/xos/configurations/cord/dataplane$ sudo ansible-playbook -i hosts-bm dataplane-bm.yaml
+```
+
+Check that the vCPE container has started by going into the XOS UI, selecting 'Services', 'service_vcpe', 'Administration', 'Vcpe Tenants', and making sure there's a green icon next to the vCPE.
+
+If the vCPE Tenant is still red, the Instance may be in exponential backoff due to errors that occurred while it was trying to sync before dataplane.yaml was run. You can reset the exponential backoff by tracking down the vCPE Instance (Slices->mysite_vcpe->Instances; find the Instance associated with the vCPE Tenant) and hitting the save button.
+
+Currently the vOLT switch does not forward ARP, so it is necessary to set up ARP mappings between the client
+and vCPE.  Log into the client and add an ARP entry for the vCPE:
+```
+client:$ sudo arp -s 192.168.0.1 <mac-of-eth1-in-vCPE-container>
+```
+Inside the vCPE container add a similar entry for the client:
+```
+vcpe:$ arp -s 192.168.0.2 <mac-of-br-sub-on-client>
+```
+
+Now SSH into ONOS running the OLT app (see below) and activate the subscriber:
+```
+onos> add-subscriber-access of:0000000000000001 1 432
+```
+
+At this point you should be able to ping 192.168.0.1 from the client.  The final step is to set the 
+vCPE as the gateway on the client:
+```
+client:$ sudo route del default gw 10.11.10.5
+client:$ sudo route add default gw 192.168.0.1
+```
+The client should now be able to surf the Internet through the dataplane.
+
+## Setting up /etc/hosts
+
+To make it easy to log into the various VMs that make up the dataplane, add entries for them into `/etc/hosts` on the 
+*ctl* node.  As root, run:
+```
+ctl:~/xos/xos/configurations/cord/dataplane$ ./gen-etc-hosts.sh >> /etc/hosts
+```
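+This appends entries like the following (addresses illustrative; `gen-etc-hosts.sh`
+looks them up with `nova list`):
+```
+10.11.10.20 onos_vbng
+10.11.10.21 switch_vbng
+10.11.10.22 onos_volt
+10.11.10.23 switch_volt
+10.11.10.24 client
+10.11.10.25 vcpe
+```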
+For example, to log into the client:
+```
+ctl:~$ ssh ubuntu@client
+```
+
 ## How to log into ONOS
 
-The ONOS Docker container runs in the VMs belonging to the *mysite_onos* slice.  All ports exposed by the ONOS container are forwarded to the outside, and can be accessed from the *ctl* node using the `flat-lan-1-net` address of the hosting VM.  For example, if the IP addresss of the VM is 10.11.10.30, then it is possible to SSH to ONOS as follows (password is *karaf*):
+ONOS apps are run inside Docker containers hosted in VMs.  All ports exposed by the ONOS container are forwarded to the 
+outside, and can be accessed from the *ctl* node over the `flat-lan-1-net` network.  Assuming that `/etc/hosts`
+has been configured as described above, it is possible to SSH to the ONOS running the `virtualbng` app as follows (password is *karaf*):
 
 ```
-$ ssh -p 8101 karaf@10.11.10.30
+$ ssh -p 8101 karaf@onos_vbng
 Password authentication
 Password:
 Welcome to Open Network Operating System (ONOS)!
@@ -95,3 +154,9 @@
    Private IP - Public IP
    10.0.1.3 - 10.254.0.129
 ```
+
+## Troubleshooting
+
+#### Problem: No external connectivity from vCPE container
+1. Make sure the hosts listed in `virtualbng.json` are the actual compute nodes used in your experiment.
+2. Try restarting the ONOS container running the `virtualbng` app: `$ ssh ubuntu@onos_vbng "sudo docker restart ONOS"`
diff --git a/xos/configurations/cord/ceilometer_vcpe_notification_agent.tar.gz b/xos/configurations/cord/ceilometer_vcpe_notification_agent.tar.gz
deleted file mode 100644
index dcc6765..0000000
--- a/xos/configurations/cord/ceilometer_vcpe_notification_agent.tar.gz
+++ /dev/null
Binary files differ
diff --git a/xos/configurations/cord/cord.yaml b/xos/configurations/cord/cord.yaml
index e9a6291..9929a84 100644
--- a/xos/configurations/cord/cord.yaml
+++ b/xos/configurations/cord/cord.yaml
@@ -7,7 +7,6 @@
 
 topology_template:
   node_templates:
-
     # CORD Services
     service_volt:
       type: tosca.nodes.Service
@@ -15,6 +14,12 @@
           - vcpe_tenant:
               node: service_vcpe
               relationship: tosca.relationships.TenantOfService
+          - lan_network:
+              node: lan_network
+              relationship: tosca.relationships.UsesNetwork
+          - wan_network:
+              node: wan_network
+              relationship: tosca.relationships.UsesNetwork
       properties:
           view_url: /admin/cord/voltservice/$id$/
           kind: vOLT
@@ -29,6 +34,7 @@
           view_url: /admin/cord/vcpeservice/$id$/
           backend_network_label: hpc_client
           public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          private_key_fn: /opt/xos/observers/vcpe/vcpe_private_key
       artifacts:
           pubkey: /opt/xos/observers/vcpe/vcpe_public_key
 
@@ -58,7 +64,7 @@
 # - (Synchronizer should copy the files to ONOS container immediately)
 # - Log into service_ONOS_vBNG VM and restart ONOS Docker container
 #   (Should roll this step into a Synchronizer)
 #
     vBNG_ONOS_app:
       type: tosca.nodes.ONOSvBNGApp
       requirements:
@@ -116,6 +122,7 @@
       artifacts:
           pubkey: /opt/xos/observers/onos/onos_key.pub
 
+
     vOLT_ONOS_app:
       type: tosca.nodes.ONOSvOLTApp
       requirements:
@@ -126,7 +133,22 @@
               node: service_volt
               relationship: tosca.relationships.UsedByService
       properties:
-          dependencies: org.onosproject.olt
+          dependencies: org.onosproject.openflow, org.onosproject.olt
+          config_network-cfg.json: >
+            {
+              "devices" : {
+                "of:0000000000000001" : {
+                  "accessDevice" : {
+                    "uplink" : "2",
+                    "vlan"   : "222",
+                    "defaultVlan" : "1"
+                  },
+                  "basic" : {
+                    "driver" : "default"
+                  }
+                }
+              }
+            }
 
     # Network templates
     Private:
@@ -225,6 +247,11 @@
           - site:
               node: mysite
               relationship: tosca.relationships.MemberOfSite
+          - vcpe_docker_image:
+              node: andybavier/docker-vcpe
+              relationship: tosca.relationships.UsesImage
+      properties:
+          default_isolation: container
 
     mysite_onos_vbng:
       description: ONOS Controller Slice for vBNG
@@ -387,6 +414,15 @@
                 node: mysite_clients
                 relationship: tosca.relationships.MemberOfSlice
 
+    # docker image for vcpe containers
+    andybavier/docker-vcpe:
+      # TODO: need to attach this to mydeployment
+      type: tosca.nodes.Image
+      properties:
+        kind: container
+        container_format: na
+        disk_format: na
+
     # A subscriber
     My House:
        type: tosca.nodes.CORDSubscriber
@@ -441,7 +477,8 @@
         type: tosca.nodes.VOLTTenant
         properties:
             service_specific_id: 123
-            vlan_id: 432
+            s_tag: 222
+            c_tag: 432
         requirements:
             - provider_service:
                 node: service_volt
diff --git a/xos/configurations/cord/dataplane/dataplane-bm.yaml b/xos/configurations/cord/dataplane/dataplane-bm.yaml
new file mode 100644
index 0000000..e1e78ee
--- /dev/null
+++ b/xos/configurations/cord/dataplane/dataplane-bm.yaml
@@ -0,0 +1,36 @@
+---
+- hosts: switch_volt
+  sudo: yes
+  tasks:
+  - name: Create tunnel port on br-lan
+    openvswitch_port:
+      bridge=br-lan
+      port={{ item }}
+      state=present
+    with_items: "grenames.split(' ')"
+
+  - name: Set up GRE tunnel to vCPE
+    shell: ovs-vsctl set Interface {{ item.0 }} type=gre options:remote_ip={{ item.1 }}
+    with_together:
+      - "grenames.split(' ')"
+      - "bm_ips.split(' ')"
+
+- hosts: baremetal
+
+  user: root
+  sudo: no
+  tasks:
+  - name: Create br-lan
+    openvswitch_bridge:
+      bridge=br-lan
+      state=present
+
+  - name: Create tunnel port
+    openvswitch_port:
+      bridge=br-lan
+      port={{ grename }}
+      state=present
+
+  - name: Configure GRE tunnel to vOLT switch
+    shell: ovs-vsctl set Interface {{ grename }} type=gre options:remote_ip={{ volt_addr }}
+
diff --git a/xos/configurations/cord/dataplane/dataplane.yaml b/xos/configurations/cord/dataplane/dataplane.yaml
index 4799515..f43e4d7 100644
--- a/xos/configurations/cord/dataplane/dataplane.yaml
+++ b/xos/configurations/cord/dataplane/dataplane.yaml
@@ -47,6 +47,9 @@
       port={{ public_net.stdout }}
       state=present
 
+  - name: Remove IP address on public_network
+    command: /sbin/ifconfig {{ public_net.stdout }} 0.0.0.0
+
   - name: Change datapath ID of bridge to match config file
     command: /usr/bin/ovs-vsctl set bridge br-vbng other-config:datapath-id={{ ovs_dpid }}
 
@@ -58,6 +61,7 @@
   vars:
     controller_ip: "{{ hostvars['onos_volt']['ansible_ssh_host'] }}"
     controller_port: 6653
+    vcpe_lan_ip: "{{ hostvars['vcpe']['lan_ip'] }}"
   tags:
   - volt
   tasks:
@@ -73,6 +77,7 @@
     with_items:
     - git
     - python-netifaces
+    - openvswitch-switch
 
   - name: Checkout the Mininet repo
     git: repo=https://github.com/mininet/mininet.git
@@ -87,14 +92,156 @@
     script: scripts/if_from_ip.py {{ subscriber_ip }}
     register: subscriber_net
 
-  - name: Find lan_network interface
-    script: scripts/if_from_ip.py {{ lan_ip }}
-    register: lan_net
+  - name: Create bridge br-sub
+    openvswitch_bridge:
+      bridge=br-sub
+      state=present
+
+  - name: Add subscriber_net to br-sub
+    openvswitch_port:
+      bridge=br-sub
+      port={{ subscriber_net.stdout }}
+      state=present
+
+  # The CPqD switch is expecting that packets coming from the client have
+  # VLAN tag 1.  However Neutron's OvS configuration eats VLAN-tagged packets.
+  # So tag them with VLAN 1 here before sending to CPqD.
+  #
+  # Note that the VLAN tag is 0 in the real-world setup, but the CPqD switch
+  # seems to have a problem with these packets.
+
+  # Using OvS to tag packets with VLAN ID 1 is not quite working for some reason.
+  # The packets from the client get tagged OK, but only the first packet from the
+  # VCPE gets its tag stripped off.  Very weird.  That's why we are using veth
+  # devices instead.
+  #- name: Add tag 1 to br-sub port
+  #  shell: ovs-vsctl set port {{ subscriber_net.stdout }} tag=1
+
+  - name: Create a pair of veth devices
+    shell: ifconfig veth0 >> /dev/null || ip link add veth0 type veth peer name veth1
+
+  - name: Create veth0.1
+    shell: ifconfig veth0.1 >> /dev/null || ip link add link veth0 name veth0.1 type vlan id 1
+
+  - name: Bring the interfaces up
+    shell: ip link set {{ item }} up
+    with_items:
+    - veth0
+    - veth1
+    - veth0.1
+
+  - name: Add veth0.1 to br-sub
+    openvswitch_port:
+      bridge=br-sub
+      port=veth0.1
+      state=present
+
+  - name: Create bridge br-lan
+    openvswitch_bridge:
+      bridge=br-lan
+      state=present
+
+  - name: Create tunnel port on br-lan
+    openvswitch_port:
+      bridge=br-lan
+      port=gre0
+      state=present
+
+  - name: Set up GRE tunnel to vCPE
+    shell: ovs-vsctl set Interface gre0 type=gre options:remote_ip={{ vcpe_lan_ip }}
+
+  - name: Check if br-lan has an IPv6 address
+    shell: ip addr show br-lan|grep inet6|awk '{print $2}'
+    register: ipv6
+
+  - name: Remove br-lan IPv6 address if present
+    shell: ifconfig br-lan inet6 del {{ ipv6.stdout }}
+    when: ipv6.stdout != ""
 
   - name: Run the datapath
-    command: /usr/local/bin/ofdatapath -i {{ subscriber_net.stdout_lines[0] }},{{ lan_net.stdout_lines[0] }} punix:/tmp/s1 -d 000000000001 --no-slicing -D -P
+    command: /usr/local/bin/ofdatapath -i veth1,br-lan punix:/tmp/s1 -d 000000000001 --no-slicing -D -P
       creates=/usr/local/var/run/ofdatapath.pid
 
   - name: Run the control program
     command: /usr/local/bin/ofprotocol unix:/tmp/s1 tcp:{{ controller_ip }}:{{ controller_port }} --fail=closed --listen=punix:/tmp/s1.listen -D -P
       creates=/usr/local/var/run/ofprotocol.pid
+
+- hosts: client
+  sudo: yes
+  tags:
+  - client
+  tasks:
+
+  - name: Fix /etc/hosts
+    lineinfile:
+      dest=/etc/hosts
+      regexp="127.0.0.1 localhost"
+      line="127.0.0.1 localhost {{ ansible_hostname }}"
+
+  - name: Install packages
+    apt: name={{ item }}
+      state=latest
+      update_cache=yes
+    with_items:
+    - openvswitch-switch
+    - python-netifaces
+
+  - name: Create br-sub
+    openvswitch_bridge:
+      bridge=br-sub
+      state=present
+
+  - name: Find subscriber_network interface
+    script: scripts/if_from_ip.py {{ subscriber_ip }}
+    register: client_net
+
+  - name: Hook up subscriber-network to OvS
+    openvswitch_port:
+      bridge=br-sub
+      port={{ client_net.stdout }}
+      state=present
+
+  - name: Run some commands on br-sub
+    shell: "{{ item }}"
+    with_items:
+    - ifconfig br-sub 192.168.0.2 mtu 1400 up
+    - ethtool -K br-sub tso off
+    - ethtool -K br-sub tx off
+
+  # Run dhclient on br-sub internal interface to issue DHCP request to vCPE
+
+#
+# Need to set up a tunnel between vCPE and vOLT to keep VLAN-tagged
+# packets from being swallowed by the network.
+#
+- hosts: vcpe
+  sudo: yes
+  vars:
+    volt_lan_ip: "{{ hostvars['switch_volt']['lan_ip'] }}"
+  tags:
+  - vcpe
+  tasks:
+
+  - name: Install packages
+    apt: name={{ item }}
+      state=latest
+      update_cache=yes
+    with_items:
+    - openvswitch-switch
+
+  - name: Create br-lan
+    openvswitch_bridge:
+      bridge=br-lan
+      state=present
+
+  - name: Create tunnel port
+    openvswitch_port:
+      bridge=br-lan
+      port=gre0
+      state=present
+
+  - name: Configure GRE tunnel to vOLT switch
+    shell: ovs-vsctl set Interface gre0 type=gre options:remote_ip={{ volt_lan_ip }}
+
+  - name: Restart vCPEs
+    script: scripts/restart-vcpes.sh
diff --git a/xos/configurations/cord/dataplane/gen-etc-hosts.sh b/xos/configurations/cord/dataplane/gen-etc-hosts.sh
new file mode 100755
index 0000000..ce98731
--- /dev/null
+++ b/xos/configurations/cord/dataplane/gen-etc-hosts.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# set -x
+
+source ../../common/admin-openrc.sh
+
+get_ip () {
+    LABEL=$1
+    NETWORK=$2
+    nova list --all-tenants|grep $LABEL|sed "s/^.*$NETWORK=//g"|sed 's/; .*$//g'|awk '{print $1}'
+}
+
+cat <<EOF
+$( get_ip mysite_onos_vbng flat-lan-1-net) onos_vbng
+$( get_ip mysite_vbng flat-lan-1-net) switch_vbng
+$( get_ip mysite_onos_volt flat-lan-1-net) onos_volt
+$( get_ip mysite_volt flat-lan-1-net) switch_volt
+$( get_ip mysite_clients flat-lan-1-net) client
+$( get_ip mysite_vcpe flat-lan-1-net) vcpe
+EOF
diff --git a/xos/configurations/cord/dataplane/generate.sh b/xos/configurations/cord/dataplane/gen-inventory.sh
similarity index 65%
rename from xos/configurations/cord/dataplane/generate.sh
rename to xos/configurations/cord/dataplane/gen-inventory.sh
index 360ed67..590376d 100755
--- a/xos/configurations/cord/dataplane/generate.sh
+++ b/xos/configurations/cord/dataplane/gen-inventory.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 # set -x
 
-source ~/admin-openrc.sh
+source ../../common/admin-openrc.sh
 
 get_ip () {
     LABEL=$1
@@ -14,5 +14,8 @@
 switch_vbng  ansible_ssh_host=$( get_ip mysite_vbng flat-lan-1-net) wan_ip=$( get_ip mysite_vbng wan_network) public_ip=$( get_ip mysite_vbng tun0-net )
 
 onos_volt    ansible_ssh_host=$( get_ip mysite_onos_volt flat-lan-1-net)
-switch_volt  ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) subscriber_ip=$( get_ip mysite_volt subscriber_network) lan_ip=$( get_ip mysite_volt lan_network) vcpe_lan_ip=$( get_ip mysite_vcpe lan_network)
+switch_volt  ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) subscriber_ip=$( get_ip mysite_volt subscriber_network) lan_ip=$( get_ip mysite_volt lan_network)
+
+client       ansible_ssh_host=$( get_ip mysite_clients flat-lan-1-net) subscriber_ip=$( get_ip mysite_clients subscriber_network)
+vcpe         ansible_ssh_host=$( get_ip mysite_vcpe flat-lan-1-net) lan_ip=$( get_ip mysite_vcpe lan_network)
 EOF
diff --git a/xos/configurations/cord/dataplane/generate-bm.sh b/xos/configurations/cord/dataplane/generate-bm.sh
new file mode 100755
index 0000000..25f6fa5
--- /dev/null
+++ b/xos/configurations/cord/dataplane/generate-bm.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+source ../../common/admin-openrc.sh
+
+get_ip () {
+    LABEL=$1
+    NETWORK=$2
+    nova list --all-tenants|grep $LABEL|sed "s/^.*$NETWORK=//g"|sed 's/; .*$//g'|awk '{print $1}'
+}
+
+GRE_NAMES=()
+BM_IPS=()
+
+NODES=`sudo bash -c "source /root/setup/admin-openrc.sh ; nova hypervisor-list" |grep cloudlab|awk '{print $4}'`
+I=1
+for NODE in $NODES; do
+    BM_SSH_IP=`getent hosts $NODE | awk '{ print $1 }'`
+    IFS=. read BM_NAME BM_REMAINDER <<< $NODE
+    BM_IP=`sudo grep -i $BM_NAME /root/setup/data-hosts.flat-lan-1 | awk '{print $1}'`
+
+    GRE_NAMES+=("gre-bm-$I")
+    BM_IPS+=("$BM_IP")
+
+    #echo switch_volt$I    ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) grename=gre-bm-$I bm_addr=$BM_IP
+    echo bm$I           ansible_ssh_host=$BM_SSH_IP grename=gre-bm-$I volt_addr=$( get_ip mysite_volt flat-lan-1-net)  ansible_ssh_private_key_file=/root/.ssh/id_rsa
+    I=$(( I+1 ))
+done
+
+GRE_NAMES=${GRE_NAMES[@]}
+BM_IPS=${BM_IPS[@]}
+
+echo switch_volt ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) grenames=\"$GRE_NAMES\" bm_ips=\"$BM_IPS\"
+
+echo "[baremetal]"
+I=1
+for NODE in $NODES; do
+    echo bm$I
+    I=$((I+1))
+done
diff --git a/xos/configurations/cord/dataplane/scripts/if_from_ip.py b/xos/configurations/cord/dataplane/scripts/if_from_ip.py
index be1da48..28524fe 100644
--- a/xos/configurations/cord/dataplane/scripts/if_from_ip.py
+++ b/xos/configurations/cord/dataplane/scripts/if_from_ip.py
@@ -8,7 +8,7 @@
     for iface in netifaces.interfaces():
         addrs = netifaces.ifaddresses(iface)
         if 2 in addrs and addrs[2][0]['addr'] == addr:
-            print iface
-    
+            sys.stdout.write(iface)
+
 if __name__ == "__main__":
     main(sys.argv[1:])
diff --git a/xos/configurations/cord/dataplane/scripts/restart-vcpes.sh b/xos/configurations/cord/dataplane/scripts/restart-vcpes.sh
new file mode 100644
index 0000000..d1c9fce
--- /dev/null
+++ b/xos/configurations/cord/dataplane/scripts/restart-vcpes.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+for VCPE in $( docker ps|grep vcpe|awk '{print $NF}' )
+do
+  service $VCPE stop
+  sleep 1
+  service $VCPE start
+done
diff --git a/xos/configurations/cord/install_ceilometer_patch.sh b/xos/configurations/cord/install_ceilometer_patch.sh
new file mode 100755
index 0000000..279e07c
--- /dev/null
+++ b/xos/configurations/cord/install_ceilometer_patch.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+if [ -d /usr/lib/python2.7/dist-packages/ceilometer/network/ext_services ]; then
+    echo "Seems VCPE notification listeners are already enabled in ceilometer... so exiting gracefully..."
+    exit 0
+fi
+echo "Verifying if all the required files are present"
+if [ ! -f openstack_ceilometer_patch.tar.gz ];
+then
+    echo "File openstack_ceilometer_patch.tar.gz not found"
+    exit 1
+fi
+echo "Copying the ceilometer patch files to /usr/lib/python2.7/dist-packages/ceilometer"
+tar -xzf openstack_ceilometer_patch.tar.gz
+sudo mv ceilometer/network/ext_services /usr/lib/python2.7/dist-packages/ceilometer/network/
+sudo mv ceilometer/network/statistics /usr/lib/python2.7/dist-packages/ceilometer/network/
+sudo mv ceilometer-2015.1.1.egg-info/entry_points.txt /usr/lib/python2.7/dist-packages/ceilometer-*egg-info/
+sudo mv pipeline.yaml /etc/ceilometer/
+echo "Restarting ceilometer-agent-notification"
+sudo service ceilometer-agent-notification restart
+echo "Restarting ceilometer-agent-central"
+sudo service ceilometer-agent-central restart
diff --git a/xos/configurations/cord/install_ceilometer_vcpe_notification_listener.sh b/xos/configurations/cord/install_ceilometer_vcpe_notification_listener.sh
deleted file mode 100755
index 50a4132..0000000
--- a/xos/configurations/cord/install_ceilometer_vcpe_notification_listener.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-if [ -d /usr/lib/python2.7/dist-packages/ceilometer/network/ext_services ]; then
-    echo "Seems VCPE notification listeners are already enabled in ceilometer... so exiting gracefully..."
-    exit 0
-fi
-echo "Verifying if all the required files are present"
-if [ ! -f ceilometer_vcpe_notification_agent.tar.gz ];
-then
-    echo "File ceilometer_vcpe_notification_agent.tar.gz not found"
-    exit 1
-fi
-echo "Copying the ceilometer vcpe notification agent files /usr/lib/python2.7/dist-packages/ceilometer"
-tar -xzf ceilometer_vcpe_notification_agent.tar.gz
-sudo mv ceilometer/network/ext_services /usr/lib/python2.7/dist-packages/ceilometer/network/
-sudo mv ceilometer-2015.1.1.egg-info/entry_points.txt /usr/lib/python2.7/dist-packages/ceilometer-2015.1.1.egg-info/
-echo "Restarting ceilometer-agent-notification"
-sudo service ceilometer-agent-notification restart
diff --git a/xos/configurations/cord/openstack_ceilometer_patch.tar.gz b/xos/configurations/cord/openstack_ceilometer_patch.tar.gz
new file mode 100644
index 0000000..dc1852a
--- /dev/null
+++ b/xos/configurations/cord/openstack_ceilometer_patch.tar.gz
Binary files differ
diff --git a/xos/configurations/cord/xos_metering_dashboard.tar.gz b/xos/configurations/cord/xos_metering_dashboard.tar.gz
index 648f91a..3b4d127 100644
--- a/xos/configurations/cord/xos_metering_dashboard.tar.gz
+++ b/xos/configurations/cord/xos_metering_dashboard.tar.gz
Binary files differ
diff --git a/xos/cord/admin.py b/xos/cord/admin.py
index 6137212..686d8ae 100644
--- a/xos/cord/admin.py
+++ b/xos/cord/admin.py
@@ -51,7 +51,8 @@
         return VOLTService.get_service_objects_by_user(request.user)
 
 class VOLTTenantForm(forms.ModelForm):
-    vlan_id = forms.CharField()
+    s_tag = forms.CharField()
+    c_tag = forms.CharField()
     creator = forms.ModelChoiceField(queryset=User.objects.all())
 
     def __init__(self,*args,**kwargs):

@@ -60,7 +61,8 @@
         self.fields['provider_service'].queryset = VOLTService.get_service_objects().all()

         if self.instance:

             # fields for the attributes

-            self.fields['vlan_id'].initial = self.instance.vlan_id

+            self.fields['c_tag'].initial = self.instance.c_tag

+            self.fields['s_tag'].initial = self.instance.s_tag

             self.fields['creator'].initial = self.instance.creator

         if (not self.instance) or (not self.instance.pk):

             # default fields for an 'add' form

@@ -70,7 +72,8 @@
                self.fields["provider_service"].initial = VOLTService.get_service_objects().all()[0]

 

     def save(self, commit=True):

-        self.instance.vlan_id = self.cleaned_data.get("vlan_id")

+        self.instance.s_tag = self.cleaned_data.get("s_tag")

+        self.instance.c_tag = self.cleaned_data.get("c_tag")

         self.instance.creator = self.cleaned_data.get("creator")

         return super(VOLTTenantForm, self).save(commit=commit)

 

@@ -78,10 +81,10 @@
         model = VOLTTenant
 
 class VOLTTenantAdmin(ReadOnlyAwareAdmin):
-    list_display = ('backend_status_icon', 'id', 'service_specific_id', 'vlan_id', 'subscriber_root' )
+    list_display = ('backend_status_icon', 'id', 'service_specific_id', 's_tag', 'c_tag', 'subscriber_root' )
     list_display_links = ('backend_status_icon', 'id')
     fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'provider_service', 'subscriber_root', 'service_specific_id', # 'service_specific_attribute',
-                                     'vlan_id', 'creator'],
+                                     's_tag', 'c_tag', 'creator'],
                           'classes':['suit-tab suit-tab-general']})]
     readonly_fields = ('backend_status_text', 'service_specific_attribute')
     form = VOLTTenantForm
diff --git a/xos/cord/models.py b/xos/cord/models.py
index 67ffdc7..daac40c 100644
--- a/xos/cord/models.py
+++ b/xos/cord/models.py
@@ -1,5 +1,5 @@
 from django.db import models
-from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber
+from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber, NetworkParameter, NetworkParameterType, Port
 from core.models.plcorebase import StrippedCharField
 import os
 from django.db import models, transaction
@@ -254,7 +254,7 @@
 
     KIND = VOLT_KIND
 
-    default_attributes = {"vlan_id": None, }
+    default_attributes = {"vlan_id": None, "s_tag": None, "c_tag": None}
     def __init__(self, *args, **kwargs):
         volt_services = VOLTService.get_service_objects().all()
         if volt_services:
@@ -263,12 +263,30 @@
         self.cached_vcpe = None
 
     @property
+    def s_tag(self):
+        return self.get_attribute("s_tag", self.default_attributes["s_tag"])
+
+    @s_tag.setter
+    def s_tag(self, value):
+        self.set_attribute("s_tag", value)
+
+    @property
+    def c_tag(self):
+        return self.get_attribute("c_tag", self.default_attributes["c_tag"])
+
+    @c_tag.setter
+    def c_tag(self, value):
+        self.set_attribute("c_tag", value)
+
+    # for now, vlan_id is a synonym for c_tag
+
+    @property
     def vlan_id(self):
-        return self.get_attribute("vlan_id", self.default_attributes["vlan_id"])
+        return self.c_tag
 
     @vlan_id.setter
     def vlan_id(self, value):
-        self.set_attribute("vlan_id", value)
+        self.c_tag = value
 
     @property
     def vcpe(self):
@@ -470,6 +488,7 @@
                        "hpc_client_ip", "hpc_client_mac")
 
     default_attributes = {"instance_id": None,
+                          "container_id": None,
                           "users": [],
                           "bbs_account": None,
                           "last_ansible_hash": None}
@@ -534,11 +553,15 @@
 
     @property
     def addresses(self):
-        if not self.instance:
+        if self.instance:
+            ports = self.instance.ports.all()
+        elif self.container:
+            ports = self.container.ports.all()
+        else:
             return {}
 
         addresses = {}
-        for ns in self.instance.ports.all():
+        for ns in ports:
             if "lan" in ns.network.name.lower():
                 addresses["lan"] = (ns.ip, ns.mac)
             elif "wan" in ns.network.name.lower():
@@ -655,6 +678,37 @@
                 self.bbs_account = None
                 super(VCPETenant, self).save()
 
+    def find_or_make_port(self, instance, network, **kwargs):
+        port = Port.objects.filter(instance=instance, network=network)
+        if port:
+            port = port[0]
+        else:
+            port = Port(instance=instance, network=network, **kwargs)
+            port.save()
+        return port
+
+    def save_instance(self, instance):
+        with transaction.atomic():
+            instance.volumes = "/etc/dnsmasq.d"
+            super(VCPETenant, self).save_instance(instance)
+
+            if instance.isolation in ["container", "container_vm"]:
+                lan_networks = [x for x in instance.slice.networks.all() if "lan" in x.name]
+                if not lan_networks:
+                    raise XOSProgrammingError("No lan_network")
+                port = self.find_or_make_port(instance, lan_networks[0], ip="192.168.0.1", port_id="unmanaged")
+                port.set_parameter("c_tag", self.volt.c_tag)
+                port.set_parameter("s_tag", self.volt.s_tag)
+                port.set_parameter("device", "eth1")
+                port.set_parameter("bridge", "br-lan")
+
+                wan_networks = [x for x in instance.slice.networks.all() if "wan" in x.name]
+                if not wan_networks:
+                    raise XOSProgrammingError("No wan_network")
+                port = self.find_or_make_port(instance, wan_networks[0])
+                port.set_parameter("next_hop", value="10.0.1.253")   # FIX ME
+                port.set_parameter("device", "eth0")
+
     def save(self, *args, **kwargs):
         if not self.creator:
             if not getattr(self, "caller", None):
diff --git a/xos/cord/rest_examples/add_volt_tenant.sh b/xos/cord/rest_examples/add_volt_tenant.sh
index 5dd3dd4..4bbe2bb 100755
--- a/xos/cord/rest_examples/add_volt_tenant.sh
+++ b/xos/cord/rest_examples/add_volt_tenant.sh
@@ -3,8 +3,9 @@
 source ./config.sh
 
 SERVICE_SPECIFIC_ID=1238
-VLAN_ID=1238
+C_TAG=1238
+S_TAG=3333
 
-echo curl "-H \"Accept: application/json; indent=4\" -H \"Content-Type: application/json\" -u $AUTH -X POST -d \"{\\\"service_specific_id\\\": \\\"$SERVICE_SPECIFIC_ID\\\", \\\"vlan_id\\\": \\\"$VLAN_ID\\\"}\" $HOST/xoslib/volttenant/"
+echo curl "-H \"Accept: application/json; indent=4\" -H \"Content-Type: application/json\" -u $AUTH -X POST -d \"{\\\"service_specific_id\\\": \\\"$SERVICE_SPECIFIC_ID\\\", \\\"c_tag\\\": \\\"$C_TAG\\\", \\\"s_tag\\\": \\\"$S_TAG\\\"}\" $HOST/xoslib/volttenant/"
 
-curl -H "Accept: application/json; indent=4" -H "Content-Type: application/json" -u $AUTH -X POST -d "{\"service_specific_id\": \"$SERVICE_SPECIFIC_ID\", \"vlan_id\": \"$VLAN_ID\"}" $HOST/xoslib/volttenant/  
+curl -H "Accept: application/json; indent=4" -H "Content-Type: application/json" -u $AUTH -X POST -d "{\"service_specific_id\": \"$SERVICE_SPECIFIC_ID\", \"c_tag\": \"$C_TAG\", \"s_tag\": \"$S_TAG\"}" $HOST/xoslib/volttenant/  
diff --git a/xos/cord/rest_examples/config.sh b/xos/cord/rest_examples/config.sh
index 7b8c8e1..06162ee 100644
--- a/xos/cord/rest_examples/config.sh
+++ b/xos/cord/rest_examples/config.sh
@@ -1,5 +1,6 @@
 #HOST=198.0.0.44:8000
-HOST=10.254.1.22:8000
+#HOST=10.254.1.22:8000
+HOST=clnode050.clemson.cloudlab.us:9999
 
 #AUTH=scott@onlab.us:letmein
 AUTH=padmin@vicci.org:letmein
diff --git a/xos/core/admin.py b/xos/core/admin.py
index a0cabd1..be9dcc0 100644
--- a/xos/core/admin.py
+++ b/xos/core/admin.py
@@ -23,7 +23,6 @@
 from cgi import escape as html_escape
 from django.contrib import messages
 
-import django_evolution
 import threading
 
 # thread locals necessary to work around a django-suit issue
@@ -106,6 +105,10 @@
             # this 'if' might be redundant if save_by_user is implemented right
             raise PermissionDenied
 
+        # reset exponential backoff
+        if hasattr(obj, "backend_register"):
+            obj.backend_register = "{}"
+
         obj.caller = request.user
         # update openstack connection to use this site/tenant
         obj.save_by_user(request.user)
@@ -880,7 +883,7 @@
 class ServiceAdmin(XOSBaseAdmin):
     list_display = ("backend_status_icon","name","kind","versionNumber","enabled","published")
     list_display_links = ('backend_status_icon', 'name', )
-    fieldList = ["backend_status_text","name","kind","description","versionNumber","enabled","published","view_url","icon_url","public_key","service_specific_attribute","service_specific_id"]
+    fieldList = ["backend_status_text","name","kind","description","versionNumber","enabled","published","view_url","icon_url","public_key","private_key_fn","service_specific_attribute","service_specific_id"]
     fieldsets = [(None, {'fields': fieldList, 'classes':['suit-tab suit-tab-general']})]
     inlines = [ServiceAttrAsTabInline,SliceInline,ProviderTenantInline,SubscriberTenantInline,ServicePrivilegeInline]
     readonly_fields = ('backend_status_text', )
@@ -1051,7 +1054,7 @@
 
 class SliceAdmin(XOSBaseAdmin):
     form = SliceForm
-    fieldList = ['backend_status_text', 'site', 'name', 'serviceClass', 'enabled','description', 'service', 'slice_url', 'max_instances']
+    fieldList = ['backend_status_text', 'site', 'name', 'serviceClass', 'enabled','description', 'service', 'slice_url', 'max_instances', "default_isolation"]
     fieldsets = [('Slice Details', {'fields': fieldList, 'classes':['suit-tab suit-tab-general']}),]
     readonly_fields = ('backend_status_text', )
     list_display = ('backend_status_icon', 'name', 'site','serviceClass', 'slice_url', 'max_instances')
@@ -1203,7 +1206,7 @@
 class ImageAdmin(XOSBaseAdmin):
 
     fieldsets = [('Image Details',
-                   {'fields': ['backend_status_text', 'name', 'disk_format', 'container_format'],
+                   {'fields': ['backend_status_text', 'name', 'kind', 'disk_format', 'container_format'],
                     'classes': ['suit-tab suit-tab-general']})
                ]
     readonly_fields = ('backend_status_text', )
@@ -1214,7 +1217,7 @@
 
     user_readonly_fields = ['name', 'disk_format', 'container_format']
 
-    list_display = ['backend_status_icon', 'name']
+    list_display = ['backend_status_icon', 'name', 'kind']
     list_display_links = ('backend_status_icon', 'name', )
 
 class NodeForm(forms.ModelForm):
@@ -1273,7 +1276,7 @@
     fields = ['backend_status_icon', 'network', 'instance', 'ip', 'mac']
     readonly_fields = ("backend_status_icon", "ip", "mac")
     model = Port
-    selflink_fieldname = "network"
+    #selflink_fieldname = "network"
     extra = 0
     verbose_name_plural = "Ports"
     verbose_name = "Port"
@@ -1282,13 +1285,14 @@
 class InstanceAdmin(XOSBaseAdmin):
     form = InstanceForm
     fieldsets = [
-        ('Instance Details', {'fields': ['backend_status_text', 'slice', 'deployment', 'flavor', 'image', 'node', 'all_ips_string', 'instance_id', 'instance_name', 'ssh_command'], 'classes': ['suit-tab suit-tab-general'], })
+        ('Instance Details', {'fields': ['backend_status_text', 'slice', 'deployment', 'isolation', 'flavor', 'image', 'node', 'parent', 'all_ips_string', 'instance_id', 'instance_name', 'ssh_command', ], 'classes': ['suit-tab suit-tab-general'], }),
+        ('Container Settings', {'fields': ['volumes'], 'classes': ['suit-tab suit-tab-container'], }),
     ]
     readonly_fields = ('backend_status_text', 'ssh_command', 'all_ips_string')
-    list_display = ['backend_status_icon', 'all_ips_string', 'instance_id', 'instance_name', 'slice', 'flavor', 'image', 'node', 'deployment']
+    list_display = ['backend_status_icon', 'all_ips_string', 'instance_id', 'instance_name', 'isolation', 'slice', 'flavor', 'image', 'node', 'deployment']
     list_display_links = ('backend_status_icon', 'all_ips_string', 'instance_id', )
 
-    suit_form_tabs =(('general', 'Instance Details'), ('ports', 'Ports'))
+    suit_form_tabs =(('general', 'Instance Details'), ('ports', 'Ports'), ('container', 'Container Settings'))
 
     inlines = [TagInline, InstancePortInline]
 
@@ -1372,38 +1376,38 @@
     #    obj.os_manager = OpenStackManager(auth=auth, caller=request.user)
     #    obj.delete()
 
-class ContainerPortInline(XOSTabularInline):
-    fields = ['backend_status_icon', 'network', 'container', 'ip', 'mac', 'segmentation_id']
-    readonly_fields = ("backend_status_icon", "ip", "mac", "segmentation_id")
-    model = Port
-    selflink_fieldname = "network"
-    extra = 0
-    verbose_name_plural = "Ports"
-    verbose_name = "Port"
-    suit_classes = 'suit-tab suit-tab-ports'
+#class ContainerPortInline(XOSTabularInline):
+#    fields = ['backend_status_icon', 'network', 'container', 'ip', 'mac', 'segmentation_id']
+#    readonly_fields = ("backend_status_icon", "ip", "mac", "segmentation_id")
+#    model = Port
+#    selflink_fieldname = "network"
+#    extra = 0
+#    verbose_name_plural = "Ports"
+#    verbose_name = "Port"
+#    suit_classes = 'suit-tab suit-tab-ports'
 
-class ContainerAdmin(XOSBaseAdmin):
-    fieldsets = [
-        ('Container Details', {'fields': ['backend_status_text', 'slice', 'node', 'docker_image', 'no_sync'], 'classes': ['suit-tab suit-tab-general'], })
-    ]
-    readonly_fields = ('backend_status_text', )
-    list_display = ['backend_status_icon', 'id']
-    list_display_links = ('backend_status_icon', 'id', )
-
-    suit_form_tabs =(('general', 'Container Details'), ('ports', 'Ports'))
-
-    inlines = [TagInline, ContainerPortInline]
-
-    def formfield_for_foreignkey(self, db_field, request, **kwargs):
-        if db_field.name == 'slice':
-            kwargs['queryset'] = Slice.select_by_user(request.user)
-
-        return super(ContainerAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
-
-    def queryset(self, request):
-        # admins can see all instances. Users can only see instances of
-        # the slices they belong to.
-        return Container.select_by_user(request.user)
+#class ContainerAdmin(XOSBaseAdmin):
+#    fieldsets = [
+#        ('Container Details', {'fields': ['backend_status_text', 'slice', 'node', 'docker_image', 'volumes', 'no_sync'], 'classes': ['suit-tab suit-tab-general'], })
+#    ]
+#    readonly_fields = ('backend_status_text', )
+#    list_display = ['backend_status_icon', 'id']
+#    list_display_links = ('backend_status_icon', 'id', )
+#
+#    suit_form_tabs =(('general', 'Container Details'), ('ports', 'Ports'))
+#
+#    inlines = [TagInline, ContainerPortInline]
+#
+#    def formfield_for_foreignkey(self, db_field, request, **kwargs):
+#        if db_field.name == 'slice':
+#            kwargs['queryset'] = Slice.select_by_user(request.user)
+#
+#        return super(ContainerAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
+#
+#    def queryset(self, request):
+#        # admins can see all instances. Users can only see instances of
+#        # the slices they belong to.
+#        return Container.select_by_user(request.user)
 
 class UserCreationForm(forms.ModelForm):
     """A form for creating new users. Includes all the required
@@ -1760,10 +1764,10 @@
     readonly_fields = ('backend_status_icon', )
 
 class NetworkPortInline(XOSTabularInline):
-    fields = ['backend_status_icon', 'network', 'instance', 'container', 'ip', 'mac']
+    fields = ['backend_status_icon', 'network', 'instance', 'ip', 'mac']
     readonly_fields = ("backend_status_icon", "ip", "mac")
     model = Port
-    selflink_fieldname = "instance"
+    #selflink_fieldname = "instance"
     extra = 0
     verbose_name_plural = "Ports"
     verbose_name = "Port"
@@ -1842,10 +1846,25 @@
     list_display_links = ('backend_status_icon', 'name', )
     user_readonly_fields = ["name", "guaranteed_bandwidth", "visibility"]
     user_readonly_inlines = []
+    inlines = [NetworkParameterInline,]
     fieldsets = [
         (None, {'fields': ['name', 'description', 'guaranteed_bandwidth', 'visibility', 'translation', 'shared_network_name', 'shared_network_id', 'topology_kind', 'controller_kind'],
                 'classes':['suit-tab suit-tab-general']}),]
-    suit_form_tabs = (('general','Network Template Details'), )
+    suit_form_tabs = (('general','Network Template Details'), ('netparams', 'Parameters') )
+
+class PortAdmin(XOSBaseAdmin):
+    list_display = ("backend_status_icon", "name", "id", "ip")
+    list_display_links = ('backend_status_icon', 'id')
+    readonly_fields = ("subnet", )
+    inlines = [NetworkParameterInline]
+
+    fieldsets = [
+        (None, {'fields': ['backend_status_text', 'network', 'instance', 'ip', 'port_id', 'mac'],
+                'classes':['suit-tab suit-tab-general']}),
+                ]
+
+    suit_form_tabs = (('general', 'Port Details'), ('netparams', 'Parameters'))
 
 class FlavorAdmin(XOSBaseAdmin):
     list_display = ("backend_status_icon", "name", "flavor", "order", "default")
@@ -2017,12 +2036,6 @@
 # unregister the Group model from admin.
 #admin.site.unregister(Group)
 
-#Do not show django evolution in the admin interface
-from django_evolution.models import Version, Evolution
-#admin.site.unregister(Version)
-#admin.site.unregister(Evolution)
-
-
 # When debugging it is often easier to see all the classes, but for regular use 
 # only the top-levels should be displayed
 showAll = False
@@ -2034,6 +2047,7 @@
 admin.site.register(Service, ServiceAdmin)
 #admin.site.register(Reservation, ReservationAdmin)
 admin.site.register(Network, NetworkAdmin)
+admin.site.register(Port, PortAdmin)
 admin.site.register(Router, RouterAdmin)
 admin.site.register(NetworkTemplate, NetworkTemplateAdmin)
 admin.site.register(Program, ProgramAdmin)
@@ -2057,5 +2071,5 @@
     admin.site.register(TenantRoot, TenantRootAdmin)
     admin.site.register(TenantRootRole, TenantRootRoleAdmin)
     admin.site.register(TenantAttribute, TenantAttributeAdmin)
-    admin.site.register(Container, ContainerAdmin)
+#    admin.site.register(Container, ContainerAdmin)
 
diff --git a/xos/core/models/__init__.py b/xos/core/models/__init__.py
index bc97dab..c380e9c 100644
--- a/xos/core/models/__init__.py
+++ b/xos/core/models/__init__.py
@@ -24,7 +24,6 @@
 from .node import Node
 from .slicetag import SliceTag
 from .instance import Instance
-from .container import Container
 from .reservation import ReservedResource
 from .reservation import Reservation
 from .network import Network, NetworkParameterType, NetworkParameter, Port, NetworkTemplate, Router, NetworkSlice, ControllerNetwork
diff --git a/xos/core/models/container.py b/xos/core/models/container.py
deleted file mode 100644
index 151b576..0000000
--- a/xos/core/models/container.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import os
-from django.db import models
-from django.db.models import Q
-from django.core import exceptions
-from core.models import PlCoreBase,PlCoreBaseManager,PlCoreBaseDeletionManager
-from core.models.plcorebase import StrippedCharField
-from core.models import Image
-from core.models import Slice, SlicePrivilege
-from core.models import Node
-from core.models import Site
-from core.models import Deployment
-from core.models import Controller
-from core.models import User
-from core.models import Tag
-from core.models import Flavor
-from django.contrib.contenttypes import generic
-from xos.config import Config
-from monitor import driver as monitor
-from django.core.exceptions import PermissionDenied, ValidationError
-
-config = Config()
-
-
-# Create your models here.
-class Container(PlCoreBase):
-    name = StrippedCharField(max_length=200, help_text="Container name")
-    slice = models.ForeignKey(Slice, related_name='containers')
-    node = models.ForeignKey(Node, related_name='containers')
-    creator = models.ForeignKey(User, related_name='containers', blank=True, null=True)
-    docker_image = StrippedCharField(null=True, blank=True, max_length=200, help_text="name of docker container to instantiate")
-
-    def __unicode__(self):
-        return u'container-%s' % str(self.id)
-
-    def save(self, *args, **kwds):
-        if not self.name:
-            self.name = self.slice.name
-        if not self.creator and hasattr(self, 'caller'):
-            self.creator = self.caller
-        if not self.creator:
-            raise ValidationError('container has no creator')
-
-        if (self.slice.creator != self.creator):
-            # Check to make sure there's a slice_privilege for the user. If there
-            # isn't, then keystone will throw an exception inside the observer.
-            slice_privs = SlicePrivilege.objects.filter(slice=self.slice, user=self.creator)
-            if not slice_privs:
-                raise ValidationError('container creator has no privileges on slice')
-
-# XXX smbaker - disabled for now, was causing fault in tenant view create slice
-#        if not self.controllerNetwork.test_acl(slice=self.slice):
-#            raise exceptions.ValidationError("Deployment %s's ACL does not allow any of this slice %s's users" % (self.controllerNetwork.name, self.slice.name))
-
-        super(Container, self).save(*args, **kwds)
-
-    def can_update(self, user):
-        return True
-
-    @staticmethod
-    def select_by_user(user):
-        if user.is_admin:
-            qs = Container.objects.all()
-        else:
-            slices = Slice.select_by_user(user)
-            qs = Container.objects.filter(slice__in=slices)
-        return qs
-
-    def get_public_keys(self):
-        slice_memberships = SlicePrivilege.objects.filter(slice=self.slice)
-        pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
-
-        if self.creator.public_key:
-            pubkeys.add(self.creator.public_key)
-
-        if self.slice.creator.public_key:
-            pubkeys.add(self.slice.creator.public_key)
-
-        if self.slice.service and self.slice.service.public_key:
-            pubkeys.add(self.slice.service.public_key)
-
-        return pubkeys
-
-
diff --git a/xos/core/models/image.py b/xos/core/models/image.py
index 21d4f23..1a3cbf7 100644
--- a/xos/core/models/image.py
+++ b/xos/core/models/image.py
@@ -7,7 +7,10 @@
 # Create your models here.
 
 class Image(PlCoreBase):
+    KIND_CHOICES = (('vm', 'Virtual Machine'), ('container', 'Container'), )
+
     name = StrippedCharField(max_length=256, unique=True)
+    kind = models.CharField(null=False, blank=False, max_length=30, choices=KIND_CHOICES, default="vm")
     disk_format = StrippedCharField(max_length=256)
     container_format = StrippedCharField(max_length=256)
     path = StrippedCharField(max_length=256, null=True, blank=True, help_text="Path to image on local disk")
diff --git a/xos/core/models/instance.py b/xos/core/models/instance.py
index 75826f6..cd7dd26 100644
--- a/xos/core/models/instance.py
+++ b/xos/core/models/instance.py
@@ -80,6 +80,8 @@
 
 # Create your models here.
 class Instance(PlCoreBase):
+    ISOLATION_CHOICES = (('vm', 'Virtual Machine'), ('container', 'Container'), ('container_vm', 'Container In VM'))
+
     objects = InstanceManager()
     deleted_objects = InstanceDeletionManager()
     instance_id = StrippedCharField(null=True, blank=True, max_length=200, help_text="Nova instance id")
@@ -88,7 +90,6 @@
     instance_name = StrippedCharField(blank=True, null=True, max_length=200, help_text="OpenStack generated name")
     ip = models.GenericIPAddressField(help_text="Instance ip address", blank=True, null=True)
     image = models.ForeignKey(Image, related_name='instances')
-    #key = models.ForeignKey(Key, related_name='instances')
     creator = models.ForeignKey(User, related_name='instances', blank=True, null=True)
     slice = models.ForeignKey(Slice, related_name='instances')
     deployment = models.ForeignKey(Deployment, verbose_name='deployment', related_name='instance_deployment')
@@ -97,6 +98,9 @@
     flavor = models.ForeignKey(Flavor, help_text="Flavor of this instance", default=get_default_flavor)
     tags = generic.GenericRelation(Tag)
     userData = models.TextField(blank=True, null=True, help_text="user_data passed to instance during creation")
+    isolation = models.CharField(null=False, blank=False, max_length=30, choices=ISOLATION_CHOICES, default="vm")
+    volumes = models.TextField(null=True, blank=True, help_text="Comma-separated list of directories to expose to parent context")
+    parent = models.ForeignKey("Instance", null=True, blank=True, help_text="Parent Instance for containers nested inside of VMs")
 
     def __unicode__(self):
         if self.name and Slice.objects.filter(id=self.slice_id) and (self.name != self.slice.name):
@@ -120,6 +124,19 @@
         if not self.creator:
             raise ValidationError('instance has no creator')
 
+        if (self.isolation == "container") or (self.isolation == "container_vm"):
+            if (self.image.kind != "container"):
+                raise ValidationError("Container instance must use container image")
+        elif (self.isolation == "vm"):
+            if (self.image.kind != "vm"):
+                raise ValidationError("VM instance must use VM image")
+
+        if (self.isolation == "container_vm") and (not self.parent):
+            raise ValidationError("Container-vm instance must have a parent")
+
+        if (self.parent) and (self.isolation != "container_vm"):
+            raise ValidationError("Parent field can only be set on Container-vm instances")
+
         if (self.slice.creator != self.creator):
             # Check to make sure there's a slice_privilege for the user. If there
             # isn't, then keystone will throw an exception inside the observer.
diff --git a/xos/core/models/network.py b/xos/core/models/network.py
index 48af5a6..6894f9f 100644
--- a/xos/core/models/network.py
+++ b/xos/core/models/network.py
@@ -2,7 +2,7 @@
 import socket
 import sys
 from django.db import models
-from core.models import PlCoreBase, Site, Slice, Instance, Controller, Container
+from core.models import PlCoreBase, Site, Slice, Instance, Controller
 from core.models import ControllerLinkManager,ControllerLinkDeletionManager
 from django.contrib.contenttypes.models import ContentType
 from django.contrib.contenttypes import generic
@@ -64,7 +64,38 @@
     except Exception,e:
         raise ValidationError(str(e))
 
-class NetworkTemplate(PlCoreBase):
+class ParameterMixin(object):
+    # helper methods for dealing with NetworkParameter
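+    #
+    # Usage sketch (parameter names are examples only):
+    #   obj.set_parameter("next_hop", "10.0.1.253")
+    #   obj.get_parameters()     # -> {"next_hop": "10.0.1.253"}
+    #   obj.unset_parameter("next_hop")
+    # Values live in NetworkParameter rows keyed by the object's content type
+    # and id, so each mixing class gets its own parameter namespace.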
+
+    def get_parameters(self):
+        parameter_dict = {}
+
+        instance_type = ContentType.objects.get_for_model(self)
+        for param in NetworkParameter.objects.filter(content_type__pk=instance_type.id, object_id=self.id):
+            parameter_dict[param.parameter.name] = param.value
+
+        return parameter_dict
+
+    def set_parameter(self, name, value):
+        instance_type = ContentType.objects.get_for_model(self)
+        existing_params = NetworkParameter.objects.filter(parameter__name=name, content_type__pk=instance_type.id, object_id=self.id)
+        if existing_params:
+            p=existing_params[0]
+            p.value = value
+            p.save()
+        else:
+            pt = NetworkParameterType.objects.get(name=name)
+            p = NetworkParameter(parameter=pt, content_type=instance_type, object_id=self.id, value=value)
+            p.save()
+
+    def unset_parameter(self, name):
+        instance_type = ContentType.objects.get_for_model(self)
+        existing_params = NetworkParameter.objects.filter(parameter__name=name, content_type__pk=instance_type.id, object_id=self.id)
+        for p in existing_params:
+            p.delete()
+
+
+class NetworkTemplate(PlCoreBase, ParameterMixin):
     VISIBILITY_CHOICES = (('public', 'public'), ('private', 'private'))
     TRANSLATION_CHOICES = (('none', 'none'), ('NAT', 'NAT'))
     TOPOLOGY_CHOICES = (('bigswitch', 'BigSwitch'), ('physical', 'Physical'), ('custom', 'Custom'))
@@ -97,7 +128,7 @@
 
     def __unicode__(self):  return u'%s' % (self.name)
 
-class Network(PlCoreBase):
+class Network(PlCoreBase, ParameterMixin):
     name = models.CharField(max_length=32)
     template = models.ForeignKey(NetworkTemplate)
     subnet = models.CharField(max_length=32, blank=True)
@@ -147,6 +178,14 @@
             qs = Network.objects.filter(owner__in=slices)
         return qs
 
+    def get_parameters(self):
+        # returns parameters from the template, updated by self.
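+        # e.g. (hypothetical values) a template with {"mtu": "1500", "nat": "no"}
+        # and a network-level override {"nat": "yes"} yield
+        # {"mtu": "1500", "nat": "yes"}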
+        p={}
+        if self.template:
+            p = self.template.get_parameters()
+        p.update(ParameterMixin.get_parameters(self))
+        return p
+
 class ControllerNetwork(PlCoreBase):
     objects = ControllerLinkManager()
     deleted_objects = ControllerLinkDeletionManager()
@@ -161,7 +200,7 @@
 
     class Meta:
         unique_together = ('network', 'controller')
-        
+
     @staticmethod
     def select_by_user(user):
         if user.is_admin:
@@ -208,15 +247,12 @@
             qs = NetworkSlice.objects.filter(Q(slice__in=slice_ids) | Q(network__in=network_ids))
         return qs
 
-class Port(PlCoreBase):
+class Port(PlCoreBase, ParameterMixin):
     network = models.ForeignKey(Network,related_name='links')
     instance = models.ForeignKey(Instance, null=True, blank=True, related_name='ports')
-    container = models.ForeignKey(Container, null=True, blank=True, related_name='ports')
     ip = models.GenericIPAddressField(help_text="Instance ip address", blank=True, null=True)
-    port_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum port id")
+    port_id = models.CharField(null=True, blank=True, max_length=256, help_text="Neutron port id")
     mac = models.CharField(null=True, blank=True, max_length=256, help_text="MAC address associated with this port")
-    #unattached = models.BooleanField(default=False, help_text="create this port even if no Instance is attached")
-    segmentation_id = models.CharField(null=True, blank=True, max_length=256, help_text="GRE segmentation id for port")
 
     class Meta:
         unique_together = ('network', 'instance')
@@ -258,6 +294,14 @@
             qs = Port.objects.filter(Q(instance__in=instance_ids) | Q(network__in=network_ids))
         return qs
 
+    def get_parameters(self):
+        # returns parameters from the network, updated by self.
+        p={}
+        if self.network:
+            p = self.network.get_parameters()
+        p.update(ParameterMixin.get_parameters(self))
+        return p
+
 class Router(PlCoreBase):
     name = models.CharField(max_length=32)
     owner = models.ForeignKey(Slice, related_name="routers")
diff --git a/xos/core/models/node.py b/xos/core/models/node.py
index ec67975..5496d6b 100644
--- a/xos/core/models/node.py
+++ b/xos/core/models/node.py
@@ -16,6 +16,10 @@
 
     def __unicode__(self):  return u'%s' % (self.name)
 
+    def __init__(self, *args, **kwargs):
+        super(Node, self).__init__(*args, **kwargs)
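+        # assumption: setting no_sync marks Node objects so the observer does
+        # not try to synchronize them to the backend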
+        self.no_sync=True
+
     def save(self, *args, **kwds):
         if self.site is None and self.site_deployment is not None:
             self.site = self.site_deployment.site
diff --git a/xos/core/models/service.py b/xos/core/models/service.py
index 950ce02..ba54a33 100644
--- a/xos/core/models/service.py
+++ b/xos/core/models/service.py
@@ -52,6 +52,7 @@
     view_url = StrippedCharField(blank=True, null=True, max_length=1024)
     icon_url = StrippedCharField(blank=True, null=True, max_length=1024)
     public_key = models.TextField(null=True, blank=True, max_length=1024, help_text="Public key string")
+    private_key_fn = StrippedCharField(blank=True, null=True, max_length=1024)
 
     # Service_specific_attribute and service_specific_id are opaque to XOS
     service_specific_id = StrippedCharField(max_length=30, blank=True, null=True)
@@ -336,6 +337,106 @@
             return None
         return sorted(st, key=attrgetter('id'))[0]
 
+class Scheduler(object):
+    # XOS Scheduler Abstract Base Class
+    # Used to implement schedulers that pick which node to put instances on
+
+    def __init__(self, slice):
+        self.slice = slice
+
+    def pick(self):
+        # this method should return a tuple (node, parent)
+        #    node is the node to instantiate on
+        #    parent is for container_vm instances only, and is the VM that will
+        #      hold the container
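+        #
+        # Callers use it as, e.g.:
+        #   (node, parent) = LeastLoadedNodeScheduler(slice).pick()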
+
+        raise Exception("Abstract Base")
+
+class LeastLoadedNodeScheduler(Scheduler):
+    # This scheduler always returns the node with the fewest instances.
+
+    def __init__(self, slice):
+        super(LeastLoadedNodeScheduler, self).__init__(slice)
+
+    def pick(self):
+        from core.models import Node
+        nodes = list(Node.objects.all())
+        # TODO: logic to filter nodes by which nodes are up, and which
+        #   nodes the slice can instantiate on.
+        nodes = sorted(nodes, key=lambda node: node.instances.all().count())
+        return (nodes[0], None)
+
+class ContainerVmScheduler(Scheduler):
+    # This scheduler picks a VM in the slice with the fewest containers inside
+    # of it. If no VMs are suitable, then it creates a VM.
+
+    # this is a hack and should be replaced by something smarter...
+    LOOK_FOR_IMAGES=["ubuntu-vcpe4",        # ONOS demo machine -- preferred vcpe image
+                     "Ubuntu 14.04 LTS",    # portal
+                     "Ubuntu-14.04-LTS",    # ONOS demo machine
+                     "trusty-server-multi-nic", # CloudLab
+                    ]
+
+    MAX_CONTAINERS_PER_VM = 10
+
+    def __init__(self, slice):
+        super(ContainerVmScheduler, self).__init__(slice)
+
+    @property
+    def image(self):
+        from core.models import Image
+
+        look_for_images = self.LOOK_FOR_IMAGES
+        for image_name in look_for_images:
+            images = Image.objects.filter(name = image_name)
+            if images:
+                return images[0]
+
+        raise XOSProgrammingError("No ContainerVM image (looked for %s)" % str(look_for_images))
+
+    def make_new_instance(self):
+        from core.models import Instance, Flavor
+
+        flavors = Flavor.objects.filter(name="m1.small")
+        if not flavors:
+            raise XOSConfigurationError("No m1.small flavor")
+
+        (node,parent) = LeastLoadedNodeScheduler(self.slice).pick()
+
+        instance = Instance(slice = self.slice,
+                        node = node,
+                        image = self.image,
+                        creator = self.slice.creator,
+                        deployment = node.site_deployment.deployment,
+                        flavor = flavors[0],
+                        isolation = "vm",
+                        parent = parent)
+        instance.save()
+        # We rely on a special naming convention to identify the VMs that will
+        # hold containers.
+        instance.name = "%s-outer-%s" % (instance.slice.name, instance.id)
+        instance.save()
+        return instance
+
+    def pick(self):
+        from core.models import Instance, Flavor
+
+        avail_vms = []
+        for vm in self.slice.instances.filter(isolation="vm"):
+            if (vm.name.startswith("%s-outer-" % self.slice.name)):
+                container_count = Instance.objects.filter(parent=vm).count()
+                if (container_count < self.MAX_CONTAINERS_PER_VM):
+                    avail_vms.append( (vm, container_count) )
+        # sort by least containers-per-vm and reuse the emptiest VM
+        avail_vms = sorted(avail_vms, key = lambda x: x[1])
+        if avail_vms:
+            instance = avail_vms[0][0]
+            return (instance.node, instance)
+
+        instance = self.make_new_instance()
+        return (instance.node, instance)
+
 class TenantWithContainer(Tenant):
     """ A tenant that manages a container """
 
@@ -346,6 +447,8 @@
                      "trusty-server-multi-nic", # CloudLab
                     ]
 
+    LOOK_FOR_CONTAINER_IMAGES=["andybavier/docker-vcpe"]
+
     class Meta:
         proxy = True
 
@@ -406,20 +509,55 @@
         from core.models import Image
         # Implement the logic here to pick the image that should be used when
         # instantiating the VM that will hold the container.
-        for image_name in self.LOOK_FOR_IMAGES:
+
+        slice = self.provider_service.slices.all()
+        if not slice:
+            raise XOSProgrammingError("provider service has no slice")
+        slice = slice[0]
+
+        if slice.default_isolation in ["container", "container_vm"]:
+            look_for_images = self.LOOK_FOR_CONTAINER_IMAGES
+        else:
+            look_for_images = self.LOOK_FOR_IMAGES
+
+        for image_name in look_for_images:
             images = Image.objects.filter(name = image_name)
             if images:
                 return images[0]
 
-        raise XOSProgrammingError("No VPCE image (looked for %s)" % str(self.LOOK_FOR_IMAGES))
+        raise XOSProgrammingError("No VPCE image (looked for %s)" % str(look_for_images))
 
-    def pick_node(self):
-        from core.models import Node
-        nodes = list(Node.objects.all())
-        # TODO: logic to filter nodes by which nodes are up, and which
-        #   nodes the slice can instantiate on.
-        nodes = sorted(nodes, key=lambda node: node.instances.all().count())
-        return nodes[0]
+    @creator.setter
+    def creator(self, value):
+        if value:
+            value = value.id
+        if (value != self.get_attribute("creator_id", None)):
+            self.cached_creator=None
+        self.set_attribute("creator_id", value)
+
+    def save_instance(self, instance):
+        # Override this function to do custom pre-save or post-save processing,
+        # such as creating ports for containers.
+        instance.save()
+
+    def pick_least_loaded_instance_in_slice(self, slices):
+        for slice in slices:
+            if slice.instances.all().count() > 0:
+                for instance in slice.instances.all():
+                    # Pick the first instance that has fewer than 5 tenants
+                    if self.count_of_tenants_of_an_instance(instance) < 5:
+                        return instance
+        return None
+
+    # TODO: Ideally the tenant count for an instance would be maintained via a
+    # many-to-one relationship, but since this is a proxy model no new fields
+    # can be defined on it. Investigate better alternatives.
+    def count_of_tenants_of_an_instance(self, instance):
+        tenant_count = 0
+        for tenant in self.get_tenant_objects().all():
+            if tenant.get_attribute("instance_id", None) == instance.id:
+                tenant_count += 1
+        return tenant_count
 
     def manage_container(self):
         from core.models import Instance, Flavor
@@ -433,32 +571,55 @@
 
         if self.instance is None:
             if not self.provider_service.slices.count():
-                raise XOSConfigurationError("The VCPE service has no slices")
+                raise XOSConfigurationError("The service has no slices")
 
-            flavors = Flavor.objects.filter(name="m1.small")
-            if not flavors:
-                raise XOSConfigurationError("No m1.small flavor")
+            new_instance_created = False
+            instance = None
+            if self.get_attribute("use_same_instance_for_multiple_tenants", default=False):
+                #Find if any existing instances can be used for this tenant
+                slices = self.provider_service.slices.all()
+                instance = self.pick_least_loaded_instance_in_slice(slices)
 
-            node =self.pick_node()
-            instance = Instance(slice = self.provider_service.slices.all()[0],
-                            node = node,
-                            image = self.image,
-                            creator = self.creator,
-                            deployment = node.site_deployment.deployment,
-                            flavor = flavors[0])
-            instance.save()
+            if not instance:
+                flavors = Flavor.objects.filter(name="m1.small")
+                if not flavors:
+                    raise XOSConfigurationError("No m1.small flavor")
+
+                slice = self.provider_service.slices.all()[0]
+
+                if slice.default_isolation == "container_vm":
+                    (node, parent) = ContainerVmScheduler(slice).pick()
+                else:
+                    (node, parent) = LeastLoadedNodeScheduler(slice).pick()
+
+                instance = Instance(slice = slice,
+                                node = node,
+                                image = self.image,
+                                creator = self.creator,
+                                deployment = node.site_deployment.deployment,
+                                flavor = flavors[0],
+                                isolation = slice.default_isolation,
+                                parent = parent)
+                self.save_instance(instance)
+                new_instance_created = True
 
             try:
                 self.instance = instance
                 super(TenantWithContainer, self).save()
             except:
-                instance.delete()
+                if new_instance_created:
+                    instance.delete()
                 raise
 
     def cleanup_container(self):
         if self.instance:
-            # print "XXX cleanup instance", self.instance
-            self.instance.delete()
+            if self.get_attribute("use_same_instance_for_multiple_tenants", default=False):
+                # Delete the instance only if this is the last tenant using it
+                tenant_count = self.count_of_tenants_of_an_instance(self.instance)
+                if tenant_count == 0:
+                    self.instance.delete()
+            else:
+                self.instance.delete()
             self.instance = None
 
 class CoarseTenant(Tenant):
diff --git a/xos/core/models/slice.py b/xos/core/models/slice.py
index 18d3cb6..df36b26 100644
--- a/xos/core/models/slice.py
+++ b/xos/core/models/slice.py
@@ -19,6 +19,8 @@
 # Create your models here.
 
 class Slice(PlCoreBase):
+    ISOLATION_CHOICES = (('vm', 'Virtual Machine'), ('container', 'Container'), ('container_vm', 'Container In VM'))
+
     name = StrippedCharField(unique=True, help_text="The Name of the Slice", max_length=80)
     enabled = models.BooleanField(default=True, help_text="Status for this Slice")
     omf_friendly = models.BooleanField(default=False)
@@ -37,6 +39,8 @@
     default_image = models.ForeignKey(Image, related_name = "slices", null=True, blank=True);
     mount_data_sets = StrippedCharField(default="GenBank",null=True, blank=True, max_length=256)
 
+    default_isolation = models.CharField(null=False, blank=False, max_length=30, choices=ISOLATION_CHOICES, default="vm")
+
     def __unicode__(self):  return u'%s' % (self.name)
 
     @property
diff --git a/xos/core/xoslib/methods/cordsubscriber.py b/xos/core/xoslib/methods/cordsubscriber.py
index c26ac54..297ac4a 100644
--- a/xos/core/xoslib/methods/cordsubscriber.py
+++ b/xos/core/xoslib/methods/cordsubscriber.py
@@ -28,7 +28,9 @@
 class CordSubscriberIdSerializer(serializers.ModelSerializer, PlusSerializerMixin):
         id = ReadOnlyField()
         service_specific_id = ReadOnlyField()
-        vlan_id = ReadOnlyField()
+        vlan_id = ReadOnlyField()      # XXX remove this
+        c_tag = ReadOnlyField()
+        s_tag = ReadOnlyField()
         vcpe_id = ReadOnlyField()
         instance = ReadOnlyField()
         image = ReadOnlyField()
@@ -59,7 +61,7 @@
         class Meta:
             model = CordSubscriber
             fields = ('humanReadableName', 'id',
-                      'service_specific_id', 'vlan_id',
+                      'service_specific_id', 'vlan_id', 's_tag', 'c_tag',
                       'vcpe_id', 'instance', 'instance_name', 'image', 'image_name',
                       'firewall_enable', 'firewall_rules',
                       'url_filter_enable', 'url_filter_rules', 'url_filter_level',
diff --git a/xos/core/xoslib/methods/volttenant.py b/xos/core/xoslib/methods/volttenant.py
index e5998da..bf48290 100644
--- a/xos/core/xoslib/methods/volttenant.py
+++ b/xos/core/xoslib/methods/volttenant.py
@@ -26,7 +26,9 @@
 class VOLTTenantIdSerializer(serializers.ModelSerializer, PlusSerializerMixin):
         id = ReadOnlyField()
         service_specific_id = serializers.CharField()
-        vlan_id = serializers.CharField()
+        #vlan_id = serializers.CharField()
+        s_tag = serializers.CharField()
+        c_tag = serializers.CharField()
         provider_service = serializers.PrimaryKeyRelatedField(queryset=VOLTService.get_service_objects().all(), default=get_default_volt_service)
 
         humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
@@ -35,7 +37,7 @@
 
         class Meta:
             model = VOLTTenant
-            fields = ('humanReadableName', 'id', 'provider_service', 'service_specific_id', 'vlan_id', 'computeNodeName' )
+            fields = ('humanReadableName', 'id', 'provider_service', 'service_specific_id', 's_tag', 'c_tag', 'computeNodeName' )
 
         def getHumanReadableName(self, obj):
             return obj.__unicode__()
@@ -62,9 +64,19 @@
         if service_specific_id is not None:
             queryset = queryset.filter(service_specific_id=service_specific_id)
 
-        vlan_id = self.request.QUERY_PARAMS.get('vlan_id', None)
-        if vlan_id is not None:
-            ids = [x.id for x in queryset if x.get_attribute("vlan_id", None)==vlan_id]
+#        vlan_id = self.request.QUERY_PARAMS.get('vlan_id', None)
+#        if vlan_id is not None:
+#            ids = [x.id for x in queryset if x.get_attribute("vlan_id", None)==vlan_id]
+#            queryset = queryset.filter(id__in=ids)
+
+        c_tag = self.request.QUERY_PARAMS.get('c_tag', None)
+        if c_tag is not None:
+            ids = [x.id for x in queryset if x.get_attribute("c_tag", None)==c_tag]
+            queryset = queryset.filter(id__in=ids)
+
+        s_tag = self.request.QUERY_PARAMS.get('s_tag', None)
+        if s_tag is not None:
+            ids = [x.id for x in queryset if x.get_attribute("s_tag", None)==s_tag]
             queryset = queryset.filter(id__in=ids)
 
         return queryset
@@ -74,7 +86,7 @@
 
         existing_obj = None
         for obj in VOLTTenant.get_tenant_objects().all():
-            if (obj.vlan_id == data.get("vlan_id", None)) and (obj.service_specific_id == data.get("service_specific_id",None)):
+            if (obj.c_tag == data.get("c_tag", None)) and (obj.s_tag == data.get("s_tag", None)) and (obj.service_specific_id == data.get("service_specific_id",None)):
                existing_obj = obj
 
         if existing_obj:
diff --git a/xos/core/xoslib/objects/cordsubscriber.py b/xos/core/xoslib/objects/cordsubscriber.py
index 318d54c..089c91b 100644
--- a/xos/core/xoslib/objects/cordsubscriber.py
+++ b/xos/core/xoslib/objects/cordsubscriber.py
@@ -113,7 +113,9 @@
                      # ("services", "vcpe.services"),
                      # ("cdn_enable", "vcpe.cdn_enable"),
 
-                     ("vlan_id", "volt.vlan_id"),
+                     ("vlan_id", "volt.vlan_id"),      # XXX remove this
+                     ("c_tag", "volt.c_tag"),
+                     ("s_tag", "volt.s_tag"),
 
                      ("bbs_account", "volt.vcpe.bbs_account"),
                      ("ssh_command", "volt.vcpe.ssh_command"),
diff --git a/xos/core/xoslib/templates/xosCordSubscriber.html b/xos/core/xoslib/templates/xosCordSubscriber.html
index b7e2163..db42fb8 100644
--- a/xos/core/xoslib/templates/xosCordSubscriber.html
+++ b/xos/core/xoslib/templates/xosCordSubscriber.html
@@ -7,7 +7,8 @@
   <table class="xos-detail-table cord-subscriber-table">
   <tr><td class="xos-label-cell">Id:</td><td><%= model.attributes.id %></td></tr>
   <tr><td class="xos-label-cell">Service Specific Id:</td><td><%= model.attributes.service_specific_id %></td></tr>

-  <tr><td class="xos-label-cell">VLAN Id:</td><td><%= model.attributes.vlan_id %></td></tr>

+  <tr><td class="xos-label-cell">S-Tag:</td><td><%= model.attributes.s_tag %></td></tr>

+  <tr><td class="xos-label-cell">C-Tag:</td><td><%= model.attributes.c_tag %></td></tr>

   </table>

   </div>

 

diff --git a/xos/helloworld/models.py b/xos/helloworld/models.py
index a657f3a..9bb343e 100644
--- a/xos/helloworld/models.py
+++ b/xos/helloworld/models.py
@@ -11,7 +11,7 @@
 
 class Hello(PlCoreBase):
     name = models.CharField(max_length=254,help_text="Salutation e.g. Hello or Bonjour")
-    instance_backref = models.ForeignKey(Instance)
+    instance_backref = models.ForeignKey(Instance,related_name="hellos")
     
 class World(PlCoreBase):
     name = models.CharField(max_length=254,help_text="Name of planet")
diff --git a/xos/helloworld/view.py b/xos/helloworld/view.py
index b3eec29..7024747 100644
--- a/xos/helloworld/view.py
+++ b/xos/helloworld/view.py
@@ -34,9 +34,9 @@
             i.instance_name=None
             i.enacted=None
             i.save()
-            h = Hello(name=hello_name,sliver_backref=i)
-            w = World(hello=h,name=world_name)
+            h = Hello(name=hello_name,instance_backref=i)
             h.save()
+            w = World(hello=h,name=world_name)
             w.save()
 
             t = template.Template(head_template + 'Done. New instance id: %r'%i.pk + self.tail_template)
diff --git a/xos/model-deps b/xos/model-deps
index ea32eb9..59bbe25 100644
--- a/xos/model-deps
+++ b/xos/model-deps
@@ -1,8 +1,6 @@
 {
     "Slice": [
         "Site", 
-        "Service", 
-        "ServiceClass", 
         "User"
     ], 
     "ImageDeployments": [
@@ -65,9 +63,6 @@
     "Reservation": [
         "Slice"
     ], 
-    "ServiceResource": [
-        "ServiceClass"
-    ], 
     "Instance": [
         "Image", 
         "User", 
@@ -79,9 +74,6 @@
     "Account": [
         "Site"
     ], 
-    "ServiceAttribute": [
-        "Service"
-    ], 
     "ControllerSlicePrivilege": [
         "Controller"
     ], 
@@ -116,9 +108,6 @@
         "Controller", 
         "DashboardView"
     ], 
-    "Tag": [
-        "Service"
-    ], 
     "Invoice": [
         "Account"
     ], 
diff --git a/xos/model_policies/model_policy_Image.py b/xos/model_policies/model_policy_Image.py
index 72f76fa..c77d5bb 100644
--- a/xos/model_policies/model_policy_Image.py
+++ b/xos/model_policies/model_policy_Image.py
@@ -2,6 +2,10 @@
     from core.models import Controller, ControllerImages, Image
     from collections import defaultdict
 
+    if (image.kind == "container"):
+        # container images do not get instantiated
+        return
+
     controller_images = ControllerImages.objects.filter(image=image)
     existing_controllers = [cs.controller for cs in controller_images] 
     
diff --git a/xos/model_policies/model_policy_Instance.py b/xos/model_policies/model_policy_Instance.py
index a13428d..ffc9847 100644
--- a/xos/model_policies/model_policy_Instance.py
+++ b/xos/model_policies/model_policy_Instance.py
@@ -1,3 +1,44 @@
+def handle_container_on_metal(instance):
+        from core.models import Instance, Flavor, Port, Image
+        from xos.exceptions import XOSConfigurationError  # used below; import path assumed
+
+        print "MODEL POLICY: instance", instance, "handle container_on_metal"
+
+        if instance.deleted:
+            return
+
+        if (instance.isolation in ["container"]):
+            # Our current docker-on-metal network strategy requires that there be some
+            # VM on the server that connects to the networks, so that
+            # the containers can piggyback off of that configuration.
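+            # e.g. the first container on bare metal in a slice on node X
+            # triggers creation of one companion VM on X; later containers on
+            # that node find it via the filter below and skip creation.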
+            if not Instance.objects.filter(slice=instance.slice, node=instance.node, isolation="vm").exists():
+                flavors = Flavor.objects.filter(name="m1.small")
+                if not flavors:
+                    raise XOSConfigurationError("No m1.small flavor")
+
+                images = Image.objects.filter(kind="vm")
+
+                companion_instance = Instance(slice = instance.slice,
+                                node = instance.node,
+                                image = images[0],
+                                creator = instance.creator,
+                                deployment = instance.node.site_deployment.deployment,
+                                flavor = flavors[0])
+                companion_instance.save()
+
+                print "MODEL POLICY: instance", instance, "created companion", companion_instance
+
+        # Add the ports for the container
+        for network in instance.slice.networks.all():
+            # hmmm... The NAT ports never become ready, because sync_ports never
+            # instantiates them. Need to think about this.
+            print "MODEL POLICY: instance", instance, "handling network", network
+            if (network.name.endswith("-nat")):
+                continue
+
+            if not Port.objects.filter(network=network, instance=instance).exists():
+                port = Port(network = network, instance=instance)
+                port.save()
+                print "MODEL POLICY: instance", instance, "created port", port
 
 def handle(instance):
     from core.models import Controller, ControllerSlice, ControllerNetwork, NetworkSlice
@@ -7,7 +48,11 @@
                                                                 controller=instance.node.site_deployment.controller)
 
     for cn in controller_networks:
-        if (cn.lazy_blocked):	
+        if (cn.lazy_blocked):
+                print "MODEL POLICY: instance", instance, "unblocking network", cn.network
 		cn.lazy_blocked=False
 		cn.backend_register = '{}'
 		cn.save()
+
+    if (instance.isolation in ["container", "container_vm"]):
+        handle_container_on_metal(instance)
diff --git a/xos/model_policy.py b/xos/model_policy.py
index ced785e..9462b35 100644
--- a/xos/model_policy.py
+++ b/xos/model_policy.py
@@ -105,7 +105,7 @@
 
 def run_policy_once():
         from core.models import Instance,Slice,Controller,Network,User,SlicePrivilege,Site,SitePrivilege,Image,ControllerSlice,ControllerUser,ControllerSite
-        models = [Instance,Slice, Controller, Network, User, SlicePrivilege, Site, SitePrivilege, Image, ControllerSlice, ControllerSite, ControllerUser]
+        models = [Controller, Site, SitePrivilege, Image, ControllerSlice, ControllerSite, ControllerUser, User, Slice, Network, Instance, SlicePrivilege]
         objects = []
         deleted_objects = []
 
diff --git a/xos/observers/base/SyncInstanceUsingAnsible.py b/xos/observers/base/SyncInstanceUsingAnsible.py
index 901bc97..5bb8250 100644
--- a/xos/observers/base/SyncInstanceUsingAnsible.py
+++ b/xos/observers/base/SyncInstanceUsingAnsible.py
@@ -44,9 +44,11 @@
 
         return o.instance
 
-    def run_playbook(self, o, fields):
+    def run_playbook(self, o, fields, template_name=None):
+        if not template_name:
+            template_name = self.template_name
         tStart = time.time()
-        run_template_ssh(self.template_name, fields)
+        run_template_ssh(template_name, fields)
         logger.info("playbook execution time %d" % int(time.time()-tStart))
 
     def pre_sync_hook(self, o, fields):
@@ -61,14 +63,83 @@
     def prepare_record(self, o):
         pass
 
+    def get_node(self,o):
+        return o.node
+
+    def get_node_key(self, node):
+        return "/root/setup/node_key"
+
+    def get_ansible_fields(self, instance):
+        # return all of the fields that tell Ansible how to talk to the context
+        # that's setting up the container.
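+        #
+        # Illustrative shapes (values hypothetical): a bare-metal container
+        # yields {"hostname": <node>, "baremetal_ssh": True, "instance_name":
+        # "rootcontext", "username": "root", "container_name": <slice>-<id>};
+        # a plain VM gets {"instance_name": <name>, "hostname": <node>,
+        # "instance_id": <nova id>, "username": "ubuntu"}.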
+
+        if (instance.isolation == "vm"):
+            # legacy where container was configured by sync_vcpetenant.py
+
+            fields = { "instance_name": instance.name,
+                       "hostname": instance.node.name,
+                       "instance_id": instance.instance_id,
+                       "username": "ubuntu",
+                     }
+            key_name = self.service_key_name
+        elif (instance.isolation == "container"):
+            # container on bare metal
+            node = self.get_node(instance)
+            hostname = node.name
+            fields = { "hostname": hostname,
+                       "baremetal_ssh": True,
+                       "instance_name": "rootcontext",
+                       "username": "root",
+                       "container_name": "%s-%s" % (instance.slice.name, str(instance.id))
+                     }
+            key_name = self.get_node_key(node)
+        else:
+            # container in a VM
+            if not instance.parent:
+                raise Exception("Container-in-VM has no parent")
+            if not instance.parent.instance_id:
+                raise Exception("Container-in-VM parent is not yet instantiated")
+            if not instance.parent.slice.service:
+                raise Exception("Container-in-VM parent has no service")
+            if not instance.parent.slice.service.private_key_fn:
+                raise Exception("Container-in-VM parent service has no private_key_fn")
+            fields = { "hostname": instance.parent.node.name,
+                       "instance_name": instance.parent.name,
+                       "instance_id": instance.parent.instance_id,
+                       "username": "ubuntu",
+                       "nat_ip": instance.parent.get_ssh_ip(),
+                       "container_name": "%s-%s" % (instance.slice.name, str(instance.id))
+                         }
+            key_name = instance.parent.slice.service.private_key_fn
+
+        if not os.path.exists(key_name):
+            raise Exception("Key %s does not exist" % key_name)
+
+        key = file(key_name).read()
+
+        fields["private_key"] = key
+
+        # now the ceilometer stuff
+
+        cslice = ControllerSlice.objects.get(slice=instance.slice)
+        if not cslice:
+            raise Exception("Controller slice object for %s does not exist" % instance.slice.name)
+
+        cuser = ControllerUser.objects.get(user=instance.creator)
+        if not cuser:
+            raise Exception("Controller user object for %s does not exist" % instance.creator)
+
+        fields.update({"keystone_tenant_id": cslice.tenant_id,
+                       "keystone_user_id": cuser.kuser_id,
+                       "rabbit_user": instance.controller.rabbit_user,
+                       "rabbit_password": instance.controller.rabbit_password,
+                       "rabbit_host": instance.controller.rabbit_host})
+
+        return fields
+
     def sync_record(self, o):
         logger.info("sync'ing object %s" % str(o))
 
-        if not os.path.exists(self.service_key_name):
-            raise Exception("Service key %s does not exist" % self.service_key_name)
-
-        service_key = file(self.service_key_name).read()
-
         self.prepare_record(o)
 
         instance = self.get_instance(o)
@@ -92,25 +163,9 @@
                 self.defer_sync(o, "waiting on instance.instance_name")
                 return
 
-            cslice = ControllerSlice.objects.get(slice=instance.slice)
-            if not cslice:
-                raise Exception("Controller slice object for %s does not exist" % instance.slice.name)
+            fields = self.get_ansible_fields(instance)
 
-            cuser = ControllerUser.objects.get(user=instance.creator)
-            if not cuser:
-                raise Exception("Controller user object for %s does not exist" % instance.creator)
-
-            fields = { "instance_name": instance.name,
-                       "hostname": instance.node.name,
-                       "instance_id": instance.instance_id,
-                       "private_key": service_key,
-                       "keystone_tenant_id": cslice.tenant_id,
-                       "keystone_user_id": cuser.kuser_id,
-                       "rabbit_user": instance.controller.rabbit_user,
-                       "rabbit_password": instance.controller.rabbit_password,
-                       "rabbit_host": instance.controller.rabbit_host,
-                       "ansible_tag": o.__class__.__name__ + "_" + str(o.id)
-                     }
+            fields["ansible_tag"] =  o.__class__.__name__ + "_" + str(o.id)
 
         # If 'o' defines a 'sync_attributes' list, then we'll copy those
         # attributes into the Ansible recipe's field list automatically.
diff --git a/xos/observers/helloworld/helloworld_config b/xos/observers/helloworld/helloworld_config
index 671af51..e32ee0c 100644
--- a/xos/observers/helloworld/helloworld_config
+++ b/xos/observers/helloworld/helloworld_config
@@ -38,6 +38,7 @@
 dependency_graph=/opt/xos/model-deps
 logfile=/var/log/xos_backend.log
 steps_dir=/opt/xos/observers/helloworld/steps
+applist=helloworld
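+# applist: comma-separated list of Django apps whose models the observer
+# loads (read as observer_applist by openstack_observer/event_loop.py)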
 
 [gui]
 disable_minidashboard=True
diff --git a/xos/observers/helloworld/steps/sync_hello.py b/xos/observers/helloworld/steps/sync_hello.py
index 1fb8c2b..7071ea0 100644
--- a/xos/observers/helloworld/steps/sync_hello.py
+++ b/xos/observers/helloworld/steps/sync_hello.py
@@ -18,7 +18,7 @@
     requested_interval=0
     
     def sync_record(self, record):
-        instance = record.sliver_backref        
+        instance = record.instance_backref        
         instance.userData="packages:\n  - apache2\nruncmd:\n  - update-rc.d apache2 enable\n  - service apache2 start\nwrite_files:\n-   content: Hello %s\n    path: /var/www/html/hello.txt"%record.name
         instance.save()
         
diff --git a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
index bbe284f..fb4b73d 100644
--- a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
+++ b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
@@ -15,6 +15,7 @@
       headnode_flat_lan_ip: {{ rabbit_host }}
       ceilometer_client_acess_ip: {{ ceilometer_ip }}
       ceilometer_client_acess_mac: {{ ceilometer_mac }}
+      ceilometer_host_port: {{ ceilometer_port }}
       allowed_tenant_ids:
         {% for allowed_tenant_id in allowed_tenant_ids %}
         - {{ allowed_tenant_id }}
diff --git a/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2 b/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2
index cba6f2a..4c712f1 100644
--- a/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2
+++ b/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2
@@ -9,6 +9,8 @@
 admin_password={{ admin_password }}
 
 [allowed_tenants]
+{% if allowed_tenant_ids %}
 {% for tenant_id in allowed_tenant_ids %}
 {{ tenant_id }}
 {% endfor %}
+{% endif %}
diff --git a/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2 b/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2
index 10d9ef5..f56c247 100755
--- a/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2
+++ b/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2
@@ -15,13 +15,14 @@
 
 MONITORING_CHANNEL=monitoring-channel-{{ unique_id }}
 HEADNODEFLATLANIP={{ headnode_flat_lan_ip }}
+HOST_FORWARDING_PORT_FOR_CEILOMETER={{ ceilometer_host_port }}
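+# e.g. ceilometer_host_port=8888 publishes container port 8000 on host port
+# 8888 via "docker run -p" below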
 
 docker inspect $MONITORING_CHANNEL > /dev/null 2>&1
 if [ "$?" == 1 ]
 then
     #sudo docker build -t monitoring-channel -f Dockerfile.monitoring_channel .
     sudo docker pull srikanthvavila/monitoring-channel
-    docker run -d --name=$MONITORING_CHANNEL --add-host="ctl:$HEADNODEFLATLANIP" --privileged=true -p 8888:8000 srikanthvavila/monitoring-channel
+    docker run -d --name=$MONITORING_CHANNEL --add-host="ctl:$HEADNODEFLATLANIP" --privileged=true -p $HOST_FORWARDING_PORT_FOR_CEILOMETER:8000 srikanthvavila/monitoring-channel
 else
     docker start $MONITORING_CHANNEL
 fi
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.py b/xos/observers/vcpe/steps/sync_vcpetenant.py
index 1a45b54..4f3886e 100644
--- a/xos/observers/vcpe/steps/sync_vcpetenant.py
+++ b/xos/observers/vcpe/steps/sync_vcpetenant.py
@@ -113,19 +113,26 @@
             logger.info("neither bbs_slice nor bbs_server is configured in the vCPE")
 
         vlan_ids = []
+        s_tags = []
+        c_tags = []
         if o.volt:
-            vlan_ids.append(o.volt.vlan_id)
+            vlan_ids.append(o.volt.vlan_id)  # XXX remove this
+            s_tags.append(o.volt.s_tag)
+            c_tags.append(o.volt.c_tag)
 
         try:
             full_setup = Config().observer_full_setup
         except:
             full_setup = True
 
-        fields = {"vlan_ids": vlan_ids,
+        fields = {"vlan_ids": vlan_ids,   # XXX remove this
+                "s_tags": s_tags,
+                "c_tags": c_tags,
                 "dnsdemux_ip": dnsdemux_ip,
                 "cdn_prefixes": cdn_prefixes,
                 "bbs_addrs": bbs_addrs,
-                "full_setup": full_setup}
+                "full_setup": full_setup,
+                "isolation": o.instance.isolation}
 
         # add in the sync_attributes that come from the SubscriberRoot object
 
@@ -203,7 +210,10 @@
         if quick_update:
             logger.info("quick_update triggered; skipping ansible recipe")
         else:
-            super(SyncVCPETenant, self).run_playbook(o, fields)
+            if o.instance.isolation in ["container", "container_vm"]:
+                super(SyncVCPETenant, self).run_playbook(o, fields, "sync_vcpetenant_new.yaml")
+            else:
+                super(SyncVCPETenant, self).run_playbook(o, fields)
 
         o.last_ansible_hash = ansible_hash
 
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.yaml b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
index b5a112a..c3b7246 100644
--- a/xos/observers/vcpe/steps/sync_vcpetenant.yaml
+++ b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
@@ -13,6 +13,14 @@
         {% for vlan_id in vlan_ids %}
         - {{ vlan_id }}
         {% endfor %}
+      c_tags:
+        {% for c_tag in c_tags %}
+        - {{ c_tag }}
+        {% endfor %}
+      s_tags:
+        {% for s_tag in s_tags %}
+        - {{ s_tag }}
+        {% endfor %}
       firewall_rules:
         {% for firewall_rule in firewall_rules.split("\n") %}
         - {{ firewall_rule }}
@@ -109,27 +117,27 @@
 {% endif %}
 
   - name: vCPE upstart
-    template: src=/opt/xos/observers/vcpe/templates/vcpe.conf.j2 dest=/etc/init/vcpe-{{ vlan_ids[0] }}.conf
+    template: src=/opt/xos/observers/vcpe/templates/vcpe.conf.j2 dest=/etc/init/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.conf
 
   - name: vCPE startup script
-    template: src=/opt/xos/observers/vcpe/templates/start-vcpe.sh.j2 dest=/usr/local/sbin/start-vcpe-{{ vlan_ids[0] }}.sh mode=0755
+    template: src=/opt/xos/observers/vcpe/templates/start-vcpe.sh.j2 dest=/usr/local/sbin/start-vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.sh mode=0755
     notify:
 #    - restart vcpe
      - stop vcpe
      - remove container
      - start vcpe
 
-  - name: create /etc/vcpe-{{ vlan_ids[0] }}/dnsmasq.d
-    file: path=/etc/vcpe-{{ vlan_ids[0] }}/dnsmasq.d state=directory owner=root group=root
+  - name: create /etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d
+    file: path=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d state=directory owner=root group=root
 
   - name: vCPE basic dnsmasq config
-    copy: src=/opt/xos/observers/vcpe/files/vcpe.dnsmasq dest=/etc/vcpe-{{ vlan_ids[0] }}/dnsmasq.d/vcpe.conf owner=root group=root
+    copy: src=/opt/xos/observers/vcpe/files/vcpe.dnsmasq dest=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/vcpe.conf owner=root group=root
     notify:
     - stop dnsmasq
     - start dnsmasq
 
   - name: dnsmasq config
-    template: src=/opt/xos/observers/vcpe/templates/dnsmasq_servers.j2 dest=/etc/vcpe-{{ vlan_ids[0] }}/dnsmasq.d/servers.conf owner=root group=root
+    template: src=/opt/xos/observers/vcpe/templates/dnsmasq_servers.j2 dest=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/servers.conf owner=root group=root
     notify:
     - stop dnsmasq
     - start dnsmasq
@@ -143,23 +151,23 @@
 #    template: src=/opt/xos/observers/vcpe/templates/firewall_sample.j2 dest=/etc/firewall_sample owner=root group=root
 
   - name: Make sure vCPE service is running
-    service: name=vcpe-{{ vlan_ids[0] }} state=started
+    service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
 
   handlers:
   - name: stop dnsmasq
-    shell: docker exec vcpe-{{ vlan_ids[0] }} /usr/bin/killall dnsmasq
+    shell: docker exec vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} /usr/bin/killall dnsmasq
 
   - name: start dnsmasq
-    shell: docker exec vcpe-{{ vlan_ids[0] }} /usr/sbin/service dnsmasq start
+    shell: docker exec vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} /usr/sbin/service dnsmasq start
 
   - name: restart vcpe
-    shell: service vcpe-{{ vlan_ids[0] }} stop; sleep 1; service vcpe-{{ vlan_ids[0] }} start
+    shell: service vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} stop; sleep 1; service vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} start
 
   - name: stop vcpe
-    service: name=vcpe-{{ vlan_ids[0] }} state=stopped
+    service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=stopped
 
   - name: remove container
-    docker: name=vcpe-{{ vlan_ids[0] }} state=absent image=docker-vcpe
+    docker: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=absent image=docker-vcpe
 
   - name: start vcpe
-    service: name=vcpe-{{ vlan_ids[0] }} state=started
+    service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml b/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml
new file mode 100644
index 0000000..e96f3c5
--- /dev/null
+++ b/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml
@@ -0,0 +1,85 @@
+---
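+# Playbook for container-based vCPE instances (isolation "container" or
+# "container_vm"): dnsmasq config is written into the container's bind-mounted
+# volume tree under /var/container_volumes/<container_name> instead of a VM
+# filesystem; compare sync_vcpetenant.yaml for the legacy VM path.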
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: {{ username }}
+  sudo: yes
+  vars:
+      container_name: {{ container_name }}
+      cdn_enable: {{ cdn_enable }}
+      dnsdemux_ip: {{ dnsdemux_ip }}
+      firewall_enable: {{ firewall_enable }}
+      url_filter_enable: {{ url_filter_enable }}
+      vlan_ids:
+        {% for vlan_id in vlan_ids %}
+        - {{ vlan_id }}
+        {% endfor %}
+      c_tags:
+        {% for c_tag in c_tags %}
+        - {{ c_tag }}
+        {% endfor %}
+      s_tags:
+        {% for s_tag in s_tags %}
+        - {{ s_tag }}
+        {% endfor %}
+      firewall_rules:
+        {% for firewall_rule in firewall_rules.split("\n") %}
+        - {{ firewall_rule }}
+        {% endfor %}
+      cdn_prefixes:
+        {% for prefix in cdn_prefixes %}
+        - {{ prefix }}
+        {% endfor %}
+      bbs_addrs:
+        {% for bbs_addr in bbs_addrs %}
+        - {{ bbs_addr }}
+        {% endfor %}
+      nat_ip: {{ nat_ip }}
+      nat_mac: {{ nat_mac }}
+      lan_ip: {{ lan_ip }}
+      lan_mac: {{ lan_mac }}
+      wan_ip: {{ wan_ip }}
+      wan_mac: {{ wan_mac }}
+      wan_container_mac: {{ wan_container_mac }}
+      wan_next_hop: 10.0.1.253   # FIX ME
+      private_ip: {{ private_ip }}
+      private_mac: {{ private_mac }}
+      hpc_client_ip: {{ hpc_client_ip }}
+      hpc_client_mac: {{ hpc_client_mac }}
+      keystone_tenant_id: {{ keystone_tenant_id }}
+      keystone_user_id: {{ keystone_user_id }}
+      rabbit_user: {{ rabbit_user }}
+      rabbit_password: {{ rabbit_password }}
+      rabbit_host: {{ rabbit_host }}
+
+  tasks:
+  - name: vCPE basic dnsmasq config
+    copy: src=/opt/xos/observers/vcpe/files/vcpe.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/vcpe.conf owner=root group=root
+    notify:
+    - stop dnsmasq
+    - start dnsmasq
+
+  - name: dnsmasq config
+    template: src=/opt/xos/observers/vcpe/templates/dnsmasq_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/servers.conf owner=root group=root
+    notify:
+    - stop dnsmasq
+    - start dnsmasq
+
+  handlers:
+  - name: stop dnsmasq
+    shell: docker exec {{ container_name }} /usr/bin/killall dnsmasq
+
+  - name: start dnsmasq
+    shell: docker exec {{ container_name }} /usr/sbin/service dnsmasq start
+
+  - name: restart vcpe
+    shell: service {{ container_name }} stop; sleep 1; service {{ container_name }} start
+
+  - name: stop vcpe
+    service: name={{ container_name }} state=stopped
+
+  - name: remove container
+    docker: name={{ container_name }} state=absent image=docker-vcpe
+
+  - name: start vcpe
+    service: name={{ container_name }} state=started
diff --git a/xos/observers/vcpe/templates/start-vcpe.sh.j2 b/xos/observers/vcpe/templates/start-vcpe.sh.j2
index a3533fa..c4128f3 100755
--- a/xos/observers/vcpe/templates/start-vcpe.sh.j2
+++ b/xos/observers/vcpe/templates/start-vcpe.sh.j2
@@ -8,7 +8,9 @@
 iptables -L > /dev/null
 ip6tables -L > /dev/null
 
-VCPE=vcpe-{{ vlan_ids[0] }}
+STAG={{ s_tags[0] }}
+CTAG={{ c_tags[0] }}
+VCPE=vcpe-$STAG-$CTAG
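+# e.g. s_tag=222 and c_tag=111 yield the container/service name "vcpe-222-111"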
 
 docker inspect $VCPE > /dev/null 2>&1
 if [ "$?" == 1 ]
@@ -23,14 +25,23 @@
 WAN_IFACE=$( mac_to_iface {{ wan_mac }} )
 docker exec $VCPE ifconfig eth0 >> /dev/null || pipework $WAN_IFACE -i eth0 $VCPE {{ wan_ip }}/24@{{ wan_next_hop }} {{ wan_container_mac }}
 
-LAN_IFACE=$( mac_to_iface {{ lan_mac }} )
-docker exec $VCPE ifconfig eth1 >> /dev/null || pipework $LAN_IFACE -i eth1 $VCPE 192.168.0.1/24 @{{ vlan_ids[0] }}
+# LAN_IFACE=$( mac_to_iface {{ lan_mac }} )
+# Need to encapsulate VLAN traffic so that Neutron doesn't eat it
+# Assumes that br-lan has been set up appropriately by a previous step
+LAN_IFACE=br-lan
+ifconfig $LAN_IFACE >> /dev/null
+if [ "$?" == 0 ]
+then
+    ifconfig $LAN_IFACE.$STAG >> /dev/null || ip link add link $LAN_IFACE name $LAN_IFACE.$STAG type vlan id $STAG
+    ifconfig $LAN_IFACE.$STAG up
+    docker exec $VCPE ifconfig eth1 >> /dev/null || pipework $LAN_IFACE.$STAG -i eth1 $VCPE 192.168.0.1/24 @$CTAG
+fi
 
-HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
-docker exec $VCPE ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VCPE {{ hpc_client_ip }}/24
+#HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
+#docker exec $VCPE ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VCPE {{ hpc_client_ip }}/24
 
 # Make sure VM's eth0 (hpc_client) has no IP address
-ifconfig $HPC_IFACE 0.0.0.0
+#ifconfig $HPC_IFACE 0.0.0.0
 
 # Now can start up dnsmasq
 docker exec $VCPE service dnsmasq start
diff --git a/xos/observers/vcpe/templates/vcpe.conf.j2 b/xos/observers/vcpe/templates/vcpe.conf.j2
index 1951322..fa7885e 100644
--- a/xos/observers/vcpe/templates/vcpe.conf.j2
+++ b/xos/observers/vcpe/templates/vcpe.conf.j2
@@ -6,5 +6,5 @@
 respawn
 
 script
-  /usr/local/sbin/start-vcpe-{{ vlan_ids[0] }}.sh
+  /usr/local/sbin/start-vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.sh
 end script
diff --git a/xos/openstack_observer/event_loop.py b/xos/openstack_observer/event_loop.py
index 17d5c7a..0ac626b 100644
--- a/xos/openstack_observer/event_loop.py
+++ b/xos/openstack_observer/event_loop.py
@@ -27,6 +27,24 @@
 from toposort import toposort
 from observer.error_mapper import *
 from openstack_observer.openstacksyncstep import OpenStackSyncStep
+from observer.steps.sync_object import SyncObject
+
+# Load app models
+
+try:
+    app_module_names = Config().observer_applist.split(',')
+except AttributeError:
+    app_module_names = []
+
+if (type(app_module_names)!=list):
+    app_module_names=[app_module_names]
+
+app_modules = []
+
+for m in app_module_names:
+    model_path = m+'.models'
+    module = __import__(model_path,fromlist=[m])
+    app_modules.append(module)
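+
+# e.g. observer_applist=helloworld imports helloworld.models so that phantom
+# SyncObject steps can resolve app-defined model classes by name (see
+# lookup_step below).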
 
 
 debug_mode = False
@@ -166,6 +184,7 @@
 					provides_dict[m.__name__]=[s.__name__]
 
 		step_graph = {}
+		phantom_steps = []
 		for k,v in self.model_dependency_graph.items():
 			try:
 				for source in provides_dict[k]:
@@ -183,7 +202,12 @@
 									step_graph[source]=[dest]
 						except KeyError:
 							if (not provides_dict.has_key(m)):
-								step_graph[source]='#%s'%m	
+								try:
+									step_graph[source]+=['#%s'%m]
+								except KeyError:
+									step_graph[source]=['#%s'%m]
+
+								phantom_steps+=['#%s'%m]
 							pass
 					
 			except KeyError:
@@ -196,7 +220,8 @@
 
 		pp = pprint.PrettyPrinter(indent=4)
                 logger.info(pp.pformat(step_graph))
-		self.ordered_steps = toposort(self.dependency_graph, map(lambda s:s.__name__,self.sync_steps))
+		self.ordered_steps = toposort(self.dependency_graph, phantom_steps+map(lambda s:s.__name__,self.sync_steps))
+		self.ordered_steps = [i for i in self.ordered_steps if i!='SyncObject']
 
 		logger.info("Order of steps=%s" % self.ordered_steps)
 
@@ -245,15 +270,31 @@
 			for e in self.ordered_steps:
 				self.last_deletion_run_times[e]=0
 
+	def lookup_step_class(self,s):
+		if ('#' in s):
+			return SyncObject
+		else:
+			return self.step_lookup[s]
+
 	def lookup_step(self,s):
 		if ('#' in s):
 			objname = s[1:]
 			so = SyncObject()
-			so.provides=[globals()[objname]]
-			so.observes=globals()[objname]
+			obj = None
+			try:
+				obj = globals()[objname]
+			except KeyError:
+				for m in app_modules:
+					if hasattr(m, objname):
+						obj = getattr(m, objname)
+			if obj is None:
+				raise Exception("Could not resolve model %s from globals or app modules" % objname)
+
+			so.provides=[obj]
+			so.observes=[obj]
 			step = so
 		else:
-			step = self.step_lookup[s]
+			step_class = self.step_lookup[s]
+			step = step_class(driver=self.driver, error_map=self.error_mapper)
 		return step
 			
 	def save_run_times(self):
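
A note on the split above: lookup_step_class is for call sites that only need
the class (for example, to read __name__), while lookup_step builds a runnable
instance, wiring in the driver and error map and, for phantom '#Model' names,
pointing a SyncObject at the resolved model. A hedged sketch, assuming an
observer exposing both methods:

    def resolve(observer, name):
        step_class = observer.lookup_step_class(name)  # SyncObject for "#VOLTTenant"
        step = observer.lookup_step(name)              # configured, runnable instance
        assert isinstance(step, step_class)
        return step
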
@@ -275,7 +316,7 @@
 
 	def sync(self, S, deletion):
             try:
-		step = self.lookup_step(S)
+		step = self.lookup_step_class(S)
 		start_time=time.time()
 
                 logger.info("Starting to work on step %s, deletion=%s" % (step.__name__, str(deletion)))
@@ -324,16 +365,20 @@
 			self.failed_steps.append(step)
 			my_status = STEP_STATUS_KO
 		else:
-			sync_step = step(driver=self.driver,error_map=self.error_mapper)
+			sync_step = self.lookup_step(S)
 			sync_step. __name__= step.__name__
 			sync_step.dependencies = []
 			try:
 				mlist = sync_step.provides
 
-				for m in mlist:
-				        lst =  self.model_dependency_graph[m.__name__]
-			                nlst = map(lambda(a,b):b,lst)
-					sync_step.dependencies.extend(nlst)
+				try:
+					for m in mlist:
+						lst = self.model_dependency_graph[m.__name__]
+						nlst = map(lambda (a,b): b, lst)
+						sync_step.dependencies.extend(nlst)
+				except Exception, e:
+					raise e
+
 			except KeyError:
 				pass
 			sync_step.debug_mode = debug_mode
diff --git a/xos/openstack_observer/steps/sync_container.py b/xos/openstack_observer/steps/sync_container.py
index de4a2ce..272e5f8 100644
--- a/xos/openstack_observer/steps/sync_container.py
+++ b/xos/openstack_observer/steps/sync_container.py
@@ -6,9 +6,10 @@
 import time
 from django.db.models import F, Q
 from xos.config import Config
-from observer.syncstep import SyncStep
+from observers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from observer.syncstep import SyncStep, DeferredException
 from observer.ansible import run_template_ssh
-from core.models import Service, Slice, Container
+from core.models import Service, Slice, Instance
 from services.onos.models import ONOSService, ONOSApp
 from util.logger import Logger, logging
 
@@ -18,88 +19,102 @@
 
 logger = Logger(level=logging.INFO)
 
-class SyncContainer(SyncStep):
-    provides=[Container]
-    observes=Container
+class SyncContainer(SyncInstanceUsingAnsible):
+    provides=[Instance]
+    observes=Instance
     requested_interval=0
     template_name = "sync_container.yaml"
 
     def __init__(self, *args, **kwargs):
         super(SyncContainer, self).__init__(*args, **kwargs)
 
-#    def fetch_pending(self, deleted):
-#        if (not deleted):
-#            objs = ONOSService.get_service_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
-#        else:
-#            objs = ONOSService.get_deleted_service_objects()
-#
-#        return objs
-
-    def get_node(self,o):
-        return o.node
-
-    def get_node_key(self, node):
-        return "/root/setup/node_key"
-        #return "/opt/xos/node-key"
+    def fetch_pending(self, deletion=False):
+        objs = super(SyncContainer, self).fetch_pending(deletion)
+        objs = [x for x in objs if x.isolation in ["container", "container_vm"]]
+        return objs
 
     def get_instance_port(self, container_port):
-        print container_port
-        print container_port.network
         for p in container_port.network.links.all():
-            if (p.instance) and (p.instance.node == container_port.container.node) and (p.mac):
+            if (p.instance) and (p.instance.isolation=="vm") and (p.instance.node == container_port.instance.node) and (p.mac):
                 return p
         return None
 
+    def get_parent_port_mac(self, instance, port):
+        if not instance.parent:
+            raise Exception("instance has no parent")
+        for parent_port in instance.parent.ports.all():
+            if parent_port.network == port.network:
+                if not parent_port.mac:
+                    raise DeferredException("parent port on network %s does not have mac yet" % parent_port.network.name)
+                return parent_port.mac
+        raise Exception("failed to find corresponding parent port for network %s" % port.network.name)
+
     def get_ports(self, o):
         i=0
         ports = []
         for port in o.ports.all():
-            if not port.mac:
-                raise Exception("Port on network %s is not yet ready" % port.network.name)
+            if (not port.ip):
+                # 'unmanaged' ports may have an ip, but no mac
+                # XXX: are there any ports that have a mac but no ip?
+                raise DeferredException("Port on network %s is not yet ready" % port.network.name)
 
             pd={}
-            pd["device"] = "eth%d" % i
-            pd["mac"] = port.mac
-            pd["ip"] = port.ip
+            pd["mac"] = port.mac or ""
+            pd["ip"] = port.ip or ""
+            pd["xos_network_id"] = port.network.id
 
-            instance_port = self.get_instance_port(port)
-            if not instance_port:
-                raise Exception("No instance on slice for port on network %s" % port.network.name)
+            if port.network.name == "wan_network":
+                if port.ip:
+                    (a, b, c, d) = port.ip.split('.')
+                    pd["mac"] = "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))
 
-            pd["snoop_instance_mac"] = instance_port.mac
-            pd["snoop_instance_id"] = instance_port.instance.instance_id
+
+            if o.isolation == "container":
+                # container on bare metal
+                instance_port = self.get_instance_port(port)
+                if not instance_port:
+                    raise DeferredException("No instance on slice for port on network %s" % port.network.name)
+
+                pd["snoop_instance_mac"] = instance_port.mac
+                pd["snoop_instance_id"] = instance_port.instance.instance_id
+                pd["src_device"] = ""
+                pd["bridge"] = "br-int"
+            else:
+                # container in VM
+                pd["snoop_instance_mac"] = ""
+                pd["snoop_instance_id"] = ""
+                pd["parent_mac"] = self.get_parent_port_mac(o, port)
+                pd["bridge"] = ""
+
+            for (k,v) in port.get_parameters().items():
+                pd[k] = v
 
             ports.append(pd)
+
+        # for any ports that don't have a device, assign one
+        used_ports = [x["device"] for x in ports if ("device" in x)]
+        avail_ports = ["eth%d"%i for i in range(0,64) if ("eth%d"%i not in used_ports)]
+        for port in ports:
+            if not port.get("device",None):
+                port["device"] = avail_ports.pop(0)
+
         return ports
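
Two behaviors in get_ports are worth pinning down with a standalone sketch: a
wan_network port derives its MAC from the port's IP (02:42 followed by the four
octets in hex), and any port left without a device name is assigned the first
free ethN. A minimal illustration:

    def wan_mac_from_ip(ip):
        # 02:42 prefix plus the IP's four octets rendered in hex.
        (a, b, c, d) = ip.split(".")
        return "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))

    def assign_devices(ports):
        # Fill in a "device" for any port dict that lacks one, skipping
        # names already taken by other ports.
        used = [p["device"] for p in ports if "device" in p]
        avail = ["eth%d" % i for i in range(64) if "eth%d" % i not in used]
        for p in ports:
            if not p.get("device"):
                p["device"] = avail.pop(0)

    assert wan_mac_from_ip("10.0.1.5") == "02:42:0a:00:01:05"
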
 
     def get_extra_attributes(self, o):
         fields={}
         fields["ansible_tag"] = "container-%s" % str(o.id)
-        fields["baremetal_ssh"] = True
-        fields["instance_name"] = "rootcontext"
-        fields["container_name"] = "%s-%s" % (o.slice.name, str(o.id))
-        fields["docker_image"] = o.docker_image
-        fields["username"] = "root"
+        fields["docker_image"] = o.image.name
         fields["ports"] = self.get_ports(o)
+        if o.volumes:
+            fields["volumes"] = [x.strip() for x in o.volumes.split(",")]
+        else:
+            fields["volumes"] = ""
         return fields
 
-    def sync_fields(self, o, fields):
-        self.run_playbook(o, fields)
-
     def sync_record(self, o):
         logger.info("sync'ing object %s" % str(o))
 
-        node = self.get_node(o)
-        node_key_name = self.get_node_key(node)
-
-        if not os.path.exists(node_key_name):
-            raise Exception("Node key %s does not exist" % node_key_name)
-
-        node_key = file(node_key_name).read()
-
-        fields = { "hostname": node.name,
-                   "private_key": node_key,
-                 }
+        fields = self.get_ansible_fields(o)
 
         # If 'o' defines a 'sync_attributes' list, then we'll copy those
         # attributes into the Ansible recipe's field list automatically.
@@ -109,14 +124,33 @@
 
         fields.update(self.get_extra_attributes(o))
 
-        self.sync_fields(o, fields)
+        self.run_playbook(o, fields)
+
+        o.instance_id = fields["container_name"]
+        o.instance_name = fields["container_name"]
 
         o.save()
 
-    def run_playbook(self, o, fields):
+    def delete_record(self, o):
+        logger.info("delete'ing object %s" % str(o))
+
+        fields = self.get_ansible_fields(o)
+
+        # If 'o' defines a 'sync_attributes' list, then we'll copy those
+        # attributes into the Ansible recipe's field list automatically.
+        if hasattr(o, "sync_attributes"):
+            for attribute_name in o.sync_attributes:
+                fields[attribute_name] = getattr(o, attribute_name)
+
+        fields.update(self.get_extra_attributes(o))
+
+        self.run_playbook(o, fields, "teardown_container.yaml")
+
+    def run_playbook(self, o, fields, template_name=None):
+        if not template_name:
+            template_name = self.template_name
         tStart = time.time()
-        run_template_ssh(self.template_name, fields, path="container")
+        run_template_ssh(template_name, fields, path="container")
         logger.info("playbook execution time %d" % int(time.time()-tStart))
 
-    def delete_record(self, m):
-        pass
+
diff --git a/xos/openstack_observer/steps/sync_container.yaml b/xos/openstack_observer/steps/sync_container.yaml
index a707d0b..b60ffb8 100644
--- a/xos/openstack_observer/steps/sync_container.yaml
+++ b/xos/openstack_observer/steps/sync_container.yaml
@@ -11,10 +11,20 @@
     ports:
     {% for port in ports %}
        - device: {{ port.device }}
-         mac: {{ port.mac }}
+         xos_network_id: {{ port.xos_network_id }}
+         mac: {{ port.mac|default("") }}
          ip: {{ port.ip }}
          snoop_instance_mac: {{ port.snoop_instance_mac }}
          snoop_instance_id: {{ port.snoop_instance_id }}
+         parent_mac: {{ port.parent_mac|default("") }}
+         s_tag: {{ port.s_tag|default("") }}
+         c_tag: {{ port.c_tag|default("") }}
+         next_hop: {{ port.next_hop|default("") }}
+         bridge: {{ port.bridge }}
+    {% endfor %}
+    volumes:
+    {% for volume in volumes %}
+       - {{ volume }}
     {% endfor %}
 
   tasks:
@@ -74,6 +84,10 @@
 #      state: running
 #      image: {{ docker_image }}
 
+  - name: check if systemd is installed
+    stat: path=/usr/bin/systemctl
+    register: systemctl
+
   - name: container upstart
     template: src=/opt/xos/openstack_observer/templates/container.conf.j2 dest=/etc/init/container-{{ container_name }}.conf
 
@@ -83,8 +97,18 @@
   - name: container startup script
     template: src=/opt/xos/openstack_observer/templates/start-container.sh.j2 dest=/usr/local/sbin/start-container-{{ container_name }}.sh mode=0755
 
+  - name: container teardown script
+    template: src=/opt/xos/openstack_observer/templates/stop-container.sh.j2 dest=/usr/local/sbin/stop-container-{{ container_name }}.sh mode=0755
+
   - name: restart systemd
     shell: systemctl daemon-reload
+    when: systemctl.stat.exists
+
+{% if ports %}
+  - name: make sure bridges are setup
+    shell: ifconfig {{ '{{' }} item.bridge {{ '}}' }}
+    with_items: "ports"
+{% endif %}
 
   - name: Make sure container is running
     service: name=container-{{ container_name }} state=started
diff --git a/xos/openstack_observer/steps/sync_instances.py b/xos/openstack_observer/steps/sync_instances.py
index 1209448..815c83e 100644
--- a/xos/openstack_observer/steps/sync_instances.py
+++ b/xos/openstack_observer/steps/sync_instances.py
@@ -22,6 +22,11 @@
     observes=Instance
     playbook='sync_instances.yaml'
 
+    def fetch_pending(self, deletion=False):
+        objs = super(SyncInstances, self).fetch_pending(deletion)
+        objs = [x for x in objs if x.isolation=="vm"]
+        return objs
+
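
With the matching filter added to SyncContainer.fetch_pending, the two steps
now partition pending Instances by isolation: SyncInstances handles "vm",
SyncContainer handles "container" and "container_vm". A sketch of the
partition, assuming Instance objects carrying an isolation field:

    def partition_by_isolation(instances):
        vms = [i for i in instances if i.isolation == "vm"]
        containers = [i for i in instances
                      if i.isolation in ("container", "container_vm")]
        return (vms, containers)
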
     def get_userdata(self, instance, pubkeys):
         userdata = '#cloud-config\n\nopencloud:\n   slicename: "%s"\n   hostname: "%s"\n   restapi_hostname: "%s"\n   restapi_port: "%s"\n' % (instance.slice.name, instance.node.name, RESTAPI_HOSTNAME, str(RESTAPI_PORT))
         userdata += 'ssh_authorized_keys:\n'
@@ -61,7 +66,7 @@
             if controller_network.network.template.visibility == 'private' and \
                controller_network.network.template.translation == 'none':
                    if not controller_network.net_id:
-                        raise DeferredException("Private Network %s has no id; Try again later" % controller_network.network.name)
+                        raise DeferredException("Instance %s Private Network %s has no id; Try again later" % (instance, controller_network.network.name))
                    nics.append(controller_network.net_id)
 
         # now include network template
diff --git a/xos/openstack_observer/steps/sync_object.py b/xos/openstack_observer/steps/sync_object.py
index 5e70464..a289c95 100644
--- a/xos/openstack_observer/steps/sync_object.py
+++ b/xos/openstack_observer/steps/sync_object.py
@@ -17,4 +17,4 @@
     observes=[] # Caller fills this in
 
     def sync_record(self, r):
-        raise Exception('Waiting for Service dependency')
+        raise DeferredException('Waiting for Service dependency: %r'%r)
diff --git a/xos/openstack_observer/steps/sync_ports.py b/xos/openstack_observer/steps/sync_ports.py
index 7b20d29..bfdde8c 100644
--- a/xos/openstack_observer/steps/sync_ports.py
+++ b/xos/openstack_observer/steps/sync_ports.py
@@ -144,14 +144,11 @@
 
         # For ports that were created by the user, find that ones
         # that don't have neutron ports, and create them.
-        for port in Port.objects.filter(Q(port_id__isnull=True), Q(instance__isnull=False) | Q(container__isnull=False)):
+        for port in Port.objects.filter(Q(port_id__isnull=True), Q(instance__isnull=False) ):
             logger.info("XXX working on port %s" % port)
-            if port.instance:
-                controller = port.instance.node.site_deployment.controller
-                slice = port.instance.slice
-            else:
-                controller = port.container.node.site_deployment.controller
-                slice = port.container.slice
+            controller = port.instance.node.site_deployment.controller
+            slice = port.instance.slice
+
             if controller:
                 cn=port.network.controllernetworks.filter(controller=controller)
                 if not cn:
@@ -188,10 +185,6 @@
                     if neutron_port["fixed_ips"]:
                         port.ip = neutron_port["fixed_ips"][0]["ip_address"]
                     port.mac = neutron_port["mac_address"]
-
-                    neutron_network = driver.shell.quantum.list_networks(cn.net_id)["networks"][0]
-                    if "provider:segmentation_id" in neutron_network:
-                        port.segmentation_id = neutron_network["provider:segmentation_id"]
                 except:
                     logger.log_exc("failed to create neutron port for %s" % port)
                     continue
diff --git a/xos/openstack_observer/steps/teardown_container.yaml b/xos/openstack_observer/steps/teardown_container.yaml
new file mode 100644
index 0000000..5cabc78
--- /dev/null
+++ b/xos/openstack_observer/steps/teardown_container.yaml
@@ -0,0 +1,33 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: {{ username }}
+  sudo: yes
+
+  vars:
+    container_name: {{ container_name }}
+    docker_image: {{ docker_image }}
+    ports:
+    {% for port in ports %}
+       - device: {{ port.device }}
+         xos_network_id: {{ port.xos_network_id }}
+         mac: {{ port.mac|default("") }}
+         ip: {{ port.ip }}
+         snoop_instance_mac: {{ port.snoop_instance_mac }}
+         snoop_instance_id: {{ port.snoop_instance_id }}
+         parent_mac: {{ port.parent_mac|default("") }}
+         s_tag: {{ port.s_tag|default("") }}
+         c_tag: {{ port.c_tag|default("") }}
+         next_hop: {{ port.next_hop|default("") }}
+         bridge: {{ port.bridge }}
+    {% endfor %}
+    volumes:
+    {% for volume in volumes %}
+       - {{ volume }}
+    {% endfor %}
+
+  tasks:
+  - name: Make sure container is stopped
+    service: name=container-{{ container_name }} state=stopped
+
diff --git a/xos/openstack_observer/syncstep.py b/xos/openstack_observer/syncstep.py
index 7accbfa..21327d7 100644
--- a/xos/openstack_observer/syncstep.py
+++ b/xos/openstack_observer/syncstep.py
@@ -8,6 +8,7 @@
 from core.models import *
 from django.db import reset_queries
 from observer.ansible import *
+from dependency_walker import *
 
 import json
 import time
@@ -101,12 +102,26 @@
         for dep in self.dependencies:
             peer_name = dep[0].lower() + dep[1:]    # django names are camelCased with the first letter lower
 
+            peer_objects=[]
             try:
-                peer_object = deepgetattr(obj, peer_name)
+                peer_names = plural(peer_name)
+                peer_object_list=[]
+
                 try:
-                    peer_objects = peer_object.all()
-                except AttributeError:
-                    peer_objects = [peer_object]
+                    peer_object_list.append(deepgetattr(obj, peer_name))
+                except:
+                    pass
+
+                try:
+                    peer_object_list.append(deepgetattr(obj, peer_names))
+                except:
+                    pass
+
+                for peer_object in peer_object_list:
+                    try:
+                        peer_objects.extend(peer_object.all())
+                    except AttributeError:
+                        peer_objects.append(peer_object)
             except:
                 peer_objects = []
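
The reworked dependency walk above tries both the singular and plural attribute
names before flattening related managers and plain references into one list.
A simplified stand-in (plural() and deepgetattr() are XOS helpers from
dependency_walker; getattr stands in for deepgetattr here):

    def gather_peers(obj, peer_name, plural):
        candidates = []
        for attr in (peer_name, plural(peer_name)):
            try:
                candidates.append(getattr(obj, attr))
            except AttributeError:
                pass
        peers = []
        for candidate in candidates:
            try:
                peers.extend(candidate.all())   # Django related manager
            except AttributeError:
                peers.append(candidate)         # plain foreign-key reference
        return peers
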
 
@@ -174,6 +189,9 @@
                 pass
 
     def call(self, failed=[], deletion=False):
+        #if ('Instance' in self.__class__.__name__):
+        #    pdb.set_trace()
+
         pending = self.fetch_pending(deletion)
 
         for o in pending:
diff --git a/xos/openstack_observer/templates/container.conf.j2 b/xos/openstack_observer/templates/container.conf.j2
index d3ef42d..7cbb880 100644
--- a/xos/openstack_observer/templates/container.conf.j2
+++ b/xos/openstack_observer/templates/container.conf.j2
@@ -6,6 +6,9 @@
 respawn
 
 script
-  /usr/local/sbin/start-container-{{ container_name }}.sh
+  /usr/local/sbin/start-container-{{ container_name }}.sh ATTACH
 end script
 
+post-stop script
+  /usr/local/sbin/stop-container-{{ container_name }}.sh
+end script
\ No newline at end of file
diff --git a/xos/openstack_observer/templates/container.service.j2 b/xos/openstack_observer/templates/container.service.j2
index 9e2b83c..817d6d7 100644
--- a/xos/openstack_observer/templates/container.service.j2
+++ b/xos/openstack_observer/templates/container.service.j2
@@ -3,7 +3,9 @@
 After=docker.service
 
 [Service]
-ExecStart=/bin/bash -c "/usr/local/sbin/start-container-{{ container_name }}.sh"
+ExecStart=/bin/bash -c "/usr/local/sbin/start-container-{{ container_name }}.sh ATTACH"
+ExecStop=/bin/bash -c "/usr/local/sbin/stop-container-{{ container_name }}.sh"
+SuccessExitStatus=0 137
 
 [Install]
 WantedBy=multi-user.target
diff --git a/xos/openstack_observer/templates/start-container.sh.j2 b/xos/openstack_observer/templates/start-container.sh.j2
index 5656992..260666c 100644
--- a/xos/openstack_observer/templates/start-container.sh.j2
+++ b/xos/openstack_observer/templates/start-container.sh.j2
@@ -6,39 +6,125 @@
 CONTAINER={{ container_name }}
 IMAGE={{ docker_image }}
 
+function mac_to_iface {
+    PARENT_MAC=$1
+    ifconfig|grep $PARENT_MAC| awk '{print $1}'|grep -v '\.'
+}
+
+function encapsulate_stag {
+    LAN_IFACE=$1
+    STAG=$2
+    ifconfig $LAN_IFACE >> /dev/null
+    if [ "$?" == 0 ]; then
+        STAG_IFACE=$LAN_IFACE.$STAG
+        ifconfig $LAN_IFACE up
+        ifconfig $STAG_IFACE >> /dev/null
+        if [ "$?" == 0 ]; then
+            echo $STAG_IFACE is already created
+        else
+            ip link add link $LAN_IFACE name $STAG_IFACE type vlan id $STAG
+        fi
+        ifconfig $STAG_IFACE up
+    else
+        echo "There is no $LAN_IFACE. Aborting."
+        exit 1
+    fi
+}
+
+
+{% if volumes %}
+{% for volume in volumes %}
+DEST_DIR=/var/container_volumes/$CONTAINER/{{ volume }}
+mkdir -p $DEST_DIR
+VOLUME_ARGS="$VOLUME_ARGS -v $DEST_DIR:{{ volume }}"
+{% endfor %}
+{% endif %}
+
 docker inspect $CONTAINER > /dev/null 2>&1
 if [ "$?" == 1 ]
 then
     docker pull $IMAGE
-    docker run -d --name=$CONTAINER --privileged=true --net=none $IMAGE
+    docker run -d --name=$CONTAINER --privileged=true --net=none $VOLUME_ARGS $IMAGE
 else
     docker start $CONTAINER
 fi
 
 {% if ports %}
 {% for port in ports %}
+
+{% if port.next_hop %}
+NEXTHOP_ARG="@{{ port.next_hop }}"
+{% else %}
+NEXTHOP_ARG=""
+{% endif %}
+
+{% if port.c_tag %}
+CTAG_ARG="@{{ port.c_tag }}"
+{% else %}
+CTAG_ARG=""
+{% endif %}
+
+{% if port.parent_mac %}
+# container-in-VM
+SRC_DEV=$( mac_to_iface "{{ port.parent_mac }}" )
+CMD="docker exec $CONTAINER ifconfig $SRC_DEV >> /dev/null || pipework $SRC_DEV -i {{ port.device }} $CONTAINER {{ port.ip }}/24$NEXTHOP_ARG {{ port.mac }} $CTAG_ARG"
+echo $CMD
+eval $CMD
+
+{% else %}
+# container-on-metal
 IP="{{ port.ip }}"
+{% if port.mac %}
 MAC="{{ port.mac }}"
+{% else %}
+MAC=""
+{% endif %}
+
 DEVICE="{{ port.device }}"
+BRIDGE="{{ port.bridge }}"
+
+{% if port.s_tag %}
+# This is intended for lan_network. Assume that BRIDGE is set to br_lan. We
+# create a device that strips off the S-TAG.
+STAG="{{ port.s_tag }}"
+encapsulate_stag $BRIDGE $STAG
+SRC_DEV=$STAG_IFACE
+{% else %}
+# This is for a standard neutron private network. We use a donor VM to set up
+# openvswitch for us, snoop at its devices, and create a tap with the same
+# settings.
+XOS_NETWORK_ID="{{ port.xos_network_id }}"
 INSTANCE_MAC="{{ port.snoop_instance_mac }}"
 INSTANCE_ID="{{ port.snoop_instance_id }}"
 INSTANCE_TAP=`virsh domiflist $INSTANCE_ID | grep -i $INSTANCE_MAC | awk '{print $1}'`
 INSTANCE_TAP=${INSTANCE_TAP:3}
 VLAN_ID=`ovs-vsctl show | grep -i -A 1 port.*$INSTANCE_TAP | grep -i tag | awk '{print $2}'`
-TAP="con`echo $CONTAINER_$DEVICE|md5sum|awk '{print $1}'`"
-TAP=${TAP:0:12}
+# One tap for all containers per XOS/neutron network. The VLAN_ID is included
+# in the hash to cover the case where XOS is reinstalled and the XOS network
+# ids get reused.
+TAP="con`echo ${XOS_NETWORK_ID}_$VLAN_ID|md5sum|awk '{print $1}'`"
+TAP=${TAP:0:10}
 echo im=$INSTANCE_MAC ii=$INSTANCE_ID it=$INSTANCE_TAP vlan=$VLAN_ID tap=$TAP con=$CONTAINER dev=$DEVICE mac=$MAC
 ovs-vsctl show | grep -i $TAP
 if [[ $? == 1 ]]; then
     echo creating tap
-    ovs-vsctl add-port br-int $TAP tag=$VLAN_ID -- set interface $TAP type=internal
+    ovs-vsctl add-port $BRIDGE $TAP tag=$VLAN_ID -- set interface $TAP type=internal
 else
     echo tap exists
 fi
+SRC_DEV=$TAP
+{% endif %}
 
-docker exec $CONTAINER ifconfig $DEVICE >> /dev/null || pipework $TAP -i $DEVICE $CONTAINER $IP/24 $MAC
+CMD="docker exec $CONTAINER ifconfig $DEVICE >> /dev/null || pipework $SRC_DEV -i $DEVICE $CONTAINER $IP/24$NEXTHOP_ARG $MAC $CTAG_ARG"
+echo $CMD
+eval $CMD
+{% endif %}
 {% endfor %}
 {% endif %}
 
 # Attach to container
-# docker start -a $CONTAINER
+# (this is only done when using upstart, since upstart expects to be attached
+#  to a running service)
+if [[ "$1" == "ATTACH" ]]; then
+    docker start -a $CONTAINER
+fi
+
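
The tap-naming change is subtle enough to restate: one tap is shared by all
containers on the same XOS/neutron network, and the name hashes both the XOS
network id and the OVS VLAN id, truncated to 10 characters. An equivalent
computation in Python (note that the shell's echo appends a newline before
md5sum sees the string):

    import hashlib

    def tap_name(xos_network_id, vlan_id):
        payload = "%s_%s\n" % (xos_network_id, vlan_id)  # echo adds the newline
        digest = hashlib.md5(payload.encode()).hexdigest()
        return ("con" + digest)[:10]
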
diff --git a/xos/openstack_observer/templates/stop-container.sh.j2 b/xos/openstack_observer/templates/stop-container.sh.j2
new file mode 100644
index 0000000..9cabb00
--- /dev/null
+++ b/xos/openstack_observer/templates/stop-container.sh.j2
@@ -0,0 +1,4 @@
+CONTAINER={{ container_name }}
+
+docker stop $CONTAINER
+docker rm $CONTAINER
diff --git a/xos/tosca/custom_types/xos.m4 b/xos/tosca/custom_types/xos.m4
index a661af7..4f726fc 100644
--- a/xos/tosca/custom_types/xos.m4
+++ b/xos/tosca/custom_types/xos.m4
@@ -47,6 +47,10 @@
                 type: string
                 required: false
                 description: Public key to install into Instances to allows Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
             versionNumber:
                 type: string
                 required: false
@@ -136,6 +140,9 @@
             dependencies:
                 type: string
                 required: false
+            config_network-cfg.json:
+                type: string
+                required: false
 
     tosca.nodes.VCPEService:
         description: >
@@ -233,10 +240,14 @@
             specific vlan_id.
         properties:
             xos_base_tenant_props
-            vlan_id:
+            s_tag:
                 type: string
                 required: false
-                description: vlan_id for connection to subscriber household.
+                description: s_tag; identifies which volt port the subscriber is attached to
+            c_tag:
+                type: string
+                required: false
+                description: c_tag; identifies the subscriber within the s_tag
 
     tosca.nodes.User:
         derived_from: tosca.nodes.Root
@@ -285,6 +296,17 @@
                 required: false
                 description: Indicates what page the user should go to on login.
 
+    tosca.nodes.NetworkParameterType:
+        derived_from: tosca.nodes.Root
+
+        description: >
+            An XOS network parameter type. May be applied to Networks and/or
+            Ports.
+
+        capabilities:
+            network_parameter_type:
+                type: tosca.capabilities.xos.NetworkParameterType
+
     tosca.nodes.NetworkTemplate:
         derived_from: tosca.nodes.Root
 
@@ -440,6 +462,10 @@
             image:
                 type: tosca.capabilities.xos.Image
         properties:
+            kind:
+                type: string
+                required: false
+                description: Type of image (container | VM)
             disk_format:
                 type: string
                 required: false
@@ -562,6 +588,10 @@
                 type: integer
                 default: 10
                 description: Quota of instances that this slice may create.
+            default_isolation:
+                type: string
+                required: false
+                description: default isolation to use when bringing up instances (defaults to 'vm')
 
     tosca.nodes.Node:
         derived_from: tosca.nodes.Root
@@ -591,6 +621,31 @@
                 required: false
                 description: URL to the dashboard
 
+    tosca.nodes.Compute.Container:
+      derived_from: tosca.nodes.Compute
+      description: >
+        The TOSCA Compute.Container node represents a container on bare metal.
+      attributes:
+        private_address:
+          type: string
+        public_address:
+          type: string
+      capabilities:
+          host:
+             type: tosca.capabilities.Container
+          binding:
+             type: tosca.capabilities.network.Bindable
+          os:
+             type: tosca.capabilities.OperatingSystem
+          scalable:
+             type: tosca.capabilities.Scalable
+      requirements:
+        - local_storage:
+            capability: tosca.capabilities.Attachment
+            node: tosca.nodes.BlockStorage
+            relationship: tosca.relationships.AttachesTo
+            occurrences: [0, UNBOUNDED]
+
     tosca.relationships.MemberOfSlice:
         derived_from: tosca.relationships.Root
         valid_target_types: [ tosca.capabilities.xos.Slice ]
@@ -631,6 +686,10 @@
         derived_from: tosca.relationships.Root
         valid_target_types: [ tosca.capabilities.xos.Network ]
 
+    tosca.relationships.UsesImage:
+        derived_from: tosca.relationships.Root
+        valid_target_types: [ tosca.capabilities.xos.Image ]
+
     tosca.relationships.SupportsImage:
         derived_from: tosca.relationships.Root
         valid_target_types: [ tosca.capabilities.xos.Image ]
@@ -726,3 +785,7 @@
     tosca.capabilities.xos.DashboardView:
         derived_from: tosca.capabilities.Root
         description: An XOS DashboardView
+
+    tosca.capabilities.xos.NetworkParameterType:
+        derived_from: tosca.capabilities.Root
+        description: An XOS NetworkParameterType
diff --git a/xos/tosca/custom_types/xos.yaml b/xos/tosca/custom_types/xos.yaml
index 9170ecf..246c922 100644
--- a/xos/tosca/custom_types/xos.yaml
+++ b/xos/tosca/custom_types/xos.yaml
@@ -52,6 +52,10 @@
                 type: string
                 required: false
                 description: Public key to install into Instances to allows Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
             versionNumber:
                 type: string
                 required: false
@@ -90,6 +94,10 @@
                 type: string
                 required: false
                 description: Public key to install into Instances to allows Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
             versionNumber:
                 type: string
                 required: false
@@ -154,6 +162,9 @@
             dependencies:
                 type: string
                 required: false
+            config_network-cfg.json:
+                type: string
+                required: false
 
     tosca.nodes.VCPEService:
         description: >
@@ -188,6 +199,10 @@
                 type: string
                 required: false
                 description: Public key to install into Instances to allows Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
             versionNumber:
                 type: string
                 required: false
@@ -230,6 +245,10 @@
                 type: string
                 required: false
                 description: Public key to install into Instances to allows Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
             versionNumber:
                 type: string
                 required: false
@@ -272,6 +291,10 @@
                 type: string
                 required: false
                 description: Public key to install into Instances to allows Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
             versionNumber:
                 type: string
                 required: false
@@ -361,10 +384,14 @@
                 type: string
                 required: false
                 description: Service specific ID opaque to XOS but meaningful to service
-            vlan_id:
+            s_tag:
                 type: string
                 required: false
-                description: vlan_id for connection to subscriber household.
+                description: s_tag; identifies which volt port the subscriber is attached to
+            c_tag:
+                type: string
+                required: false
+                description: c_tag; identifies the subscriber within the s_tag
 
     tosca.nodes.User:
         derived_from: tosca.nodes.Root
@@ -413,6 +440,17 @@
                 required: false
                 description: Indicates what page the user should go to on login.
 
+    tosca.nodes.NetworkParameterType:
+        derived_from: tosca.nodes.Root
+
+        description: >
+            An XOS network parameter type. May be applied to Networks and/or
+            Ports.
+
+        capabilities:
+            network_parameter_type:
+                type: tosca.capabilities.xos.NetworkParameterType
+
     tosca.nodes.NetworkTemplate:
         derived_from: tosca.nodes.Root
 
@@ -579,6 +617,10 @@
             image:
                 type: tosca.capabilities.xos.Image
         properties:
+            kind:
+                type: string
+                required: false
+                description: Type of image (container | VM)
             disk_format:
                 type: string
                 required: false
@@ -734,6 +776,10 @@
                 type: integer
                 default: 10
                 description: Quota of instances that this slice may create.
+            default_isolation:
+                type: string
+                required: false
+                description: default isolation to use when bringing up instances (defaults to 'vm')
 
     tosca.nodes.Node:
         derived_from: tosca.nodes.Root
@@ -785,6 +831,31 @@
                 required: false
                 description: URL to the dashboard
 
+    tosca.nodes.Compute.Container:
+      derived_from: tosca.nodes.Compute
+      description: >
+        The TOSCA Compute.Container node represents a container on bare metal.
+      attributes:
+        private_address:
+          type: string
+        public_address:
+          type: string
+      capabilities:
+          host:
+             type: tosca.capabilities.Container
+          binding:
+             type: tosca.capabilities.network.Bindable
+          os:
+             type: tosca.capabilities.OperatingSystem
+          scalable:
+             type: tosca.capabilities.Scalable
+      requirements:
+        - local_storage:
+            capability: tosca.capabilities.Attachment
+            node: tosca.nodes.BlockStorage
+            relationship: tosca.relationships.AttachesTo
+            occurrences: [0, UNBOUNDED]
+
     tosca.relationships.MemberOfSlice:
         derived_from: tosca.relationships.Root
         valid_target_types: [ tosca.capabilities.xos.Slice ]
@@ -825,6 +896,10 @@
         derived_from: tosca.relationships.Root
         valid_target_types: [ tosca.capabilities.xos.Network ]
 
+    tosca.relationships.UsesImage:
+        derived_from: tosca.relationships.Root
+        valid_target_types: [ tosca.capabilities.xos.Image ]
+
     tosca.relationships.SupportsImage:
         derived_from: tosca.relationships.Root
         valid_target_types: [ tosca.capabilities.xos.Image ]
@@ -920,3 +995,7 @@
     tosca.capabilities.xos.DashboardView:
         derived_from: tosca.capabilities.Root
         description: An XOS DashboardView
+
+    tosca.capabilities.xos.NetworkParameterType:
+        derived_from: tosca.capabilities.Root
+        description: An XOS NetworkParameterType
diff --git a/xos/tosca/resources/VOLTTenant.py b/xos/tosca/resources/VOLTTenant.py
index f00b515..20c4dfb 100644
--- a/xos/tosca/resources/VOLTTenant.py
+++ b/xos/tosca/resources/VOLTTenant.py
@@ -7,14 +7,14 @@
 import pdb
 
 from core.models import User
-from cord.models import VOLTTenant, VOLTService, CordSubscriberRoot
+from cord.models import VOLTTenant, VOLTService, CordSubscriberRoot, VOLT_KIND
 
 from xosresource import XOSResource
 
 class XOSVOLTTenant(XOSResource):
     provides = "tosca.nodes.VOLTTenant"
     xos_model = VOLTTenant
-    copyin_props = ["service_specific_id", "vlan_id"]
+    copyin_props = ["service_specific_id", "s_tag", "c_tag"]
     name_field = None
 
     def get_xos_args(self, throw_exception=True):
@@ -32,10 +32,12 @@
 
     def get_existing_objs(self):
         args = self.get_xos_args(throw_exception=False)
-        provider_service = args.get("provider", None)
+        provider_service = args.get("provider_service", None)
         service_specific_id = args.get("service_specific_id", None)
         if (provider_service) and (service_specific_id):
-            return [ self.get_xos_object(provider_service=provider_service, service_specific_id=service_specific_id) ]
+            existing_obj = self.get_xos_object(VOLTTenant, kind=VOLT_KIND, provider_service=provider_service, service_specific_id=service_specific_id, throw_exception=False)
+            if existing_obj:
+                return [ existing_obj ]
         return []
 
     def postprocess(self, obj):
diff --git a/xos/tosca/resources/compute.py b/xos/tosca/resources/compute.py
index f01a401..37ba390 100644
--- a/xos/tosca/resources/compute.py
+++ b/xos/tosca/resources/compute.py
@@ -13,7 +13,7 @@
 from xosresource import XOSResource
 
 class XOSCompute(XOSResource):
-    provides = "tosca.nodes.Compute"
+    provides = ["tosca.nodes.Compute", "tosca.nodes.Compute.Container"]
     xos_model = Instance
 
     def select_compute_node(self, user, v, hostname=None):
@@ -60,11 +60,15 @@
             colocate_host = colocate_instances[0].node.name
             self.info("colocating on %s" % colocate_host)
 
+        imageName = self.get_requirement("tosca.relationships.UsesImage", throw_exception=False)
+        if imageName:
+            image = self.get_xos_object(Image, name=imageName)
+
         capabilities = nodetemplate.get_capabilities()
         for (k,v) in capabilities.items():
-            if (k=="host"):
+            if (k=="host") and (not host):
                 (compute_node, flavor) = self.select_compute_node(self.user, v, hostname=colocate_host)
-            elif (k=="os"):
+            elif (k=="os") and (not image):
                 image = self.select_image(self.user, v)
 
         if not compute_node:
@@ -80,6 +84,9 @@
         args["node"] = compute_node
         args["deployment"] = compute_node.site_deployment.deployment
 
+        if nodetemplate.type == "tosca.nodes.Compute.Container":
+            args["isolation"] = "container"
+
         return args
 
     def create(self, name = None, index = None):
@@ -120,3 +127,4 @@
         else:
             return super(XOSCompute,self).get_existing_objs()
 
+
diff --git a/xos/tosca/resources/image.py b/xos/tosca/resources/image.py
index bdc66b6..938c5cd 100644
--- a/xos/tosca/resources/image.py
+++ b/xos/tosca/resources/image.py
@@ -15,7 +15,7 @@
 class XOSImage(XOSResource):
     provides = "tosca.nodes.Image"
     xos_model = Image
-    copyin_props = ["disk_format", "container_format", "path"]
+    copyin_props = ["disk_format", "container_format", "path", "kind"]
 
     def get_xos_args(self):
         args = super(XOSImage, self).get_xos_args()
diff --git a/xos/tosca/resources/networkparametertype.py b/xos/tosca/resources/networkparametertype.py
new file mode 100644
index 0000000..e0cc93e
--- /dev/null
+++ b/xos/tosca/resources/networkparametertype.py
@@ -0,0 +1,38 @@
+import os
+import pdb
+import sys
+import tempfile
+sys.path.append("/opt/tosca")
+from translator.toscalib.tosca_template import ToscaTemplate
+
+from core.models import Slice,User,Network,NetworkParameterType
+
+from xosresource import XOSResource
+
+class XOSNetworkParameterType(XOSResource):
+    provides = "tosca.nodes.NetworkParameterType"
+    xos_model = NetworkParameterType
+    copyin_props = []
+
+    def get_xos_args(self):
+        args = super(XOSNetworkParameterType, self).get_xos_args()
+
+        return args
+
+    def create(self):
+        xos_args = self.get_xos_args()
+
+        networkParameterType = NetworkParameterType(**xos_args)
+        networkParameterType.caller = self.user
+        networkParameterType.save()
+
+        self.info("Created NetworkParameterType '%s' " % (str(networkParameterType), ))
+
+    def delete(self, obj):
+        if obj.networkparameters.exists():
+            return
+
+        super(XOSNetworkParameterType, self).delete(obj)
+
+
+
diff --git a/xos/tosca/resources/onosapp.py b/xos/tosca/resources/onosapp.py
index 111cf9a..648bb09 100644
--- a/xos/tosca/resources/onosapp.py
+++ b/xos/tosca/resources/onosapp.py
@@ -43,9 +43,9 @@
             if attrs:
                 attr = attrs[0]
                 if attr.value != value:
-                    self.info("updating attribute %s" % k)
-                    attrs.value = value
-                    attrs.save()
+                    self.info("updating attribute %s" % prop_name)
+                    attr.value = value
+                    attr.save()
             else:
                 self.info("adding attribute %s" % prop_name)
                 ta = TenantAttribute(tenant=obj, name=prop_name, value=value)
diff --git a/xos/tosca/resources/service.py b/xos/tosca/resources/service.py
index 884c6db..247be08 100644
--- a/xos/tosca/resources/service.py
+++ b/xos/tosca/resources/service.py
@@ -13,7 +13,7 @@
 class XOSService(XOSResource):
     provides = "tosca.nodes.Service"
     xos_model = Service
-    copyin_props = ["view_url", "icon_url", "kind", "enabled", "published", "public_key", "versionNumber"]
+    copyin_props = ["view_url", "icon_url", "kind", "enabled", "published", "public_key", "private_key_fn", "versionNumber"]
 
     def postprocess(self, obj):
         for provider_service_name in self.get_requirements("tosca.relationships.TenantOfService"):
diff --git a/xos/tosca/resources/slice.py b/xos/tosca/resources/slice.py
index 2c02365..e37bfc8 100644
--- a/xos/tosca/resources/slice.py
+++ b/xos/tosca/resources/slice.py
@@ -12,7 +12,7 @@
 class XOSSlice(XOSResource):
     provides = "tosca.nodes.Slice"
     xos_model = Slice
-    copyin_props = ["enabled", "description", "slice_url", "max_instances"]
+    copyin_props = ["enabled", "description", "slice_url", "max_instances", "default_isolation"]
 
     def get_xos_args(self):
         args = super(XOSSlice, self).get_xos_args()
diff --git a/xos/tosca/resources/vcpeservice.py b/xos/tosca/resources/vcpeservice.py
index 6cc7390..8df7231 100644
--- a/xos/tosca/resources/vcpeservice.py
+++ b/xos/tosca/resources/vcpeservice.py
@@ -12,5 +12,5 @@
 class XOSVcpeService(XOSService):
     provides = "tosca.nodes.VCPEService"
     xos_model = VCPEService
-    copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key", "versionNumber", "backend_network_label"]
+    copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key", "private_key_fn", "versionNumber", "backend_network_label"]
 
diff --git a/xos/tosca/resources/xosresource.py b/xos/tosca/resources/xosresource.py
index 3553ab1..9c4f479 100644
--- a/xos/tosca/resources/xosresource.py
+++ b/xos/tosca/resources/xosresource.py
@@ -77,9 +77,6 @@
     def get_existing_objs(self):
         return self.xos_model.objects.filter(**{self.name_field: self.nodetemplate.name})
 
-    def get_xos_args(self):
-        return {}
-
     def get_model_class_name(self):
         return self.xos_model.__name__
 
diff --git a/xos/tosca/samples/container.yaml b/xos/tosca/samples/container.yaml
new file mode 100644
index 0000000..bd69fbe
--- /dev/null
+++ b/xos/tosca/samples/container.yaml
@@ -0,0 +1,42 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Template for deploying a single server with predefined properties.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    mysite:
+      type: tosca.nodes.Site
+
+    mysite_containers:
+      type: tosca.nodes.Slice
+      requirements:
+          - slice:
+                node: mysite
+                relationship: tosca.relationships.MemberOfSite
+
+    andybavier/docker-vcpe:
+      type: tosca.nodes.Image
+      properties:
+        kind: container
+        container_format: na
+        disk_format: na
+
+    my_container:
+      type: tosca.nodes.Compute.Container
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+      requirements:
+          - slice:
+                node: mysite_containers
+                relationship: tosca.relationships.MemberOfSlice
+          - image:
+                node: andybavier/docker-vcpe
+                relationship: tosca.relationships.UsesImage
diff --git a/xos/tosca/samples/container_slice.yaml b/xos/tosca/samples/container_slice.yaml
new file mode 100644
index 0000000..520bec0
--- /dev/null
+++ b/xos/tosca/samples/container_slice.yaml
@@ -0,0 +1,24 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >
+    * Create a new deployment, controller, and site.
+    * Add a SiteDeployment from the site to the deployment using the controller.
+    * Create a Slice in the Site, with one Instance
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    mysite:
+      type: tosca.nodes.Site
+
+    mysite_containers:
+      type: tosca.nodes.Slice
+      requirements:
+          - slice:
+                node: mysite
+                relationship: tosca.relationships.MemberOfSite
+      properties:
+          default_isolation: container
+
diff --git a/xos/tosca/samples/cord.yaml b/xos/tosca/samples/cord.yaml
index 477be2f..567ced0 100644
--- a/xos/tosca/samples/cord.yaml
+++ b/xos/tosca/samples/cord.yaml
@@ -125,7 +125,8 @@
         type: tosca.nodes.VOLTTenant
         properties:
             service_specific_id: 1234
-            vlan_id: 4321
+            s_tag: 222
+            c_tag: 432
         requirements:
             - provider_service:
                 node: service_volt
diff --git a/xos/tosca/tests/allObserverTests.py b/xos/tosca/tests/allObserverTests.py
index d06daba..6a566a9 100644
--- a/xos/tosca/tests/allObserverTests.py
+++ b/xos/tosca/tests/allObserverTests.py
@@ -1,11 +1,13 @@
-from observerComputeTest import ObserverComputeTest
+from observerVMTest import ObserverVMTest
+from observerContainerTest import ObserverContainerTest
 from observerImageTest import ObserverImageTest
 from observerUserTest import ObserverUserTest
 from observerSiteTest import ObserverSiteTest
 from observerSliceTest import ObserverSliceTest
 
 if __name__ == "__main__":
-    ObserverComputeTest()
+    ObserverVMTest()
+    ObserverContainerTest()
     ObserverImageTest()
     ObserverSiteTest()
     ObserverUserTest()
diff --git a/xos/tosca/tests/basetest.py b/xos/tosca/tests/basetest.py
index f7f04eb..d9701d7 100644
--- a/xos/tosca/tests/basetest.py
+++ b/xos/tosca/tests/basetest.py
@@ -67,15 +67,23 @@
 
         return yml
 
-    def make_compute(self, slice, name, caps={}, props={}, reqs=[], num_cpus="1", disk_size="10 GB", mem_size="4 MB"):
+    def make_compute(self, slice, name, caps={}, props={}, reqs=[], num_cpus="1", disk_size="10 GB", mem_size="4 MB", isolation="vm"):
         reqs = reqs[:]
+        props = props.copy()
         caps = caps.copy()
 
+        if isolation=="container":
+            type = "tosca.nodes.Compute.Container"
+        elif isolation=="container_vm":
+            type = "tosca.nodes.Compute.ContainerVM"
+        else:
+            type = "tosca.nodes.Compute"
+
         caps.update( {"host": {"num_cpus": num_cpus, "disk_size": disk_size, "mem_size": mem_size},
                       "os": {"architecture": "x86_64", "type": "linux", "distribution": "rhel", "version": "6.5"}} )
         reqs.append( (slice, "tosca.relationships.MemberOfSlice") )
 
-        return self.make_nodetemplate(name, "tosca.nodes.Compute",
+        return self.make_nodetemplate(name, type,
                                       caps= caps,
                                       props = props,
                                       reqs= reqs)
diff --git a/xos/tosca/tests/observerContainerTest.py b/xos/tosca/tests/observerContainerTest.py
new file mode 100644
index 0000000..a31b866
--- /dev/null
+++ b/xos/tosca/tests/observerContainerTest.py
@@ -0,0 +1,95 @@
+from observertest import BaseObserverToscaTest
+
+from core.models import Instance, Site
+
+# Note that as a side effect, these tests will also create a Site
+
+class ObserverContainerTest(BaseObserverToscaTest):
+    tests = ["create_container"]
+    # hide_observer_output = False # uncomment to display lots of stuff to screen
+
+    def cleanup(self):
+        # We don't want to leak resources, so we make sure to let the observer
+        # attempt to delete these objects.
+        self.try_to_delete(Instance, purge=False, name="test_compute1")
+        self.try_to_delete(Site, purge=False, name="testsite")
+        self.run_observer()
+        # The site objects don't seem to go away nicely, they linger about and
+        # cause an IntegrityError due to a duplicate login_base
+        self.try_to_delete(Site, purge=True, name="testsite")
+
+    def get_base_templates(self):
+        return self.make_nodetemplate("testsite", "tosca.nodes.Site") + \
+               self.make_nodetemplate("testsite_slice1", "tosca.nodes.Slice", reqs=[("testsite", "tosca.relationships.MemberOfSite")]) + \
+               self.make_nodetemplate("andybavier/docker-vcpe", "tosca.nodes.Image", props={"kind": "container", "container_format": "na", "disk_format": "na"})
+
+    def create_container(self):
+        self.assert_noobj(Instance, "test_compute1")
+        self.execute(self.get_base_templates() +
+                     self.make_compute("testsite_slice1", "test_compute1", disk_size="1 GB", mem_size="513 MB", isolation="container",
+                                       reqs=[("andybavier/docker-vcpe", "tosca.relationships.UsesImage")],
+                                       ))
+        instance = self.assert_obj(Instance, "test_compute1")
+        assert(instance.flavor.name == "m1.small")
+
+        # first pass makes the Networks
+        self.run_model_policy(save_output="/tmp/instancetest:create_container:model_policy_first")
+
+        # XXX deal with bug where
+        instance = self.assert_obj(Instance, "test_compute1")
+        instance.save()
+
+        # second pass makes the NetworkControllers
+        self.run_model_policy(save_output="/tmp/instancetest:create_container:model_policy_second")
+
+        # first observer pass should make any necessary networks or ports
+        self.run_observer(save_output="/tmp/instancetest:create_container:observer_first")
+
+        # reset the exponential backoff
+        instance = self.assert_obj(Instance, "test_compute1")
+        instance.backend_register="{}"
+        instance.save()
+
+        # we need to reset the companion instance's exponential backoff too
+        companion_instance = Instance.objects.filter(slice=instance.slice, isolation="vm")
+        assert(companion_instance)
+        companion_instance = companion_instance[0]
+        companion_instance.backend_register="{}"
+        companion_instance.save()
+
+        # third pass reset lazy_blocked
+        self.run_model_policy(save_output="/tmp/instancetest:create_container:model_policy_third")
+
+        # second observer pass should instantiate the controller networks
+        #    (might instantiate the instance, too)
+        self.run_observer(save_output="/tmp/instancetest:create_container:observer_second")
+
+        # reset the exponential backoff
+        instance = self.assert_obj(Instance, "test_compute1")
+        instance.backend_register="{}"
+        instance.save()
+
+        # we need to reset the companion instance's exponential backoff too
+        companion_instance = Instance.objects.filter(slice=instance.slice, isolation="vm")
+        assert(companion_instance)
+        companion_instance = companion_instance[0]
+        companion_instance.backend_register="{}"
+        companion_instance.save()
+
+        # third observer pass should instantiate the companion instance
+        self.run_observer(save_output="/tmp/instancetest:create_container:observer_third")
+
+        # third observer pass should instantiate the instance
+        self.run_observer(save_output="/tmp/instancetest:create_container:observer_fourth")
+
+        instance = self.assert_obj(Instance, "test_compute1")
+
+        assert(instance.instance_id is not None)
+        assert(instance.instance_name is not None)
+
+        # there should be one port on the private network
+        assert(instance.ports.count() == 1)
+
+if __name__ == "__main__":
+    ObserverContainerTest()
+
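
The repeated backend_register resets in the test above are how the observer's
exponential backoff is cleared between passes; factored out, the idiom looks
like this (a sketch, assuming the Instance model used by the test):

    def reset_backoff(instance):
        # An empty backend_register erases the recorded failure history, so
        # the next observer pass retries the instance immediately.
        instance.backend_register = "{}"
        instance.save()
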
diff --git a/xos/tosca/tests/observerComputeTest.py b/xos/tosca/tests/observerVMTest.py
similarity index 86%
rename from xos/tosca/tests/observerComputeTest.py
rename to xos/tosca/tests/observerVMTest.py
index 972b62c..65cbde5 100644
--- a/xos/tosca/tests/observerComputeTest.py
+++ b/xos/tosca/tests/observerVMTest.py
@@ -4,8 +4,8 @@
 
 # Note that as a side effect, these tests will also create a Site
 
-class ObserverComputeTest(BaseObserverToscaTest):
-    tests = ["create_instance"]
+class ObserverVMTest(BaseObserverToscaTest):
+    tests = ["create_vm"]
     # hide_observer_output = False # uncomment to display lots of stuff to screen
 
     def cleanup(self):
@@ -22,7 +22,7 @@
         return self.make_nodetemplate("testsite", "tosca.nodes.Site") + \
                self.make_nodetemplate("testsite_slice1", "tosca.nodes.Slice", reqs=[("testsite", "tosca.relationships.MemberOfSite")])
 
-    def create_instance(self):
+    def create_vm(self):
         self.assert_noobj(Instance, "test_compute1")
         self.execute(self.get_base_templates() +
                      self.make_compute("testsite_slice1", "test_compute1", disk_size="1 GB", mem_size="513 MB"))
@@ -30,13 +30,13 @@
         assert(instance.flavor.name == "m1.small")
 
         # first pass makes the Networks
-        self.run_model_policy(save_output="/tmp/instancetest:create_instance:model_policy_first")
+        self.run_model_policy(save_output="/tmp/instancetest:create_vm:model_policy_first")
 
         # second pass makes the NetworkControllers
-        self.run_model_policy(save_output="/tmp/instancetest:create_instance:model_policy_second")
+        self.run_model_policy(save_output="/tmp/instancetest:create_vm:model_policy_second")
 
         # first observer pass should make any necessary networks or ports
-        self.run_observer(save_output="/tmp/instancetest:create_instance:observer_first")
+        self.run_observer(save_output="/tmp/instancetest:create_vm:observer_first")
 
         # reset the exponential backoff
         instance = self.assert_obj(Instance, "test_compute1")
@@ -44,11 +44,11 @@
         instance.save()
 
         # third pass reset lazy_blocked
-        self.run_model_policy(save_output="/tmp/instancetest:create_instance:model_policy_third")
+        self.run_model_policy(save_output="/tmp/instancetest:create_vm:model_policy_third")
 
         # second observer pass should instantiate the controller networks
         #    (might instantiate the instance, too)
-        self.run_observer(save_output="/tmp/instancetest:create_instance:observer_second")
+        self.run_observer(save_output="/tmp/instancetest:create_vm:observer_second")
 
         # reset the exponential backoff
         instance = self.assert_obj(Instance, "test_compute1")
@@ -56,13 +56,16 @@
         instance.save()
 
         # third observer pass should instantiate the instance
-        self.run_observer(save_output="/tmp/instancetest:create_instance:observer_third")
+        self.run_observer(save_output="/tmp/instancetest:create_vm:observer_third")
 
         instance = self.assert_obj(Instance, "test_compute1")
 
         assert(instance.instance_id is not None)
         assert(instance.instance_name is not None)
 
+        # there should be a port on the private network and a port on nat-net
+        assert(instance.ports.count() == 2)
+
 if __name__ == "__main__":
-    ObserverComputeTest()
+    ObserverVMTest()
 
diff --git a/xos/xos/settings.py b/xos/xos/settings.py
index 834b3a1..91f631f 100644
--- a/xos/xos/settings.py
+++ b/xos/xos/settings.py
@@ -151,7 +151,6 @@
     'django.contrib.admindocs',
     'rest_framework',
     'django_extensions',
-    'django_evolution',
     'core',
     'hpc',
     'cord',
@@ -169,8 +168,6 @@
-    # if django >= 1.7, then remove evolution and change the admin module
+    # if django >= 1.7, change the admin module
     INSTALLED_APPS = list(INSTALLED_APPS)
     INSTALLED_APPS[INSTALLED_APPS.index('django.contrib.admin')] = 'django.contrib.admin.apps.SimpleAdminConfig'
-    INSTALLED_APPS.remove('django_evolution')
-    INSTALLED_APPS = tuple(INSTALLED_APPS)
 
 # Added for django-suit form 
 TEMPLATE_CONTEXT_PROCESSORS = TCP + (