Merge branch 'master' of https://github.com/open-cloud/xos into AddVPNService
diff --git a/containers/README.md b/containers/README.md
new file mode 100644
index 0000000..0bdc74b
--- /dev/null
+++ b/containers/README.md
@@ -0,0 +1,77 @@
+# XOS Docker Images
+
+## Introduction
+
+XOS comprises three core services:
+
+  * A database backend (PostgreSQL)
+  * A web server front end (Django)
+  * A synchronizer daemon that interacts with the OpenStack backend
+
+We have created a separate Dockerfile for each of these services, making it
+easier to build the services independently and to deploy and run them in
+isolated environments.
+
+#### Database Container
+
+To build and run the database container:
+
+```
+$ cd postgres; make build && make run
+```
+
+#### XOS Container
+
+To build and run the xos webserver container:
+
+```
+$ cd xos; make build && make run
+```
+
+You should now be able to access the login page by visiting
+`http://localhost:80` and log in using the default `padmin@vicci.org` account
+with password `letmein`. It may be helpful to bootstrap xos with some sample
+data: deployments, controllers, sites, slices, etc. You can get started by
+loading the TOSCA configuration for the OpenCloud demo dataset:
+
+```
+$ cd xos; make runtosca
+```
+
+Or you can create your own TOSCA configuration file and customize the dataset
+however you want. You can then load your own configuration by setting the
+`TOSCA_CONFIG_PATH` environment variable before executing the make command:
+
+```
+$ cd xos; TOSCA_CONFIG_PATH=/path/to/tosca/config.yaml make runtosca
+```
+
+#### Synchronizer Container
+
+The synchronizer shares many dependencies with the xos container, and the
+synchronizer container takes advantage of this by building itself on top of the
+xos image. This means you must build the xos image before building the
+synchronizer image. The XOS and synchronizer containers can run on separate
+hosts, but you must build the xos image on the host on which you plan to run
+the synchronizer container. Assuming you have already built the xos image,
+executing the following will build and run the synchronizer container:
+
+```
+$ cd synchronizer; make build && make run
+```
+
+#### Solution Compose File ![](https://img.shields.io/badge/compose-beta-red.svg)
+
+[Docker Compose](https://docs.docker.com/compose/) is a tool for defining and
+running multi-container Docker applications. With Compose, you use a Compose
+file to configure your application’s services. Then, using a single command, you
+create, start, scale, and manage all the services from your configuration.
+
+Included is a compose file in *YAML* format, with content defined by the [Docker
+Compose file format](https://docs.docker.com/compose/compose-file/). With this
+compose file, a complete XOS solution based on Docker containers can be
+instantiated with a single command. To start the instance, run:
+
+```
+$ docker-compose -f xos-compose.yml up -d
+```
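+
+Compose can also be used to inspect and manage the running services. A few
+examples (a minimal sketch using standard `docker-compose` subcommands; the
+service names come from `xos-compose.yml`):
+
+```
+$ docker-compose -f xos-compose.yml ps        # show service status
+$ docker-compose -f xos-compose.yml logs xos  # view webserver logs
+$ docker-compose -f xos-compose.yml stop      # stop all services
+```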
diff --git a/containers/postgresql/Makefile b/containers/postgresql/Makefile
index 327f661..38f159c 100644
--- a/containers/postgresql/Makefile
+++ b/containers/postgresql/Makefile
@@ -1,21 +1,25 @@
+IMAGE_NAME:=xosproject/xos-postgress
+CONTAINER_NAME:=xos-db-postgress
+NO_DOCKER_CACHE?=false
+
 .PHONY: build
-build: ; docker build --rm -t postgres .
+build: ; docker build --no-cache=${NO_DOCKER_CACHE} --rm -t ${IMAGE_NAME} .
 
 .PHONY: run
-run: ; docker run -d -p 5432:5432 --name postgres-server postgres
+run: ; docker run -d -p 5432:5432 --name ${CONTAINER_NAME} ${IMAGE_NAME}
 
 .PHONY: stop
-stop: ; docker stop postgres-server
+stop: ; docker stop ${CONTAINER_NAME}
 
 .PHONY: rm
-rm: ; docker rm postgres-server
+rm: ; docker rm ${CONTAINER_NAME}
 
 .PHONE: rmi
-rmi: ; docker rmi postgres
+rmi: ; docker rmi ${IMAGE_NAME}
 
 .PHONY: backup
-backupvol: ; docker run --volumes-from postgres-server -v /backup:/backup postgres tar cvf /backup/backup-postgres.tar /var/lib/postgresql
+backupvol: ; docker run --volumes-from ${CONTAINER_NAME} -v /backup:/backup postgres tar cvf /backup/backup-postgres.tar /var/lib/postgresql
 
 .PHONY: restore
-restorevol: ; docker run --volumes-from postgres-server -v /backup:/backup postgres cd /var/lib/postgresql && tar xvf /backup/backup-postgres.tar
+restorevol: ; docker run --volumes-from ${CONTAINER_NAME} -v /backup:/backup postgres bash -c "cd /var/lib/postgresql && tar xvf /backup/backup-postgres.tar"
 
diff --git a/containers/synchronizer/Dockerfile b/containers/synchronizer/Dockerfile
index 44b058e..f9d79ae 100644
--- a/containers/synchronizer/Dockerfile
+++ b/containers/synchronizer/Dockerfile
@@ -1,4 +1,4 @@
-FROM       xos
+FROM       xosproject/xos
 
 # Install custom Ansible
 RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y \
diff --git a/containers/synchronizer/Makefile b/containers/synchronizer/Makefile
index 14520d9..8620438 100644
--- a/containers/synchronizer/Makefile
+++ b/containers/synchronizer/Makefile
@@ -1,10 +1,12 @@
-CONTAINER_NAME:=synchronizer-server
+IMAGE_NAME:=xosproject/xos-synchronizer-openstack
+CONTAINER_NAME:=xos-synchronizer
+NO_DOCKER_CACHE?=false
 
 .PHONY: build
-build: ; docker build --rm -t synchronizer .
+build: ; docker build --no-cache=${NO_DOCKER_CACHE} --rm -t ${IMAGE_NAME} .
 
 .PHONY: run
-run: ; docker run -d --name ${CONTAINER_NAME} synchronizer
+run: ; docker run -d --name ${CONTAINER_NAME} ${IMAGE_NAME}
 
 .PHONY: stop
 stop: ; docker stop ${CONTAINER_NAME}
diff --git a/containers/xos-compose.yml b/containers/xos-compose.yml
new file mode 100644
index 0000000..27a3b5a
--- /dev/null
+++ b/containers/xos-compose.yml
@@ -0,0 +1,25 @@
+xos_db:
+    image: xosproject/xos-postgress
+    expose:
+        - "5432"
+
+xos_synchronizer_openstack:
+    image: xosproject/xos-synchronizer-openstack
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: openstack
+
+# FUTURE
+#xos_swarm_synchronizer:
+#    image: xosproject/xos-swarm-synchronizer
+#    labels:
+#        org.xosproject.kind: synchronizer
+#        org.xosproject.target: swarm
+
+xos:
+    image: xosproject/xos
+    command: python /opt/xos/manage.py runserver 0.0.0.0:8000 --insecure --makemigrations
+    ports:
+        - "8000:8000"
+    links:
+        - xos_db
diff --git a/containers/xos/Dockerfile.templ b/containers/xos/Dockerfile.templ
new file mode 100644
index 0000000..e669692
--- /dev/null
+++ b/containers/xos/Dockerfile.templ
@@ -0,0 +1,88 @@
+FROM       ubuntu:14.04.3
+
+# XXX Workaround for docker bug:
+# https://github.com/docker/docker/issues/6345
+# Kernel 3.15 breaks docker; use the line below as a workaround
+# until there is a fix
+RUN ln -s -f /bin/true /usr/bin/chfn
+# XXX End workaround
+
+# Install.
+RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y \
+    curl \
+    gcc \
+    geoip-database \
+    git \
+    graphviz \
+    graphviz-dev \
+    libgeoip1 \
+    libxslt1.1 \
+    libxslt1-dev \
+    libyaml-dev \
+    m4 \
+    pkg-config \
+    python-dev \
+    python-httplib2 \
+    python-pip \
+    python-psycopg2 \
+    python-pycurl \
+    python-setuptools \
+    tar \
+    wget \
+##### observer dependencies  
+    python-keystoneclient \
+    python-novaclient \
+    python-neutronclient \
+    python-glanceclient \
+    python-ceilometerclient
+
+RUN pip install -U \
+    django==1.7 \
+    django-bitfield \
+    django-crispy-forms \
+    django-encrypted-fields \
+    django_evolution \
+    django-extensions \
+    django-filter \
+    django-geoposition \
+    django-ipware \
+    django_rest_swagger \
+    django-suit \
+    django-timezones \
+    djangorestframework==2.4.4 \
+    dnslib \
+    google_api_python_client \
+    httplib2 \
+    httplib2.ca_certs_locater \
+    lxml \
+    markdown \
+    netaddr \
+    python-dateutil \
+    python_gflags \
+    python-keyczar \
+    pygraphviz \
+    pytz \
+    pyyaml \
+    requests
+
+ADD http://code.jquery.com/jquery-1.9.1.min.js /usr/local/lib/python2.7/dist-packages/suit/static/suit/js/
+
+# Install XOS
+RUN git clone XOS_GIT_REPO -b XOS_GIT_BRANCH /tmp/xos && \
+    mv /tmp/xos/xos /opt/ && \
+    chmod +x /opt/xos/scripts/opencloud && \
+    /opt/xos/scripts/opencloud genkeys
+
+# install Tosca engine
+RUN bash /opt/xos/tosca/install_tosca.sh
+
+EXPOSE 8000
+
+# Set environment variables.
+ENV HOME /root
+
+# Define working directory.
+WORKDIR /root
+
+# Define default command.
+CMD python /opt/xos/manage.py runserver 0.0.0.0:8000 --insecure
diff --git a/containers/xos/Makefile b/containers/xos/Makefile
index c71873e..367f0ec 100644
--- a/containers/xos/Makefile
+++ b/containers/xos/Makefile
@@ -1,11 +1,18 @@
 CONTAINER_NAME:=xos-server
+IMAGE_NAME:=xosproject/xos
 TOSCA_CONFIG_PATH:=/opt/xos/configurations/opencloud/opencloud.yaml
+XOS_GIT_REPO?=git://github.com/open-cloud/xos.git
+XOS_GIT_BRANCH?=master
+NO_DOCKER_CACHE?=false
 
 .PHONY: build
-build: ; docker build --rm -t xos . && ./initdb
+build: ; docker build --no-cache=${NO_DOCKER_CACHE} --rm -t ${IMAGE_NAME} . 
+
+.PHONY: custom
+custom: ; cat Dockerfile.templ | sed -e "s|XOS_GIT_REPO|${XOS_GIT_REPO}|g" -e "s|XOS_GIT_BRANCH|${XOS_GIT_BRANCH}|g" | docker build --no-cache=${NO_DOCKER_CACHE} -
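+
+# Example usage of the custom target (hypothetical repo/branch values), which builds
+# an image from an alternate fork/branch without editing Dockerfile.templ:
+#   make custom XOS_GIT_REPO=git://github.com/<user>/xos.git XOS_GIT_BRANCH=mybranch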
 
 .PHONY: run
-run: ; docker run -d --name ${CONTAINER_NAME} -p 80:8000 xos
+run: ; docker run -d --name ${CONTAINER_NAME} -p 80:8000 ${IMAGE_NAME}
 
 .PHONY: runtosca
 runtosca: ; docker exec -it ${CONTAINER_NAME} /usr/bin/python /opt/xos/tosca/run.py padmin@vicci.org ${TOSCA_CONFIG_PATH}
diff --git a/containers/xos/initdb b/containers/xos/initdb
index bd020c5..41e0a9a 100755
--- a/containers/xos/initdb
+++ b/containers/xos/initdb
@@ -1,16 +1,17 @@
 #!/bin/bash
 
-IMAGE_NAME=xos
+IMAGE_NAME=xosproject/xos
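+# $$ expands to this script's PID, giving the build-helper container a unique name per run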
+CONTAINER_NAME=xos_build_helper_$$
 DB_HOST=$(wget http://ipinfo.io/ip -qO -)
 
 # configure db host
-docker run -it $IMAGE_NAME sed -i '0,/host/{s/host=localhost/host='$DB_HOST'/}' /opt/xos/xos_config
-CONTAINER_ID=$(docker ps -a | grep $IMAGE_NAME | head -1 |  awk '{print $1}')
-echo $CONTAINER_ID $IMAGE_NAME
-docker commit $CONTAINER_ID $IMAGE_NAME
+docker run -it --name=$CONTAINER_NAME $IMAGE_NAME sed -i '0,/host/{s/host=localhost/host='$DB_HOST'/}' /opt/xos/xos_config
+docker commit $CONTAINER_NAME $IMAGE_NAME
+docker rm $CONTAINER_NAME
 
 # init db schema
-docker run -it $IMAGE_NAME /opt/xos/scripts/opencloud makemigrations
-CONTAINER_ID=$(docker ps -a | grep $IMAGE_NAME | head -1 | awk '{print $1}')
+docker run -it --name=$CONTAINER_NAME $IMAGE_NAME /opt/xos/scripts/opencloud makemigrations
 # run overrides the CMD specifed in the Dockerfile, so we re-set the CMD in the final commit"
-docker commit --change "CMD python /opt/xos/manage.py runserver 0.0.0.0:8000 --insecure" $CONTAINER_ID $IMAGE_NAME
+echo docker commit --change="CMD python /opt/xos/manage.py runserver 0.0.0.0:8000 --insecure" $CONTAINER_NAME $IMAGE_NAME
+docker commit --change="CMD python /opt/xos/manage.py runserver 0.0.0.0:8000 --insecure" $CONTAINER_NAME $IMAGE_NAME
+docker rm $CONTAINER_NAME
diff --git a/xos/ceilometer/models.py b/xos/ceilometer/models.py
index ae73b3e..e149eb5 100644
--- a/xos/ceilometer/models.py
+++ b/xos/ceilometer/models.py
@@ -126,6 +126,12 @@
             for cs in slice.controllerslices.all():
                 if cs.tenant_id:
                     tenant_ids.add(cs.tenant_id)
+        if self.creator.is_admin:
+            #TODO: Ceilometer publishes the SDN meters without associating them with any tenant ID.
+            #For now, the ceilometer code is changed to publish all such meters with the tenant
+            #id "default_admin_tenant". Here we add that default tenant as an authorized tenant_id
+            #for all admin users.
+            tenant_ids.add("default_admin_tenant")
         return tenant_ids
 
     @property
diff --git a/xos/configurations/common/Dockerfile.common b/xos/configurations/common/Dockerfile.common
index 3f6a721..8db457c 100644
--- a/xos/configurations/common/Dockerfile.common
+++ b/xos/configurations/common/Dockerfile.common
@@ -61,7 +61,6 @@
 RUN pip install django_rest_swagger
 
 RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-setuptools
-RUN easy_install django_evolution
 RUN easy_install python_gflags
 RUN easy_install --upgrade httplib2
 RUN easy_install google_api_python_client
diff --git a/xos/configurations/common/Makefile.cloudlab b/xos/configurations/common/Makefile.cloudlab
index 43e7497..6e609d7 100644
--- a/xos/configurations/common/Makefile.cloudlab
+++ b/xos/configurations/common/Makefile.cloudlab
@@ -13,6 +13,7 @@
 
 flat_name:
 	sudo bash -c "source /root/setup/admin-openrc.sh ; neutron net-list" |grep flat|awk '{printf "%s",$$4}' > flat_net_name
+	[ -s flat_net_name ] # throw error if flat_net_name is empty
 
 nodes_yaml:
 	bash ./make-cloudlab-nodes-yaml.sh
diff --git a/xos/configurations/cord/Dockerfile.cord b/xos/configurations/cord/Dockerfile.cord
index f7fdb65..8734eef 100644
--- a/xos/configurations/cord/Dockerfile.cord
+++ b/xos/configurations/cord/Dockerfile.cord
@@ -19,6 +19,7 @@
 ADD xos/observers/monitoring_channel/supervisor/monitoring_channel_observer.conf /etc/supervisor/conf.d/
 RUN sed -i 's/proxy_ssh=True/proxy_ssh=False/' /opt/xos/observers/vcpe/vcpe_observer_config
 RUN sed -i 's/proxy_ssh=True/proxy_ssh=False/' /opt/xos/observers/monitoring_channel/monitoring_channel_observer_config
+ADD xos/configurations/cord/virtualbng.json /root/setup/
 
 CMD /usr/bin/make -C /opt/xos/configurations/cord -f Makefile.inside; /bin/bash
 
diff --git a/xos/configurations/cord/Makefile b/xos/configurations/cord/Makefile
index 86b4ca1..701e92f 100644
--- a/xos/configurations/cord/Makefile
+++ b/xos/configurations/cord/Makefile
@@ -2,7 +2,7 @@
 RUNNING_CONTAINER:=$(shell sudo docker ps|grep "xos"|awk '{print $$NF}')
 LAST_CONTAINER=$(shell sudo docker ps -l -q)
 
-cord: common_cloudlab ceilometer_dashboard
+cord: common_cloudlab ceilometer_dashboard virtualbng_json
 	echo "# Autogenerated -- do not edit" > Dockerfile
 	cat ../common/Dockerfile.common Dockerfile.cord >> Dockerfile
 	cd ../../..; sudo docker build -t xos -f xos/configurations/cord/Dockerfile .
@@ -16,7 +16,10 @@
 	#NOTE: The below dashboard install scripts assume 
 	#clouldlab openstack environment created using "OpenStack" profile
 	bash install_xos_ceilometer_dashboard.sh
-	bash install_ceilometer_vcpe_notification_listener.sh
+	bash install_ceilometer_patch.sh
+
+virtualbng_json:
+	bash make-virtualbng-json.sh
 
 stop:
 	sudo docker stop $(RUNNING_CONTAINER)
diff --git a/xos/configurations/cord/README.md b/xos/configurations/cord/README.md
index f0a0a9b..606f12a 100644
--- a/xos/configurations/cord/README.md
+++ b/xos/configurations/cord/README.md
@@ -12,7 +12,7 @@
 
 ## End-to-end dataplane
 
-The configuration uses XOS to set up an end-to-end dataplane for development of the XOS services and ONOS apps 
+The configuration uses XOS to set up an end-to-end dataplane for development of the XOS services and ONOS apps
 used in CORD.  It abstracts away most of the complexity of the CORD hardware using virtual networks
 and Open vSwitch (OvS) switches.  At a high level the dataplane looks like this:
 
@@ -43,7 +43,7 @@
 
 To get started on CloudLab:
 * Create an experiment using the *OpenStack-CORD* profile.  (You can also use the *OpenStack* profile, but choose *Kilo*
-and disable security groups.)
+with two compute nodes and disable security groups.)
 * Wait until you get an email from CloudLab with title "OpenStack Instance Finished Setting Up".
 * Login to the *ctl* node of your experiment and run:
 ```
@@ -58,12 +58,12 @@
 1. *Slice mysite_onos*: runs the ONOS Docker container with `virtualbng` app loaded
 1. *Slice mysite_onos*: runs the ONOS Docker container with `olt` app loaded
 1. *Slice mysite_vbng*: for running OvS with the `virtualbng` app as controller
-1. *Slice mysite_volt*: for running OvS with the `olt` app as controller
+1. *Slice mysite_volt*: for running the CPqD switch with the `olt` app as controller
 1. *Slice mysite_clients*: a subscriber client for end-to-end testing
-1. *Slice mysite_vcpe*: runs the vCPE Docker container
+1. *Slice mysite_vcpe*: runs the vCPE Docker container (if not using containers on bare metal)
 
 Once all the VMs are up and the ONOS apps are configured, XOS should be able to get an address mapping from the `virtualbng`
-ONOS app for the vCPE. To verify that it has received an IP address mapping, look at the **Routeable subnet:** field in 
+ONOS app for the vCPE. To verify that it has received an IP address mapping, look at the **Routeable subnet:** field in
 the appropriate *Vbng tenant* object in XOS.  It should contain an IP address in the 10.254.0.0/24 subnet.
 
 After launching the ONOS apps, it is necessary to configure software switches along the dataplane so that ONOS can control
@@ -74,35 +74,50 @@
 ctl:~/xos/xos/configurations/cord/dataplane$ ansible-playbook -i hosts dataplane.yaml
 ```
 
-Currently the vOLT switch is not forwarding ARP and so it is necessary to set up ARP mappings between the client
-and vCPE.  Log into the client and add an ARP entry for the vCPE: 
+To set up the dataplane for containers on bare metal, perform these steps in addition to the above (note: make sure to use sudo when running the playbook):
 ```
-client:$ sudo arp -s 192.168.0.1 <mac-of-eth1-in-vCPE-container>
+ctl:~/xos/xos/configurations/cord/dataplane$ ./generate-bm.sh > hosts-bm   
+ctl:~/xos/xos/configurations/cord/dataplane$ sudo ansible-playbook -i hosts-bm dataplane-bm.yaml
 ```
-Inside the vCPE container add a similar entry for the client:
-```
-vcpe:$ arp -s 192.168.0.2 <mac-of-br-sub-on-client>
-```
+
+Check that the vCPE container has started by going into the XOS UI, selecting 'Services', 'service_vcpe', 'Administration', 'Vcpe Tenants', and making sure there is a green icon next to the vCPE.
+
+If the vCPE Tenant is still red, the Instance may be in exponential backoff due to errors that occurred while trying to sync before dataplane.yaml was run. You can reset the backoff by tracking down the vCPE Instance (Slices->mysite_vcpe->Instances; find the Instance associated with the vCPE Tenant) and hitting the save button.
 
 Now SSH into ONOS running the OLT app (see below) and activate the subscriber:
 ```
 onos> add-subscriber-access of:0000000000000001 1 432
 ```
 
-At this point you should be able to ping 192.168.0.1 from the client.  The final step is to set the 
-vCPE as the gateway on the client:
+At this point the client should be able to get an IP address from the vCPE via
+DHCP.  To set up the IP address and default route on the client:
 ```
 client:$ sudo route del default gw 10.11.10.5
-client:$ sudo route add default gw 192.168.0.1
+client:$ sudo dhclient br-sub
 ```
-The client should now be able to surf the Internet through the dataplane.
+Once `dhclient` returns, the client should now be able to surf the Internet
+through the dataplane.
+
+## Setting up /etc/hosts
+
+To make it easy to log into the various VMs that make up the dataplane, add entries for them into `/etc/hosts` on the
+*ctl* node.  As root, run:
+```
+ctl:~/xos/xos/configurations/cord/dataplane$ ./gen-etc-hosts.sh >> /etc/hosts
+```
+For example, to log into the client:
+```
+ctl:~$ ssh ubuntu@client
+```
 
 ## How to log into ONOS
 
-The ONOS Docker container runs in the VMs belonging to the *mysite_onos* slice.  All ports exposed by the ONOS container are forwarded to the outside, and can be accessed from the *ctl* node using the `flat-lan-1-net` address of the hosting VM.  For example, if the IP addresss of the VM is 10.11.10.30, then it is possible to SSH to ONOS as follows (password is *karaf*):
+ONOS apps are run inside Docker containers hosted in VMs.  All ports exposed by the ONOS container are forwarded to the
+outside, and can be accessed from the *ctl* node over the `flat-lan-1-net` network.  Assuming that `/etc/hosts`
+has been configured as described above, it is possible to SSH to the ONOS instance running the `virtualbng` app as follows (password is *karaf*):
 
 ```
-$ ssh -p 8101 karaf@10.11.10.30
+$ ssh -p 8101 karaf@onos_vbng
 Password authentication
 Password:
 Welcome to Open Network Operating System (ONOS)!
@@ -126,3 +141,9 @@
    Private IP - Public IP
    10.0.1.3 - 10.254.0.129
 ```
+
+## Troubleshooting
+
+#### Problem: No external connectivity from vCPE container
+1. Make sure the hosts listed in `virtualbng.json` are the actual compute nodes used in your experiment.
+2. Try restarting the ONOS container running the `virtualbng` app: `$ ssh ubuntu@onos-vbng "sudo docker restart ONOS"`
diff --git a/xos/configurations/cord/ceilometer_vcpe_notification_agent.tar.gz b/xos/configurations/cord/ceilometer_vcpe_notification_agent.tar.gz
deleted file mode 100644
index dcc6765..0000000
--- a/xos/configurations/cord/ceilometer_vcpe_notification_agent.tar.gz
+++ /dev/null
Binary files differ
diff --git a/xos/configurations/cord/cord.yaml b/xos/configurations/cord/cord.yaml
index 46acde9..02137ec 100644
--- a/xos/configurations/cord/cord.yaml
+++ b/xos/configurations/cord/cord.yaml
@@ -14,6 +14,12 @@
           - vcpe_tenant:
               node: service_vcpe
               relationship: tosca.relationships.TenantOfService
+          - lan_network:
+              node: lan_network
+              relationship: tosca.relationships.UsesNetwork
+          - wan_network:
+              node: wan_network
+              relationship: tosca.relationships.UsesNetwork
       properties:
           view_url: /admin/cord/voltservice/$id$/
           kind: vOLT
@@ -91,20 +97,9 @@
                 }
               }
             }
-          config_virtualbng.json: >
-            {
-                "localPublicIpPrefixes" : [
-                    "10.254.0.128/25"
-                ],
-                "nextHopIpAddress" : "10.254.0.1",
-                "publicFacingMac" : "00:00:00:00:00:66",
-                "xosIpAddress" : "10.11.10.1",
-                "xosRestPort" : "9999",
-                "hosts" : {
-                    "cp-1.devel.xos-pg0.clemson.cloudlab.us" : "of:0000000000000001/1",
-                    "cp-2.devel.xos-pg0.clemson.cloudlab.us" : "of:0000000000000001/1"
-                }
-            }
+          config_virtualbng.json: { get_artifact: [ SELF, virtualbng_json, LOCAL_FILE] }
+      artifacts:
+          virtualbng_json: /root/setup/virtualbng.json
 
     service_ONOS_vOLT:
       type: tosca.nodes.ONOSService
@@ -127,7 +122,7 @@
               node: service_volt
               relationship: tosca.relationships.UsedByService
       properties:
-          dependencies: org.onosproject.openflow, org.onosproject.olt
+          dependencies: org.onosproject.openflow-base, org.onosproject.olt
           config_network-cfg.json: >
             {
               "devices" : {
@@ -241,6 +236,11 @@
           - site:
               node: mysite
               relationship: tosca.relationships.MemberOfSite
+          - vcpe_docker_image:
+              node: andybavier/docker-vcpe
+              relationship: tosca.relationships.UsesImage
+      properties:
+          default_isolation: container
 
     mysite_onos_vbng:
       description: ONOS Controller Slice for vBNG
diff --git a/xos/configurations/cord/dataplane/dataplane-bm.yaml b/xos/configurations/cord/dataplane/dataplane-bm.yaml
index 2fbbf6a..e1e78ee 100644
--- a/xos/configurations/cord/dataplane/dataplane-bm.yaml
+++ b/xos/configurations/cord/dataplane/dataplane-bm.yaml
@@ -5,11 +5,15 @@
   - name: Create tunnel port on br-lan
     openvswitch_port:
       bridge=br-lan
-      port={{ grename }}
+      port={{ item }}
       state=present
+    with_items: "grenames.split(' ')"
 
   - name: Set up GRE tunnel to vCPE
-    shell: ovs-vsctl set Interface {{ grename }} type=gre options:remote_ip={{ bm_addr }}
+    shell: ovs-vsctl set Interface {{ item.0 }} type=gre options:remote_ip={{ item.1 }}
+    with_together:
+      - "grenames.split(' ')"
+      - "bm_ips.split(' ')"
 
 - hosts: baremetal
 
diff --git a/xos/configurations/cord/dataplane/dataplane.yaml b/xos/configurations/cord/dataplane/dataplane.yaml
index f43e4d7..a0950be 100644
--- a/xos/configurations/cord/dataplane/dataplane.yaml
+++ b/xos/configurations/cord/dataplane/dataplane.yaml
@@ -158,6 +158,14 @@
     shell: ifconfig br-lan inet6 del {{ ipv6.stdout }}
     when: ipv6.stdout != ""
 
+  - name: Check if veth1 has an IPv6 address
+    shell: ip addr show veth1|grep inet6|awk '{print $2}'
+    register: ipv6
+
+  - name: Remove veth1 IPv6 address if present
+    shell: ifconfig veth1 inet6 del {{ ipv6.stdout }}
+    when: ipv6.stdout != ""
+
   - name: Run the datapath
     command: /usr/local/bin/ofdatapath -i veth1,br-lan punix:/tmp/s1 -d 000000000001 --no-slicing -D -P
       creates=/usr/local/var/run/ofdatapath.pid
@@ -204,7 +212,7 @@
   - name: Run some commands on br-sub
     shell: "{{ item }}"
     with_items:
-    - ifconfig br-sub 192.168.0.2 mtu 1400 up
+    - ifconfig br-sub 0.0.0.0 mtu 1400 up
     - ethtool -K br-sub tso off
     - ethtool -K br-sub tx off
 
diff --git a/xos/configurations/cord/dataplane/generate-bm.sh b/xos/configurations/cord/dataplane/generate-bm.sh
index 1a3ec78..25f6fa5 100755
--- a/xos/configurations/cord/dataplane/generate-bm.sh
+++ b/xos/configurations/cord/dataplane/generate-bm.sh
@@ -1,4 +1,4 @@
-source ~/admin-openrc.sh
+source ../../common/admin-openrc.sh
 
 get_ip () {
     LABEL=$1
@@ -6,23 +6,28 @@
     nova list --all-tenants|grep $LABEL|sed "s/^.*$NETWORK=//g"|sed 's/; .*$//g'|awk '{print $1}'
     }
 
+GRE_NAMES=()
+BM_IPS=()
+
 NODES=`sudo bash -c "source /root/setup/admin-openrc.sh ; nova hypervisor-list" |grep cloudlab|awk '{print $4}'`
 I=1
 for NODE in $NODES; do
-    IP=`getent hosts $NODE | awk '{ print $1 }'`
-    echo switch_volt$I    ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) grename=gre-bm-$I bm_addr=$IP
-    echo bm$I           ansible_ssh_host=$IP grename=gre-bm-$I volt_addr=$( get_ip mysite_volt lan_network)  ansible_ssh_private_key_file=/root/.ssh/id_rsa
+    BM_SSH_IP=`getent hosts $NODE | awk '{ print $1 }'`
+    IFS=. read BM_NAME BM_REMAINDER <<< $NODE
+    BM_IP=`sudo grep -i $BM_NAME /root/setup/data-hosts.flat-lan-1 | awk '{print $1}'`
+
+    GRE_NAMES+=("gre-bm-$I")
+    BM_IPS+=("$BM_IP")
+
+    #echo switch_volt$I    ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) grename=gre-bm-$I bm_addr=$BM_IP
+    echo bm$I           ansible_ssh_host=$BM_SSH_IP grename=gre-bm-$I volt_addr=$( get_ip mysite_volt flat-lan-1-net)  ansible_ssh_private_key_file=/root/.ssh/id_rsa
     I=$(( I+1 ))
 done
 
-# a kludge for now -- just rerun the onos_volt step for each baremetal machine
+GRE_NAMES=${GRE_NAMES[@]}
+BM_IPS=${BM_IPS[@]}
 
-echo "[switch_volt]"
-I=1
-for NODE in $NODES; do
-    echo switch_volt$I
-    I=$((I+1))
-done
+echo switch_volt ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) grenames=\"$GRE_NAMES\" bm_ips=\"$BM_IPS\"
 
 echo "[baremetal]"
 I=1
diff --git a/xos/configurations/cord/install_ceilometer_patch.sh b/xos/configurations/cord/install_ceilometer_patch.sh
new file mode 100755
index 0000000..d6b4056
--- /dev/null
+++ b/xos/configurations/cord/install_ceilometer_patch.sh
@@ -0,0 +1,22 @@
+if [ -d /usr/lib/python2.7/dist-packages/ceilometer/network/ext_services ]; then
+    echo "Seems VCPE notification listeners are already enabled in ceilometer... so exiting gracefully..."
+    exit 0
+fi
+echo "Verifying if all the required files are present"
+if [ ! -f openstack_ceilometer_patch.tar.gz ];
+then
+    echo "File openstack_ceilometer_patch.tar.gz not found"
+    exit 1
+fi
+echo "Copying the ceilometer patch files to /usr/lib/python2.7/dist-packages/ceilometer"
+tar -xzf openstack_ceilometer_patch.tar.gz
+sudo mv ceilometer/network/ext_services /usr/lib/python2.7/dist-packages/ceilometer/network/
+sudo mv ceilometer/network/statistics/onos /usr/lib/python2.7/dist-packages/ceilometer/network/statistics/
+sudo mv /usr/lib/python2.7/dist-packages/ceilometer/network/statistics/__init__.py /usr/lib/python2.7/dist-packages/ceilometer/network/statistics/orig_init.orig_py
+sudo mv ceilometer/network/statistics/__init__.py /usr/lib/python2.7/dist-packages/ceilometer/network/statistics/
+sudo mv ceilometer-2015.1.1.egg-info/entry_points.txt /usr/lib/python2.7/dist-packages/ceilometer-*egg-info/
+sudo mv pipeline.yaml /etc/ceilometer/
+echo "Restarting ceilometer-agent-notification"
+sudo service ceilometer-agent-notification restart
+echo "Restarting ceilometer-agent-central"
+sudo service ceilometer-agent-central restart
diff --git a/xos/configurations/cord/install_ceilometer_vcpe_notification_listener.sh b/xos/configurations/cord/install_ceilometer_vcpe_notification_listener.sh
deleted file mode 100755
index 1992e80..0000000
--- a/xos/configurations/cord/install_ceilometer_vcpe_notification_listener.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-if [ -d /usr/lib/python2.7/dist-packages/ceilometer/network/ext_services ]; then
-    echo "Seems VCPE notification listeners are already enabled in ceilometer... so exiting gracefully..."
-    exit 0
-fi
-echo "Verifying if all the required files are present"
-if [ ! -f ceilometer_vcpe_notification_agent.tar.gz ];
-then
-    echo "File ceilometer_vcpe_notification_agent.tar.gz not found"
-    exit 1
-fi
-echo "Copying the ceilometer vcpe notification agent files /usr/lib/python2.7/dist-packages/ceilometer"
-tar -xzf ceilometer_vcpe_notification_agent.tar.gz
-sudo mv ceilometer/network/ext_services /usr/lib/python2.7/dist-packages/ceilometer/network/
-sudo mv ceilometer-2015.1.1.egg-info/entry_points.txt /usr/lib/python2.7/dist-packages/ceilometer-*egg-info/
-echo "Restarting ceilometer-agent-notification"
-sudo service ceilometer-agent-notification restart
diff --git a/xos/configurations/cord/make-virtualbng-json.sh b/xos/configurations/cord/make-virtualbng-json.sh
new file mode 100644
index 0000000..0e1d350
--- /dev/null
+++ b/xos/configurations/cord/make-virtualbng-json.sh
@@ -0,0 +1,39 @@
+#FN=/opt/xos/configurations/common/cloudlab-nodes.yaml
+FN=virtualbng.json
+
+rm -f $FN
+
+cat >> $FN <<EOF
+{
+    "localPublicIpPrefixes" : [
+        "10.254.0.128/25"
+    ],
+    "nextHopIpAddress" : "10.254.0.1",
+    "publicFacingMac" : "00:00:00:00:00:66",
+    "xosIpAddress" : "10.11.10.1",
+    "xosRestPort" : "9999",
+    "hosts" : {
+EOF
+
+NODES=$( sudo bash -c "source /root/setup/admin-openrc.sh ; nova hypervisor-list" |grep cloudlab|awk '{print $4}' )
+
+NODECOUNT=0
+for NODE in $NODES; do
+    ((NODECOUNT++))
+done
+
+I=0
+for NODE in $NODES; do
+    echo $NODE
+    ((I++))
+    if [[ "$I" -lt "$NODECOUNT" ]]; then
+        echo "      \"$NODE\" : \"of:0000000000000001/1\"," >> $FN
+    else
+        echo "      \"$NODE\" : \"of:0000000000000001/1\"" >> $FN
+    fi
+done
+
+cat >> $FN <<EOF
+    }
+}
+EOF
diff --git a/xos/configurations/cord/openstack_ceilometer_patch.tar.gz b/xos/configurations/cord/openstack_ceilometer_patch.tar.gz
new file mode 100644
index 0000000..6a6ffa7
--- /dev/null
+++ b/xos/configurations/cord/openstack_ceilometer_patch.tar.gz
Binary files differ
diff --git a/xos/configurations/cord/xos_metering_dashboard.tar.gz b/xos/configurations/cord/xos_metering_dashboard.tar.gz
index 648f91a..3b4d127 100644
--- a/xos/configurations/cord/xos_metering_dashboard.tar.gz
+++ b/xos/configurations/cord/xos_metering_dashboard.tar.gz
Binary files differ
diff --git a/xos/core/admin.py b/xos/core/admin.py
index 26c6dba..be9dcc0 100644
--- a/xos/core/admin.py
+++ b/xos/core/admin.py
@@ -23,7 +23,6 @@
 from cgi import escape as html_escape
 from django.contrib import messages
 
-import django_evolution
 import threading
 
 # thread locals necessary to work around a django-suit issue
@@ -106,6 +105,10 @@
             # this 'if' might be redundant if save_by_user is implemented right
             raise PermissionDenied
 
+        # reset exponential backoff
+        if hasattr(obj, "backend_register"):
+            obj.backend_register = "{}"
+
         obj.caller = request.user
         # update openstack connection to use this site/tenant
         obj.save_by_user(request.user)
@@ -2033,12 +2036,6 @@
 # unregister the Group model from admin.
 #admin.site.unregister(Group)
 
-#Do not show django evolution in the admin interface
-from django_evolution.models import Version, Evolution
-#admin.site.unregister(Version)
-#admin.site.unregister(Evolution)
-
-
 # When debugging it is often easier to see all the classes, but for regular use 
 # only the top-levels should be displayed
 showAll = False
diff --git a/xos/core/xoslib/methods/ceilometerview.py b/xos/core/xoslib/methods/ceilometerview.py
new file mode 100644
index 0000000..a453524
--- /dev/null
+++ b/xos/core/xoslib/methods/ceilometerview.py
@@ -0,0 +1,940 @@
+import logging
+import requests
+from six.moves import urllib
+import urllib2
+import pytz
+import datetime
+from rest_framework.decorators import api_view
+from rest_framework.response import Response
+from rest_framework.reverse import reverse
+from rest_framework import serializers
+from rest_framework import generics
+from rest_framework.views import APIView
+from core.models import *
+from ceilometer.models import *
+from django.forms import widgets
+from django.utils import datastructures
+from django.utils.translation import ugettext_lazy as _
+from django.utils import timezone
+from syndicate_storage.models import Volume
+from django.core.exceptions import PermissionDenied
+
+# This REST API endpoint provides information that the ceilometer view needs to display
+LOG = logging.getLogger(__name__)
+
+def getTenantCeilometerProxyURL(user):
+    monitoring_channel = None
+    for obj in MonitoringChannel.get_tenant_objects().all():
+        if (obj.creator.username == user.username):
+            monitoring_channel = obj
+            break
+    if not monitoring_channel:
+        raise XOSMissingField("Monitoring channel is missing for this tenant...Create one and invoke this REST API")
+    #TODO: Wait until URL is completely UP
+    while True:
+        try:
+            response = urllib2.urlopen(monitoring_channel.ceilometer_url,timeout=1)
+            break
+        except urllib2.HTTPError, e:
+            LOG.info('SRIKANTH: HTTP error %(reason)s' % {'reason':e.reason})
+            break
+        except urllib2.URLError, e:
+            LOG.info('SRIKANTH: URL error %(reason)s' % {'reason':e.reason})
+            pass
+    return monitoring_channel.ceilometer_url
+
+def getTenantControllerTenantMap(user):
+    tenantmap={}
+    for slice in Slice.objects.filter(creator=user):
+        for cs in slice.controllerslices.all():
+            if cs.tenant_id:
+                tenantmap[cs.tenant_id] = cs.slice.name
+    return tenantmap
+
+def build_url(path, q, params=None):
+    """Convert list of dicts and a list of params to query url format.
+
+    This will convert the following:
+        "[{field=this,op=le,value=34},
+          {field=that,op=eq,value=foo,type=string}],
+         ['foo=bar','sna=fu']"
+    to:
+        "?q.field=this&q.field=that&
+          q.op=le&q.op=eq&
+          q.type=&q.type=string&
+          q.value=34&q.value=foo&
+          foo=bar&sna=fu"
+    """
+    if q:
+        query_params = {'q.field': [],
+                        'q.value': [],
+                        'q.op': [],
+                        'q.type': []}
+
+        for query in q:
+            for name in ['field', 'op', 'value', 'type']:
+                query_params['q.%s' % name].append(query.get(name, ''))
+
+        # Transform the dict to a sequence of two-element tuples in fixed
+        # order, then the encoded string will be consistent in Python 2&3.
+        new_qparams = sorted(query_params.items(), key=lambda x: x[0])
+        path += "?" + urllib.parse.urlencode(new_qparams, doseq=True)
+
+        if params:
+            for p in params:
+                path += '&%s' % p
+    elif params:
+        path += '?%s' % params[0]
+        for p in params[1:]:
+            path += '&%s' % p
+    return path
+
+def concat_url(endpoint, url):
+    """Concatenate endpoint and final URL.
+
+    E.g., "http://keystone/v2.0/" and "/tokens" are concatenated to
+    "http://keystone/v2.0/tokens".
+
+    :param endpoint: the base URL
+    :param url: the final URL
+    """
+    return "%s/%s" % (endpoint.rstrip("/"), url.strip("/"))
+
+def resource_list(request, query=None, ceilometer_url=None, ceilometer_usage_object=None):
+    """List the resources."""
+    url = concat_url(ceilometer_url, build_url('/v2/resources', query))
+    try:
+        response = requests.get(url)
+    except requests.exceptions.RequestException as e:
+        raise e
+    return response.json()
+
+def sample_list(request, meter_name, ceilometer_url=None, query=None, limit=None):
+    """List the samples for this meters."""
+    params = ['limit=%s' % limit] if limit else []
+    url = concat_url(ceilometer_url, build_url('/v2/samples', query, params))
+    try:
+        response = requests.get(url)
+    except requests.exceptions.RequestException as e:
+        raise e
+    return response.json()
+
+def meter_list(request, ceilometer_url=None, query=None):
+    """List the user's meters."""
+    url = concat_url(ceilometer_url, build_url('/v2/meters', query))
+    try:
+        response = requests.get(url)
+    except requests.exceptions.RequestException as e:
+        raise e
+    return response.json()
+
+
+def statistic_list(request, meter_name, ceilometer_url=None, query=None, period=None):
+    """List of statistics."""
+    p = ['period=%s' % period] if period else []
+    url = concat_url(ceilometer_url, build_url('/v2/meters/' + meter_name + '/statistics', query, p))
+    try:
+        response = requests.get(url)
+    except requests.exceptions.RequestException as e:
+        raise e
+    return response.json()
+
+def diff_lists(a, b):
+    if not a:
+        return []
+    elif not b:
+        return a
+    else:
+        return list(set(a) - set(b))
+
+class Meters(object):
+    """Class for listing of available meters.
+
+    It is listing meters defined in this class that are available
+    in Ceilometer meter_list.
+
+    It is storing information that is not available in Ceilometer, i.e.
+    label, description.
+
+    """
+
+    def __init__(self, request=None, ceilometer_meter_list=None, ceilometer_url=None, tenant_map=None):
+        # Storing the request.
+        self._request = request
+        self.ceilometer_url = ceilometer_url
+        self.tenant_map = tenant_map
+
+        # Storing the Ceilometer meter list
+        if ceilometer_meter_list:
+            self._ceilometer_meter_list = ceilometer_meter_list
+        else:
+            try:
+                query=[]
+                self._ceilometer_meter_list = meter_list(request, self.ceilometer_url, query)
+                #LOG.info('SRIKANTH: meters=%(meters)s'%{'meters':[m.project_id for m in self._ceilometer_meter_list]})
+            except requests.exceptions.RequestException as e:
+                self._ceilometer_meter_list = []
+                raise e
+
+        # Storing the meters info categorized by their services.
+        self._nova_meters_info = self._get_nova_meters_info()
+        self._neutron_meters_info = self._get_neutron_meters_info()
+        self._glance_meters_info = self._get_glance_meters_info()
+        self._cinder_meters_info = self._get_cinder_meters_info()
+        self._swift_meters_info = self._get_swift_meters_info()
+        self._kwapi_meters_info = self._get_kwapi_meters_info()
+        self._ipmi_meters_info = self._get_ipmi_meters_info()
+        self._vcpe_meters_info = self._get_vcpe_meters_info()
+        self._sdn_meters_info = self._get_sdn_meters_info()
+
+        # Storing the meters info of all services together.
+        all_services_meters = (self._nova_meters_info,
+                               self._neutron_meters_info,
+                               self._glance_meters_info,
+                               self._cinder_meters_info,
+                               self._swift_meters_info,
+                               self._kwapi_meters_info,
+                               self._ipmi_meters_info,
+                               self._vcpe_meters_info,
+                               self._sdn_meters_info)
+        self._all_meters_info = {}
+        for service_meters in all_services_meters:
+            self._all_meters_info.update(dict([(meter_name, meter_info)
+                                               for meter_name, meter_info
+                                               in service_meters.items()]))
+
+        # Here will be the cached Meter objects, that will be reused for
+        # repeated listing.
+        self._cached_meters = {}
+
+    def list_all(self, only_meters=None, except_meters=None):
+        """Returns a list of meters based on the meters names.
+
+        :Parameters:
+          - `only_meters`: The list of meter names we want to show.
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        return self._list(only_meters=only_meters,
+                          except_meters=except_meters)
+
+    def list_nova(self, except_meters=None):
+        """Returns a list of meters tied to nova.
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        return self._list(only_meters=self._nova_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_neutron(self, except_meters=None):
+        """Returns a list of meters tied to neutron.
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        return self._list(only_meters=self._neutron_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_glance(self, except_meters=None):
+        """Returns a list of meters tied to glance.
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        return self._list(only_meters=self._glance_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_cinder(self, except_meters=None):
+        """Returns a list of meters tied to cinder.
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        return self._list(only_meters=self._cinder_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_swift(self, except_meters=None):
+        """Returns a list of meters tied to swift.
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        return self._list(only_meters=self._swift_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_kwapi(self, except_meters=None):
+        """Returns a list of meters tied to kwapi.
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        return self._list(only_meters=self._kwapi_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_ipmi(self, except_meters=None):
+        """Returns a list of meters tied to ipmi
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show
+        """
+
+        return self._list(only_meters=self._ipmi_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_vcpe(self, except_meters=None):
+        """Returns a list of meters tied to vcpe service
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show
+        """
+
+        return self._list(only_meters=self._vcpe_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_sdn(self, except_meters=None):
+        """Returns a list of meters tied to sdn service
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show
+        """
+
+        return self._list(only_meters=self._sdn_meters_info.keys(),
+                          except_meters=except_meters)
+
+    def list_other_services(self, except_meters=None):
+        """Returns a list of meters tied to ipmi
+
+        :Parameters:
+          - `except_meters`: The list of meter names we don't want to show
+        """
+        other_service_meters = [m for m in self._ceilometer_meter_list
+                                if m.name not in self._all_meters_info.keys()]
+        other_service_meters = diff_lists(other_service_meters, except_meters)
+
+        meters = []
+        for meter in other_service_meters:
+            self._cached_meters[meter.name] = meter
+            meters.append(meter)
+        return meters
+
+    def _list(self, only_meters=None, except_meters=None):
+        """Returns a list of meters based on the meters names.
+
+        :Parameters:
+          - `only_meters`: The list of meter names we want to show.
+          - `except_meters`: The list of meter names we don't want to show.
+        """
+
+        # Get all wanted meter names.
+        if only_meters:
+            meter_names = only_meters
+        else:
+            meter_names = [meter_name for meter_name
+                           in self._all_meters_info.keys()]
+
+        meter_names = diff_lists(meter_names, except_meters)
+        # Collect meters for wanted meter names.
+        return self._get_meters(meter_names)
+
+    def _get_meters(self, meter_names):
+        """Obtain meters based on meter_names.
+
+        The meters that do not exist in Ceilometer meter list are left out.
+
+        :Parameters:
+          - `meter_names`: A list of meter names we want to fetch.
+        """
+
+        meters = []
+        for meter_name in meter_names:
+            meter = self._get_meter(meter_name)
+            if meter:
+                meters.append(meter)
+        return meters
+
+    def _get_meter(self, meter_name):
+        """Obtains a meter.
+
+        Obtains meter either from cache or from Ceilometer meter list
+        joined with statically defined meter info like label and description.
+
+        :Parameters:
+          - `meter_name`: A meter name we want to fetch.
+        """
+        meter = self._cached_meters.get(meter_name, None)
+        if not meter:
+            meter_candidates = [m for m in self._ceilometer_meter_list
+                                if m["name"] == meter_name]
+
+            if meter_candidates:
+                meter_info = self._all_meters_info.get(meter_name, None)
+                if meter_info:
+                    label = meter_info["label"]
+                    description = meter_info["description"]
+                else:
+                    label = ""
+                    description = ""
+                meter = meter_candidates[0]
+                meter["label"] = label
+                meter["description"] = description
+                if meter["project_id"] in self.tenant_map.keys():
+                    meter["project_name"] = self.tenant_map[meter["project_id"]]
+                else:
+                    meter["project_name"] = meter["project_id"]
+
+                self._cached_meters[meter_name] = meter
+
+        return meter
+
+    def _get_nova_meters_info(self):
+        """Returns additional info for each meter.
+
+        That will be used for augmenting the Ceilometer meter.
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        meters_info = datastructures.SortedDict([
+            ("instance", {
+                'label': '',
+                'description': _("Existence of instance"),
+            }),
+            ("instance:<type>", {
+                'label': '',
+                'description': _("Existence of instance <type> "
+                                 "(openstack types)"),
+            }),
+            ("memory", {
+                'label': '',
+                'description': _("Volume of RAM"),
+            }),
+            ("memory.usage", {
+                'label': '',
+                'description': _("Volume of RAM used"),
+            }),
+            ("cpu", {
+                'label': '',
+                'description': _("CPU time used"),
+            }),
+            ("cpu_util", {
+                'label': '',
+                'description': _("Average CPU utilization"),
+            }),
+            ("vcpus", {
+                'label': '',
+                'description': _("Number of VCPUs"),
+            }),
+            ("network.incoming.bytes.rate", {
+                'label': '',
+                'description': _("Average rate per sec of incoming "
+                                 "bytes on a VM network interface"),
+            }),
+            ("network.outgoing.bytes.rate", {
+                'label': '',
+                'description': _("Average rate per sec of outgoing "
+                                 "bytes on a VM network interface"),
+            }),
+        ])
+        # Adding flavor based meters into meters_info dict
+        # TODO(lsmola) this kind of meter will be probably deprecated
+        # https://bugs.launchpad.net/ceilometer/+bug/1208365 . Delete it then.
+        #for flavor in get_flavor_names(self._request):
+        #    name = 'instance:%s' % flavor
+        #    meters_info[name] = dict(meters_info["instance:<type>"])
+
+        #    meters_info[name]['description'] = (
+        #        _('Duration of instance type %s (openstack flavor)') %
+        #        flavor)
+
+        # TODO(lsmola) allow to set specific in local_settings. For all meters
+        # because users can have their own agents and meters.
+        return meters_info
+
+    def _get_neutron_meters_info(self):
+        """Returns additional info for each meter.
+
+        That will be used for augmenting the Ceilometer meter.
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('network', {
+                'label': '',
+                'description': _("Existence of network"),
+            }),
+            ('subnet', {
+                'label': '',
+                'description': _("Existence of subnet"),
+            }),
+            ('port', {
+                'label': '',
+                'description': _("Existence of port"),
+            }),
+            ('ip.floating', {
+                'label': '',
+                'description': _("Existence of floating ip"),
+            }),
+        ])
+
+    def _get_glance_meters_info(self):
+        """Returns additional info for each meter.
+
+        That will be used for augmenting the Ceilometer meter.
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('image', {
+                'label': '',
+                'description': _("Image existence check"),
+            }),
+            ('image.size', {
+                'label': '',
+                'description': _("Uploaded image size"),
+            }),
+            ('image.update', {
+                'label': '',
+                'description': _("Number of image updates"),
+            }),
+            ('image.upload', {
+                'label': '',
+                'description': _("Number of image uploads"),
+            }),
+            ('image.delete', {
+                'label': '',
+                'description': _("Number of image deletions"),
+            }),
+            ('image.download', {
+                'label': '',
+                'description': _("Image is downloaded"),
+            }),
+            ('image.serve', {
+                'label': '',
+                'description': _("Image is served out"),
+            }),
+        ])
+
+    def _get_cinder_meters_info(self):
+        """Returns additional info for each meter.
+
+        That will be used for augmenting the Ceilometer meter.
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('volume', {
+                'label': '',
+                'description': _("Existence of volume"),
+            }),
+            ('volume.size', {
+                'label': '',
+                'description': _("Size of volume"),
+            }),
+        ])
+
+    def _get_swift_meters_info(self):
+        """Returns additional info for each meter.
+
+        That will be used for augmenting the Ceilometer meter.
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('storage.objects', {
+                'label': '',
+                'description': _("Number of objects"),
+            }),
+            ('storage.objects.size', {
+                'label': '',
+                'description': _("Total size of stored objects"),
+            }),
+            ('storage.objects.containers', {
+                'label': '',
+                'description': _("Number of containers"),
+            }),
+            ('storage.objects.incoming.bytes', {
+                'label': '',
+                'description': _("Number of incoming bytes"),
+            }),
+            ('storage.objects.outgoing.bytes', {
+                'label': '',
+                'description': _("Number of outgoing bytes"),
+            }),
+            ('storage.api.request', {
+                'label': '',
+                'description': _("Number of API requests against swift"),
+            }),
+        ])
+
+    def _get_kwapi_meters_info(self):
+        """Returns additional info for each meter.
+
+        That will be used for augmenting the Ceilometer meter.
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('energy', {
+                'label': '',
+                'description': _("Amount of energy"),
+            }),
+            ('power', {
+                'label': '',
+                'description': _("Power consumption"),
+            }),
+        ])
+
+    def _get_ipmi_meters_info(self):
+        """Returns additional info for each meter
+
+        That will be used for augmenting the Ceilometer meter
+        """
+
+        # TODO(lsmola) Unless the Ceilometer will provide the information
+        # below, I need to define it as a static here. I will be joining this
+        # to info that I am able to obtain from Ceilometer meters, hopefully
+        # some day it will be supported all.
+        return datastructures.SortedDict([
+            ('hardware.ipmi.node.power', {
+                'label': '',
+                'description': _("System Current Power"),
+            }),
+            ('hardware.ipmi.fan', {
+                'label': '',
+                'description': _("Fan RPM"),
+            }),
+            ('hardware.ipmi.temperature', {
+                'label': '',
+                'description': _("Sensor Temperature Reading"),
+            }),
+            ('hardware.ipmi.current', {
+                'label': '',
+                'description': _("Sensor Current Reading"),
+            }),
+            ('hardware.ipmi.voltage', {
+                'label': '',
+                'description': _("Sensor Voltage Reading"),
+            }),
+            ('hardware.ipmi.node.inlet_temperature', {
+                'label': '',
+                'description': _("System Inlet Temperature Reading"),
+            }),
+            ('hardware.ipmi.node.outlet_temperature', {
+                'label': '',
+                'description': _("System Outlet Temperature Reading"),
+            }),
+            ('hardware.ipmi.node.airflow', {
+                'label': '',
+                'description': _("System Airflow Reading"),
+            }),
+            ('hardware.ipmi.node.cups', {
+                'label': '',
+                'description': _("System CUPS Reading"),
+            }),
+            ('hardware.ipmi.node.cpu_util', {
+                'label': '',
+                'description': _("System CPU Utilization Reading"),
+            }),
+            ('hardware.ipmi.node.mem_util', {
+                'label': '',
+                'description': _("System Memory Utilization Reading"),
+            }),
+            ('hardware.ipmi.node.io_util', {
+                'label': '',
+                'description': _("System IO Utilization Reading"),
+            }),
+        ])
+
+    def _get_vcpe_meters_info(self):
+        """Returns additional info for each meter.
+
+        That will be used for augmenting the Ceilometer meter.
+        """
+
+        # TODO(lsmola) Until Ceilometer provides the information below, I
+        # need to define it statically here. I will be joining this to the
+        # info that I am able to obtain from Ceilometer meters; hopefully
+        # some day it will all be supported.
+        return datastructures.SortedDict([
+            ('vcpe', {
+                'label': '',
+                'description': _("Existence of vcpe instance"),
+            }),
+            ('vcpe.dns.cache.size', {
+                'label': '',
+                'description': _("Number of entries in DNS cache"),
+            }),
+            ('vcpe.dns.total_instered_entries', {
+                'label': '',
+                'description': _("Total number of entries inserted into the cache"),
+            }),
+            ('vcpe.dns.replaced_unexpired_entries', {
+                'label': '',
+                'description': _("Unexpired entries that were thrown out of cache"),
+            }),
+            ('vcpe.dns.queries_answered_locally', {
+                'label': '',
+                'description': _("Number of cache hits"),
+            }),
+            ('vcpe.dns.queries_forwarded', {
+                'label': '',
+                'description': _("Number of cache misses"),
+            }),
+            ('vcpe.dns.server.queries_sent', {
+                'label': '',
+                'description': _("For each upstream server, the number of queries sent"),
+            }),
+            ('vcpe.dns.server.queries_failed', {
+                'label': '',
+                'description': _("For each upstream server, the number of queries failed"),
+            }),
+        ])
+
+    def _get_sdn_meters_info(self):
+        """Returns additional info for each meter.
+
+        That will be used for augmenting the Ceilometer meter.
+        """
+
+        # TODO(lsmola) Until Ceilometer provides the information below, I
+        # need to define it statically here. I will be joining this to the
+        # info that I am able to obtain from Ceilometer meters; hopefully
+        # some day it will all be supported.
+        return datastructures.SortedDict([
+            ('switch', {
+                'label': '',
+                'description': _("Existence of switch"),
+            }),
+            ('switch.port', {
+                'label': '',
+                'description': _("Existence of port"),
+            }),
+            ('switch.port.receive.packets', {
+                'label': '',
+                'description': _("Packets received on port"),
+            }),
+            ('switch.port.transmit.packets', {
+                'label': '',
+                'description': _("Packets transmitted on port"),
+            }),
+            ('switch.port.receive.drops', {
+                'label': '',
+                'description': _("Drops received on port"),
+            }),
+            ('switch.port.transmit.drops', {
+                'label': '',
+                'description': _("Drops transmitted on port"),
+            }),
+            ('switch.port.receive.errors', {
+                'label': '',
+                'description': _("Errors received on port"),
+            }),
+            ('switch.port.transmit.errors', {
+                'label': '',
+                'description': _("Errors transmitted on port"),
+            }),
+            ('switch.flow', {
+                'label': '',
+                'description': _("Duration of flow"),
+            }),
+            ('switch.flow.packets', {
+                'label': '',
+                'description': _("Packets received"),
+            }),
+            ('switch.table', {
+                'label': '',
+                'description': _("Existence of table"),
+            }),
+            ('switch.table.active.entries', {
+                'label': '',
+                'description': _("Active entries in table"),
+            }),
+        ])
+
+def make_query(user_id=None, tenant_id=None, resource_id=None,
+               user_ids=None, tenant_ids=None, resource_ids=None):
+    """Returns query built from given parameters.
+
+    This query can be then used for querying resources, meters and
+    statistics.
+
+    :Parameters:
+      - `user_id`: user_id, has a priority over list of ids
+      - `tenant_id`: tenant_id, has a priority over list of ids
+      - `resource_id`: resource_id, has a priority over list of ids
+      - `user_ids`: list of user_ids
+      - `tenant_ids`: list of tenant_ids
+      - `resource_ids`: list of resource_ids
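+
+    Example: make_query(tenant_id="abc") returns
+      [{"field": "project_id", "op": "eq", "value": "abc"}]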
+    """
+    user_ids = user_ids or []
+    tenant_ids = tenant_ids or []
+    resource_ids = resource_ids or []
+
+    query = []
+    if user_id:
+        user_ids = [user_id]
+    for u_id in user_ids:
+        query.append({"field": "user_id", "op": "eq", "value": u_id})
+
+    if tenant_id:
+        tenant_ids = [tenant_id]
+    for t_id in tenant_ids:
+        query.append({"field": "project_id", "op": "eq", "value": t_id})
+
+    if resource_id:
+        resource_ids = [resource_id]
+    for r_id in resource_ids:
+        query.append({"field": "resource_id", "op": "eq", "value": r_id})
+
+    return query
+
+def calc_date_args(date_from, date_to, date_options):
+    # TODO(lsmola) all timestamps should probably work with
+    # current timezone. And also show the current timezone in chart.
+    if date_options == "other":
+        try:
+            if date_from:
+                date_from = pytz.utc.localize(
+                    datetime.datetime.strptime(str(date_from), "%Y-%m-%d"))
+            else:
+                # TODO(lsmola) there should be probably the date
+                # of the first sample as default, so it correctly
+                # counts the time window. Though I need ordering
+                # and limit of samples to obtain that.
+                pass
+            if date_to:
+                date_to = pytz.utc.localize(
+                    datetime.datetime.strptime(str(date_to), "%Y-%m-%d"))
+                # It returns the beginning of the day, I want the end of
+                # the day, so I add one day without a second.
+                date_to = (date_to + datetime.timedelta(days=1) -
+                           datetime.timedelta(seconds=1))
+            else:
+                date_to = timezone.now()
+        except Exception:
+            raise ValueError(_("The dates haven't been recognized"))
+    else:
+        try:
+            date_to = timezone.now()
+            date_from = date_to - datetime.timedelta(days=float(date_options))
+        except Exception as e:
+            raise e
+            #raise ValueError(_("The time delta must be a number representing "
+            #                   "the time span in days"))
+    return date_from, date_to
+
+class MetersList(APIView):
+    method_kind = "list"
+    method_name = "meters"
+
+    def get(self, request, format=None):
+        if (not request.user.is_authenticated()):
+            raise PermissionDenied("You must be authenticated in order to use this API")
+        tenant_ceilometer_url = getTenantCeilometerProxyURL(request.user)
+        if (not tenant_ceilometer_url):
+            raise XOSMissingField("Tenant ceilometer URL is missing")
+        tenant_map = getTenantControllerTenantMap(request.user)
+        meters = Meters(request, ceilometer_url=tenant_ceilometer_url, tenant_map=tenant_map)
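+        # Listing the meters of each service below populates
+        # meters._cached_meters, whose values are returned to the caller.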
+        services = {
+            _('Nova'): meters.list_nova(),
+            _('Neutron'): meters.list_neutron(),
+            _('VCPE'): meters.list_vcpe(),
+            _('SDN'): meters.list_sdn(),
+        }
+        return Response(meters._cached_meters.values())
+
+class MeterStatisticsList(APIView):
+    method_kind = "list"
+    method_name = "meterstatistics"
+
+    def get(self, request, format=None):
+        if (not request.user.is_authenticated()):
+            raise PermissionDenied("You must be authenticated in order to use this API")
+        tenant_ceilometer_url = getTenantCeilometerProxyURL(request.user)
+        if (not tenant_ceilometer_url):
+            raise XOSMissingField("Tenant ceilometer URL is missing")
+        tenant_map = getTenantControllerTenantMap(request.user)
+
+        date_options = request.QUERY_PARAMS.get('period', 1)
+        date_from = request.QUERY_PARAMS.get('date_from', '')
+        date_to = request.QUERY_PARAMS.get('date_to', '')
+
+        try:
+            date_from, date_to = calc_date_args(date_from,
+                                                date_to,
+                                                date_options)
+        except Exception as e:
+            raise e
+
+        meters = Meters(request, ceilometer_url=tenant_ceilometer_url, tenant_map=tenant_map)
+        services = {
+            _('Nova'): meters.list_nova(),
+            _('Neutron'): meters.list_neutron(),
+            _('VCPE'): meters.list_vcpe(),
+            _('SDN'): meters.list_sdn(),
+        }
+        report_rows = []
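+        # For every meter of every service, fetch statistics with a 24-hour
+        # period and report the first bucket's average value.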
+        for service, service_meters in services.items():
+            for meter in service_meters:
+                query = make_query(tenant_id=meter["project_id"])
+                statistics = statistic_list(request, meter["name"],
+                                            ceilometer_url=tenant_ceilometer_url,
+                                            query=query, period=3600*24)
+                statistic = statistics[0]
+                row = {"name": 'none',
+                       "project": meter["project_name"],
+                       "meter": meter["name"],
+                       "description": meter["description"],
+                       "service": service,
+                       "time": statistic["period_end"],
+                       "value": statistic["avg"],
+                       "unit": meter["unit"]}
+                report_rows.append(row)
+
+        return Response(report_rows)
+
+
+class MeterSamplesList(APIView):
+    method_kind = "list"
+    method_name = "metersamples"
+
+    def get(self, request, format=None):
+        if (not request.user.is_authenticated()):
+            raise PermissionDenied("You must be authenticated in order to use this API")
+        tenant_ceilometer_url = getTenantCeilometerProxyURL(request.user)
+        if (not tenant_ceilometer_url):
+            raise XOSMissingField("Tenant ceilometer URL is missing")
+        meter_name = request.QUERY_PARAMS.get('meter', None)
+        tenant_id = request.QUERY_PARAMS.get('tenant', None)
+        if not meter_name:
+            raise XOSMissingField("Meter name in query params is missing")
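+        # Build a Ceilometer query limited to the requested meter (and tenant,
+        # when one is supplied), then fetch up to 10 samples.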
+        query = []
+        if tenant_id:
+            query.extend(make_query(tenant_id=tenant_id))
+        query.append({"field": "meter", "op": "eq", "value": meter_name})
+        samples = sample_list(request, meter_name,
+                              ceilometer_url=tenant_ceilometer_url, query=query, limit=10)
+        return Response(samples)
diff --git a/xos/manage.py b/xos/manage.py
index 0ddd014..5d09794 100644
--- a/xos/manage.py
+++ b/xos/manage.py
@@ -7,6 +7,10 @@
 
     from django.core.management import execute_from_command_line
 
+    if "--makemigrations" in sys.argv:
+        os.system("/opt/xos/scripts/opencloud makemigrations")
+        sys.argv.remove("--makemigrations")
+
     if "--nomodelpolicy" in sys.argv:
         import model_policy
         model_policy.EnableModelPolicy(False)
diff --git a/xos/observers/monitoring_channel/templates/ceilometer_proxy_server.py b/xos/observers/monitoring_channel/templates/ceilometer_proxy_server.py
index c6be286..249c965 100644
--- a/xos/observers/monitoring_channel/templates/ceilometer_proxy_server.py
+++ b/xos/observers/monitoring_channel/templates/ceilometer_proxy_server.py
@@ -121,7 +121,7 @@
     i=0
     user_specified_tenants=[]
     for field in query_params['q.field']:
-        if field != 'project_id':
+        if (field != 'project_id') and (field != 'project'):
             query = {}
             query['field']=field
             if query_params['q.op'][i] != '':
@@ -195,11 +195,13 @@
              "q.op": [],
              "q.type": [],
              "q.value": [],
+             "limit": None,
         }
         query_params = web.input(**keyword_args)
         new_query, user_specified_tenants = filter_query_params(query_params)
 
         client = ceilometerclient()
+        limit=query_params.limit
         samples=[]
         for (k,v) in config.items('allowed_tenants'):
               if user_specified_tenants and (k not in user_specified_tenants):
@@ -209,7 +211,7 @@
               query = make_query(tenant_id=k)
               final_query.extend(query)
               logger.debug('final query=%s',final_query)
-              results = client.samples.list(q=final_query)
+              results = client.new_samples.list(q=final_query,limit=limit)
               samples.extend(results)
         return json.dumps([ob._info for ob in samples])
 
diff --git a/xos/observers/onos/steps/sync_onosapp.py b/xos/observers/onos/steps/sync_onosapp.py
index 8c97391..9b32298 100644
--- a/xos/observers/onos/steps/sync_onosapp.py
+++ b/xos/observers/onos/steps/sync_onosapp.py
@@ -75,6 +75,7 @@
 
     def write_configs(self, o):
         o.config_fns = []
+        o.rest_configs = []
         o.files_dir = self.get_files_dir(o)
 
         if not os.path.exists(o.files_dir):
@@ -85,6 +86,16 @@
                 fn = attr.name[7:] # .replace("_json",".json")
                 o.config_fns.append(fn)
                 file(os.path.join(o.files_dir, fn),"w").write(attr.value)
+            if attr.name.startswith("rest_"):
+                fn = attr.name[5:].replace("/","_")
+                endpoint = attr.name[5:]
+                # Ansible goes out of its way to make our life difficult. If
+                # 'lookup' sees a file that it thinks contains json, then it'll
+                # insist on parsing and return a json object. We just want
+                # a string, so prepend a space and then strip the space off
+                # later.
+                file(os.path.join(o.files_dir, fn),"w").write(" " +attr.value)
+                o.rest_configs.append( {"endpoint": endpoint, "fn": fn} )
 
     def prepare_record(self, o):
         self.write_configs(o)
@@ -95,6 +106,7 @@
         fields["appname"] = o.name
         fields["nat_ip"] = self.get_instance(o).get_ssh_ip()
         fields["config_fns"] = o.config_fns
+        fields["rest_configs"] = o.rest_configs
         fields["dependencies"] = [x.strip() for x in o.dependencies.split(",")]
         fields["ONOS_container"] = "ONOS"
         return fields
diff --git a/xos/observers/onos/steps/sync_onosapp.yaml b/xos/observers/onos/steps/sync_onosapp.yaml
index ad3718c..9ee2513 100644
--- a/xos/observers/onos/steps/sync_onosapp.yaml
+++ b/xos/observers/onos/steps/sync_onosapp.yaml
@@ -7,6 +7,13 @@
   vars:
     appname: {{ appname }}
     dependencies: {{ dependencies }}
+{% if rest_configs %}
+    rest_configs:
+{% for rest_config in rest_configs %}
+       - endpoint: {{ rest_config.endpoint }}
+         body: "{{ '{{' }} lookup('file', '{{ files_dir }}/{{ rest_config.fn }}') {{ '}}' }}"
+{% endfor %}
+{% endif %}
 
   tasks:
 
@@ -38,6 +45,18 @@
         {% endfor %}
 {% endif %}
 
+{% if rest_configs %}
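+  # Push each rest_* attribute body to its ONOS REST endpoint using the
+  # default karaf/karaf credentials.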
+  - name: Add ONOS configuration values
+    uri:
+      url: http://localhost:8181/{{ '{{' }} item.endpoint {{ '}}' }}  # e.g. http://localhost:8181/onos/v1/network/configuration/
+      body: "{{ '{{' }} item.body {{ '}}' }}"
+      body_format: raw
+      method: POST
+      user: karaf
+      password: karaf
+    with_items: "rest_configs"
+{% endif %}
+
   # Don't know how to check for this condition, just wait
   - name: Wait for ONOS to install the apps
     wait_for: timeout=15
diff --git a/xos/openstack_observer/steps/sync_container.py b/xos/openstack_observer/steps/sync_container.py
index b9b41c5..272e5f8 100644
--- a/xos/openstack_observer/steps/sync_container.py
+++ b/xos/openstack_observer/steps/sync_container.py
@@ -7,7 +7,7 @@
 from django.db.models import F, Q
 from xos.config import Config
 from observers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-from observer.syncstep import SyncStep
+from observer.syncstep import SyncStep, DeferredException
 from observer.ansible import run_template_ssh
 from core.models import Service, Slice, Instance
 from services.onos.models import ONOSService, ONOSApp
@@ -45,7 +45,7 @@
         for parent_port in instance.parent.ports.all():
             if parent_port.network == port.network:
                 if not parent_port.mac:
-                     raise Exception("parent port on network %s does not have mac yet" % parent_port.network.name)
+                     raise DeferredException("parent port on network %s does not have mac yet" % parent_port.network.name)
                 return parent_port.mac
         raise Exception("failed to find corresponding parent port for network %s" % port.network.name)
 
@@ -56,18 +56,24 @@
             if (not port.ip):
                 # 'unmanaged' ports may have an ip, but no mac
                 # XXX: are there any ports that have a mac but no ip?
-                raise Exception("Port on network %s is not yet ready" % port.network.name)
+                raise DeferredException("Port on network %s is not yet ready" % port.network.name)
 
             pd={}
             pd["mac"] = port.mac or ""
             pd["ip"] = port.ip or ""
             pd["xos_network_id"] = port.network.id
 
+            if port.network.name == "wan_network":
+                if port.ip:
+                    (a, b, c, d) = port.ip.split('.')
+                    pd["mac"] = "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))
+
+
             if o.isolation == "container":
                 # container on bare metal
                 instance_port = self.get_instance_port(port)
                 if not instance_port:
-                    raise Exception("No instance on slice for port on network %s" % port.network.name)
+                    raise DeferredException("No instance on slice for port on network %s" % port.network.name)
 
                 pd["snoop_instance_mac"] = instance_port.mac
                 pd["snoop_instance_id"] = instance_port.instance.instance_id
diff --git a/xos/openstack_observer/steps/sync_container.yaml b/xos/openstack_observer/steps/sync_container.yaml
index 56edaea..b60ffb8 100644
--- a/xos/openstack_observer/steps/sync_container.yaml
+++ b/xos/openstack_observer/steps/sync_container.yaml
@@ -104,6 +104,12 @@
     shell: systemctl daemon-reload
     when: systemctl.stat.exists == True
 
+{% if ports %}
+  - name: make sure bridges are setup
+    shell: ifconfig {{ '{{' }} item.bridge {{ '}}' }}
+    with_items: "ports"
+{% endif %}
+
   - name: Make sure container is running
     service: name=container-{{ container_name }} state=started
 
diff --git a/xos/tosca/custom_types/xos.m4 b/xos/tosca/custom_types/xos.m4
index 3606601..d4e96ce 100644
--- a/xos/tosca/custom_types/xos.m4
+++ b/xos/tosca/custom_types/xos.m4
@@ -143,6 +143,9 @@
             config_network-cfg.json:
                 type: string
                 required: false
+            rest_onos/v1/network/configuration/:
+                type: string
+                required: false
 
     tosca.nodes.VCPEService:
         description: >
@@ -588,6 +591,10 @@
                 type: integer
                 default: 10
                 description: Quota of instances that this slice may create.
+            default_isolation:
+                type: string
+                required: false
+                description: Default isolation to use when bringing up instances (defaults to 'vm').
 
     tosca.nodes.Node:
         derived_from: tosca.nodes.Root
diff --git a/xos/tosca/custom_types/xos.yaml b/xos/tosca/custom_types/xos.yaml
index 7a6030e..0c20211 100644
--- a/xos/tosca/custom_types/xos.yaml
+++ b/xos/tosca/custom_types/xos.yaml
@@ -165,6 +165,9 @@
             config_network-cfg.json:
                 type: string
                 required: false
+            rest_onos/v1/network/configuration/:
+                type: string
+                required: false
 
     tosca.nodes.VCPEService:
         description: >
@@ -776,6 +779,10 @@
                 type: integer
                 default: 10
                 description: Quota of instances that this slice may create.
+            default_isolation:
+                type: string
+                required: false
+                description: Default isolation to use when bringing up instances (defaults to 'vm').
 
     tosca.nodes.Node:
         derived_from: tosca.nodes.Root
diff --git a/xos/tosca/resources/onosapp.py b/xos/tosca/resources/onosapp.py
index 111cf9a..7ed47d7 100644
--- a/xos/tosca/resources/onosapp.py
+++ b/xos/tosca/resources/onosapp.py
@@ -43,9 +43,9 @@
             if attrs:
                 attr = attrs[0]
                 if attr.value != value:
-                    self.info("updating attribute %s" % k)
-                    attrs.value = value
-                    attrs.save()
+                    self.info("updating attribute %s" % prop_name)
+                    attr.value = value
+                    attr.save()
             else:
                 self.info("adding attribute %s" % prop_name)
                 ta = TenantAttribute(tenant=obj, name=prop_name, value=value)
@@ -57,6 +57,8 @@
             v = d.value
             if k.startswith("config_"):
                 self.set_tenant_attr(obj, k, v)
+            elif k.startswith("rest_"):
+                self.set_tenant_attr(obj, k, v)
 
     def can_delete(self, obj):
         return super(XOSONOSApp, self).can_delete(obj)
diff --git a/xos/tosca/resources/slice.py b/xos/tosca/resources/slice.py
index 2c02365..e37bfc8 100644
--- a/xos/tosca/resources/slice.py
+++ b/xos/tosca/resources/slice.py
@@ -12,7 +12,7 @@
 class XOSSlice(XOSResource):
     provides = "tosca.nodes.Slice"
     xos_model = Slice
-    copyin_props = ["enabled", "description", "slice_url", "max_instances"]
+    copyin_props = ["enabled", "description", "slice_url", "max_instances", "default_isolation"]
 
     def get_xos_args(self):
         args = super(XOSSlice, self).get_xos_args()
diff --git a/xos/tosca/samples/container_slice.yaml b/xos/tosca/samples/container_slice.yaml
new file mode 100644
index 0000000..520bec0
--- /dev/null
+++ b/xos/tosca/samples/container_slice.yaml
@@ -0,0 +1,24 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >
+    * Create a Site.
+    * Create a Slice in the Site whose default isolation is 'container'.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    mysite:
+      type: tosca.nodes.Site
+
+    mysite_containers:
+      type: tosca.nodes.Slice
+      requirements:
+          - slice:
+                node: mysite
+                relationship: tosca.relationships.MemberOfSite
+      properties:
+          default_isolation: container
+
diff --git a/xos/xos/settings.py b/xos/xos/settings.py
index cf2412f..709a2b6 100644
--- a/xos/xos/settings.py
+++ b/xos/xos/settings.py
@@ -1,11 +1,32 @@
 from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
 from django import VERSION as DJANGO_VERSION
 import socket
+import os
+from urlparse import urlparse
 
 # Django settings for XOS.
 from config import Config
+from config import set_override
 config = Config()
 
+# Override the config from the environment. This is used to leverage the LINK
+# capability of docker. It would be far better to use DNS, and that can be
+# done in environments like kubernetes. Look for environment variables that
+# match the link pattern and set the appropriate overrides. It is expected
+# that the set of overrides will be expanded as needed.
+def overrideDbSettings(v):
+    parsed = urlparse(v)
+    config.db_host = parsed.hostname
+    config.db_port = parsed.port
+
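+# Docker's --link option exposes a linked container through environment
+# variables of the form <ALIAS>_PORT (e.g. XOS_DB_PORT=tcp://172.17.0.2:5432);
+# each entry below maps such a variable to an override function.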
+env_to_config_dict = {
+    "XOS_DB_PORT" : overrideDbSettings
+}
+
+for key, ofunc in env_to_config_dict.items():
+    if key in os.environ:
+        ofunc(os.environ[key])
+
 GEOIP_PATH = "/usr/share/GeoIP"
 XOS_DIR = "/opt/xos"
 
@@ -29,7 +50,7 @@
         'USER': config.db_user,
         'PASSWORD': config.db_password,
         'HOST': config.db_host,                      # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
-        'PORT': '',                      # Set to empty string for default.
+        'PORT': config.db_port,                      # Set to empty string for default.
     }
 }
 
@@ -150,7 +171,6 @@
     'django.contrib.admindocs',
     'rest_framework',
     'django_extensions',
-    'django_evolution',
     'core',
     'hpc',
     'cord',
@@ -167,10 +187,9 @@
 )
 
 if DJANGO_VERSION[1]>=7:
-    # if django >= 1.7, then remove evolution and change the admin module
+    # if django >= 1.7, then change the admin module
     INSTALLED_APPS = list(INSTALLED_APPS)
     INSTALLED_APPS[INSTALLED_APPS.index('django.contrib.admin')] = 'django.contrib.admin.apps.SimpleAdminConfig'
-    INSTALLED_APPS.remove('django_evolution')
     INSTALLED_APPS = tuple(INSTALLED_APPS)
 
 # Added for django-suit form