move over configurations from xos repo

Change-Id: I66583bdaea582986d8f16a29066a79c6687b97fb
diff --git a/cord-deprecated/Dockerfile.cord b/cord-deprecated/Dockerfile.cord
new file mode 100644
index 0000000..3e63eb2
--- /dev/null
+++ b/cord-deprecated/Dockerfile.cord
@@ -0,0 +1,27 @@
+# Base image assumed from the original xos repo layout
+FROM xosproject/xos
+
+RUN mkdir -p /root/setup
+ADD xos/configurations/common/admin-openrc.sh /root/setup/
+ADD xos/configurations/common/controller_settings /root/setup/
+ADD xos/configurations/common/flat_net_name /root/setup/
+ADD xos/configurations/common/nodes.yaml /opt/xos/configurations/common/
+ADD xos/configurations/common/id_rsa.pub /root/setup/padmin_public_key
+ADD xos/configurations/common/id_rsa.pub /opt/xos/synchronizers/vcpe/vcpe_public_key
+ADD xos/configurations/common/id_rsa /opt/xos/synchronizers/vcpe/vcpe_private_key
+ADD xos/configurations/common/id_rsa.pub /opt/xos/synchronizers/monitoring_channel/monitoring_channel_public_key
+ADD xos/configurations/common/id_rsa /opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key
+ADD xos/configurations/common/id_rsa.pub /opt/xos/synchronizers/onos/onos_key.pub
+ADD xos/configurations/common/id_rsa /opt/xos/synchronizers/onos/onos_key
+ADD xos/configurations/common/node_key.pub /root/setup/node_key.pub
+ADD xos/configurations/common/node_key /root/setup/node_key
+ADD xos/configurations/common/ceilometer_url /root/setup/ceilometer_url
+ADD xos/synchronizers/vcpe/supervisor/vcpe-observer.conf /etc/supervisor/conf.d/
+ADD xos/synchronizers/vbng/supervisor/vbng-observer.conf /etc/supervisor/conf.d/
+ADD xos/synchronizers/onos/supervisor/onos-observer.conf /etc/supervisor/conf.d/
+ADD xos/synchronizers/monitoring_channel/supervisor/monitoring_channel_observer.conf /etc/supervisor/conf.d/
+RUN sed -i 's/proxy_ssh=True/proxy_ssh=False/' /opt/xos/synchronizers/vcpe/vcpe_synchronizer_config
+RUN sed -i 's/proxy_ssh=True/proxy_ssh=False/' /opt/xos/synchronizers/monitoring_channel/monitoring_channel_synchronizer_config
+ADD xos/configurations/cord/virtualbng.json /root/setup/
+ADD xos/configurations/cord/vtn-network-cfg.json /root/setup/
+
+CMD /usr/bin/make -C /opt/xos/configurations/cord -f Makefile.inside; /bin/bash
+
+#CMD ["/bin/bash"]
diff --git a/cord-deprecated/Makefile b/cord-deprecated/Makefile
new file mode 100644
index 0000000..184f2d5
--- /dev/null
+++ b/cord-deprecated/Makefile
@@ -0,0 +1,100 @@
+SETUPDIR:=../setup
+MYIP:=$(shell hostname -i)
+
+cloudlab: common_cloudlab cord acord
+
+devstack: upgrade_pkgs common_devstack devstack_net_fix cord
+
+cord: virtualbng_json vtn_network_cfg_json
+	sudo MYIP=$(MYIP) docker-compose up -d
+	bash ../common/wait_for_xos.sh
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py none /opt/xos/configurations/common/fixtures.yaml
+	sudo docker-compose run xos python /opt/xos/tosca/run.py none /opt/xos/configurations/common/mydeployment.yaml
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/cloudlab-openstack.yaml
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /root/setup/nodes.yaml
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord/cord.yaml
+
+containers:
+	cd ../../../containers/xos; make devel
+	cd ../../../containers/synchronizer; make
+
+common_cloudlab:
+	make -C ../common -f Makefile.cloudlab
+
+common_devstack:
+	make -C ../common -f Makefile.devstack
+
+acord: cloudlab_ceilometer_custom_images ceilometer_cloudlab_cord_plugins
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord/ceilometer.yaml
+
+ceilometer_cloudlab_cord_plugins:
+	if [ -d ./ceilometer-plugins ]; then rm -fr ./ceilometer-plugins; fi
+	git clone https://github.com/srikanthvavila/ceilometer-plugins.git
+	sudo cp -r ceilometer-plugins/network/ext_services /usr/lib/python2.7/dist-packages/ceilometer/network/
+	sudo cp -r ceilometer-plugins/network/statistics/onos /usr/lib/python2.7/dist-packages/ceilometer/network/statistics/
+	sudo cp ceilometer-plugins/network/statistics/__init__.py /usr/lib/python2.7/dist-packages/ceilometer/network/statistics/ 
+	sudo cp ceilometer-plugins/entry_points.txt /usr/lib/python2.7/dist-packages/ceilometer-*egg-info/
+	sudo cp ceilometer-plugins/pipeline.yaml /etc/ceilometer/
+	echo "Restarting ceilometer-agent-notification"
+	sudo service ceilometer-agent-notification restart
+	echo "Restarting ceilometer-agent-central"
+	sudo service ceilometer-agent-central restart
+
+ceilometer_pub_sub:
+	if [ -d ./pub-sub ]; then rm -fr ./pub-sub; fi
+	git clone https://github.com/srikanthvavila/pub-sub.git
+	echo "Starting Ceilometer PUB/SUB service... Ensure zookeeper and kafka services are launched (if required)"
+	cd pub-sub/ceilometer_pub_sub && python sub_main.py &
+
+virtualbng_json:
+	export SETUPDIR=$(SETUPDIR); bash make-virtualbng-json.sh
+
+vtn_network_cfg_json:
+	export SETUPDIR=$(SETUPDIR); bash make-vtn-networkconfig-json.sh
+
+stop:
+	sudo MYIP=$(MYIP) docker-compose stop
+
+rm:
+	sudo MYIP=$(MYIP) docker-compose rm
+
+showlogs:
+	sudo MYIP=$(MYIP) docker-compose logs
+
+ps:
+	sudo MYIP=$(MYIP) docker-compose ps
+
+dataplane: etc_hosts
+	cd dataplane; ./gen-inventory.sh > hosts
+	cd dataplane; ansible-playbook -i hosts dataplane.yaml
+
+dataplane_bm: dataplane
+	cd dataplane; bash -c "./generate-bm.sh > hosts-bm"
+	cd dataplane; sudo bash -c "ansible-playbook -i hosts-bm dataplane-bm.yaml"
+
+etc_hosts:
+	sudo bash -c "sed -i '/^10.11.10/ d' /etc/hosts"
+	cd dataplane; sudo bash -c "./gen-etc-hosts.sh >> /etc/hosts"
+
+setup_client:
+	# add subscriber to vOLT?  Is there a REST API?
+	echo "Don't forget: add-subscriber-access of:0000000000000001 1 432"
+	cd dataplane; ansible -i hosts client -m shell -s -a "route del default gw 10.11.10.5; dhclient br-sub"
+	# reboot the vBNG ONOS
+	cd dataplane; ansible -i hosts onos_vbng -m shell -s -a "docker restart ONOS"
+
+cleanup: stop rm
+	cd dataplane; ./cleanup.sh
+	bash -c "source ../setup/admin-openrc.sh; nova list --all-tenants; neutron net-list"
+	echo "Don't forget to clean up vCPE containers"
+
+devstack_net_fix:
+	sudo ../common/devstack/net-fix.sh
+	sudo bash -c "source ../setup/admin-openrc.sh; neutron subnet-update private-subnet --dns-nameservers list=true 8.8.8.8 8.8.4.4"
+
+upgrade_pkgs:
+	sudo pip install httpie --upgrade
+
+cloudlab_ceilometer_custom_images:
+	bash -c "source ../setup/admin-openrc.sh; glance image-show ceilometer-trusty-server-multi-nic || if test -f /proj/xos-PG0/images/ceilometer-trusty-server-multi-nic.compressed.qcow2 ; then glance image-create --name ceilometer-trusty-server-multi-nic --disk-format qcow2 --file /proj/xos-PG0/images/ceilometer-trusty-server-multi-nic.compressed.qcow2 --container-format bare ; else mkdir -p /tmp/images && wget http://www.vicci.org/cord/ceilometer-trusty-server-multi-nic.compressed.qcow2 -P /tmp/images && glance image-create --name ceilometer-trusty-server-multi-nic --disk-format qcow2 --file /tmp/images/ceilometer-trusty-server-multi-nic.compressed.qcow2 --container-format bare ; fi "
diff --git a/cord-deprecated/Makefile.inside b/cord-deprecated/Makefile.inside
new file mode 100644
index 0000000..d7bdbaf
--- /dev/null
+++ b/cord-deprecated/Makefile.inside
@@ -0,0 +1,12 @@
+all: setup_xos run_develserver
+
+setup_xos:
+	bash /opt/xos/tools/docker_setup_xos
+	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/fixtures.yaml
+	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/base.yaml
+	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/nodes.yaml
+	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord/cord.yaml
+	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord/ceilometer.yaml
+
+run_develserver:
+	cd /opt/xos; python manage.py runserver 0.0.0.0:8000 --insecure
diff --git a/cord-deprecated/README-VTN.md b/cord-deprecated/README-VTN.md
new file mode 100644
index 0000000..a3c4e69
--- /dev/null
+++ b/cord-deprecated/README-VTN.md
@@ -0,0 +1,156 @@
+# VTN notes:
+
+see also: https://github.com/hyunsun/documentations/wiki/Neutron-ONOS-Integration-for-CORD-VTN#onos-setup
+
+VTN doesn't seem to like CloudLab's networks (flat-net-1, ext-net, etc.). I've placed a script in xos/tools/ called destroy-all-networks.sh that automates tearing down all of CloudLab's Neutron networks.
+
+    cd xos/tools
+    ./destroy-all-networks.sh
+
+Inside the XOS container, update the configuration. Make sure to restart the OpenStack Synchronizer afterward; it might be a good idea to restart the XOS UI as well:
+
+    python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/tosca/samples/vtn.yaml
+    emacs /opt/xos/xos_configuration/xos_common_config
+        [networking]
+        use_vtn=True
+    supervisorctl restart observer
+
+### ctl node:
+
+    # set ONOS_VTN_HOSTNAME to the host where the VTN container was installed
+    ONOS_VTN_HOSTNAME="cp-2.smbaker-xos5.xos-pg0.clemson.cloudlab.us"
+    apt-get -y install python-pip
+    pip install -U setuptools pip
+    pip install testrepository
+    git clone https://github.com/openstack/networking-onos.git
+    cd networking-onos
+    python setup.py install
+    # the above fails the first time with an error about pbr.json
+    # I ran it again and it succeeded, but I am skeptical there's
+    # not still an issue lurking...
+    cat > /usr/local/etc/neutron/plugins/ml2/conf_onos.ini <<EOF
+    [onos]
+    url_path = http://$ONOS_VTN_HOSTNAME:8181/onos/cordvtn
+    username = karaf
+    password = karaf
+    EOF
+    emacs /etc/neutron/plugins/ml2/ml2_conf.ini
+        update settings as per vtn docs ([ml2] and [ml2_type_vxlan] sections)
+    systemctl stop neutron-server
+    # I started neutron manually to make sure it's using exactly the right config
+    # files. Maybe it can be restarted using systemctl instead...
+    /usr/bin/neutron-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /usr/local/etc/neutron/plugins/ml2/conf_onos.ini
+
+### Compute nodes and nm nodes:
+
+    cd xos/configurations/cord/dataplane
+    ./generate-bm.sh > hosts-bm
+    ansible-playbook -i hosts-bm dataplane-vtn.yaml
+    # the playbook will:
+    #  1) turn off neutron openvswitch-agent
+    #  2) set openvswitch to listen on port 6641
+    #  3) restart openvswitch
+    #  4) delete any existing br-int bridge
+    #  5) [nm only] turn off neutron-dhcp-agent
+
+Additional compute node stuff:
+
+I've been deleting any existing unused bridges. Not sure if it's necessary.
+
+    ovs-vsctl del-br br-tun
+    ovs-vsctl del-br br-flat-lan-1
+
+To get the management network working, we need to create a management network template, slice, and network; configurations/cord/vtn.yaml will do this for you. Then add a connection to the management network for any slice that needs management connectivity.
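+
+For example, run the recipe inside the XOS container (a sketch, assuming it is mounted at the usual /opt/xos path and using the same TOSCA runner as elsewhere in these notes):
+
+    python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord/vtn.yaml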
+    
+### Notes:
+* I've configured the Open vSwitch switches to use port 6641 instead of port 6640. This is because the VTN app listens on 6640
+itself, and since we're running it in Docker 'host' networking mode now, it would conflict with an Open vSwitch that was
+also listening on 6640.
+* Adding use_vtn=True to the [networking] section in the XOS config file has two effects: 1) it sets the gateway in sync_controller_networks, and 2) it disables automatic creation of nat-net for new slices. This is because VTN will fail if there is no gateway on a network, and because we don't have nat-net under the VTN configuration.
+* When using ovs-ofctl to look at flow rules, if you get a protocol error, try "ovs-ofctl show -O OpenFlow13 br-int" (see the example after this list).
+* Note that the VTN Synchronizer isn't started automatically. It's only used for inter-Service connectivity, so there's no need to mess with it until intra-Slice connectivity is working.
+* Note that the VTN Synchronizer won't connect non-access networks. Any network templates you want VTN to connect must have Access set to "Direct" or "Indirect".
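+
+For example, to confirm that ovsdb-server moved to port 6641 and to inspect flows with the OpenFlow 1.3 protocol (a quick sanity check; standard OVS tooling assumed):
+
+    # ovsdb-server should now listen on 6641 rather than the default 6640
+    netstat -tln | grep 6641
+    # dump br-int's flow rules using OpenFlow 1.3
+    ovs-ofctl -O OpenFlow13 dump-flows br-int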
+
+In case the management network isn't working, you can use a VNC tunnel, like this:
+
+    # on compute node, run the following and note the IP address and port number
+    virsh vncdisplay <instance-id>
+    
+    # from home
+    ssh -o "GatewayPorts yes"  -L <port+5900>:<IP>:<port+5900> <username>@<compute_node_hostname>
+    
+    # example
+    ssh -o "GatewayPorts yes"  -L 5901:192.168.0.7:5901 smbaker@cp-1.smbaker-xos3.xos-pg0.clemson.cloudlab.us
+
+Then open a VNC session to the local port on your local machine. You'll have a console on the Instance. The username is "ubuntu" and the password can be obtained from your CloudLab experiment description.
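+
+For example, using a common VNC client (vncviewer is an assumption; any VNC client pointed at the forwarded port works):
+
+    vncviewer localhost:5901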
+
+### Things that can be tested:
+
+* Create an Instance, it should have a Private network, and there should be a tap attached from the instance to br-int
+* Two Instances in the same Slice can talk to one another. They can be on the same machine or different machines.
+* Two Slices can talk to one another if the slices are associated with Services and those Services have a Tenancy relationship between them. Note that 1) The VTN Synchronizer must be running, 2) There must be a Private network with Access=[Direct|Indirect], and 3) The connectivity is unidirectional, from subscriber service to provider service.
+
+### Testing service composition
+
+1. Change the private network template's 'Access' field from None to Direct
+2. Create a Service, Service-A
+3. Enter Slice Admin for Slice-1 and assign it to Service-A
+4. Create a Service, Service-B
+5. Enter Slice Admin for Slice-2 and assign it to Service-B
+6. Enter Service Admin for Service-B, Go to Tenancy Tab
+7. In the 'Provided Tenants' section of Service-B, create a Tenant with Subscriber-Service=Service-A.
+8. Start the VTN Observer. It will send a REST request to VTN app.
+9. Launch tcpdump in one of Slice-2's instances (see the sketch after this list)
+10. From Slice-1, start pinging the instance in Slice-2 where you launched tcpdump
+11. You should see the pings arrive and responses sent out. Note that the ping responses will not reach Slice-1, since VTN traffic is unidirectional.
+12. Delete the Tenancy relation you created in Step #7. The ping traffic should no longer appear in the tcpdump.
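+
+A minimal sketch of steps 9 and 10 (the interface name and address are assumptions; check with "ip addr" inside the instances):
+
+    # inside the Slice-2 instance
+    sudo tcpdump -i eth0 icmp
+
+    # inside the Slice-1 instance; substitute the Slice-2 instance's private IP
+    ping <slice2-private-ip>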
+
+### Getting external connectivity working on cloudlab
+
+On head node:
+
+    ovs-vsctl del-br br-flat-lan-1
+    ifconfig eth2 10.123.0.1
+    iptables --table nat --append POSTROUTING --out-interface br-ex -j MASQUERADE
+    #arp -s 10.123.0.3 fa:16:3e:ea:11:0a
+    sysctl net.ipv4.conf.all.send_redirects
+    sysctl net.ipv4.conf.all.send_redirects=0
+    sysctl net.ipv4.conf.default.send_redirects=0
+    sysctl net.ipv4.conf.eth0.send_redirects=0
+    sysctl net.ipv4.conf.br-ex.send_redirects=0
+    
+Substitute for your installation:
+
+    10.123.0.3 = wan_ip of vSG
+    10.123.0.1 = wan gateway
+    fa:16:3e:ea:11:0a = wan_mac of vSG
+    00:8c:fa:5b:09:d8 = wan_mac of gateway
+    
+### Setting up a test-client
+
+Before setting up VTN, create a bridge and attach it to the dataplane device on each compute node:
+
+    brctl addbr br-inject
+    brctl addif br-inject eth3   # substitute dataplane eth device here, may be different on each compute node
+    ip link set br-inject up
+    ip link set dev br-inject promisc on
+    
+Then update the network-config attribute of the VTN ONOS App in XOS to use a dataplaneIntf of br-inject instead of the eth device. Bring up VTN and a vSG. WAN connectivity and everything else should be working fine.
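+
+If you'd rather push the change directly to ONOS instead of through XOS, the standard ONOS network-configuration REST endpoint can be used (a sketch; credentials and hostname as configured earlier, and network-cfg.json is your edited config):
+
+    curl -u karaf:karaf -X POST -H "Content-Type: application/json" \
+        http://$ONOS_VTN_HOSTNAME:8181/onos/v1/network/configuration/ \
+        -d @network-cfg.json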
+
+Add a new slice, mysite_client, and make sure to give it both a private and a management network. Bring up an instance on the same node as the vSG you want to test. On the compute node, run the following:
+
+    MAC=<make-up-some-mac>
+    INSTANCE=<instance-id>
+    virsh attach-interface --domain $INSTANCE --type bridge --source br-inject --model virtio --mac $MAC --config --live
+    
+Log into the vSG via the management interface. Inside of the vSG run the following:
+
+    STAG=<your s-tag here>
+    CTAG=<your c-tag here>
+    ip link add link eth2 eth2.$STAG type vlan id $STAG
+    ip link add link eth2.$STAG eth2.$STAG.$CTAG type vlan id $CTAG
+    ip link set eth2.$STAG up
+    ip link set eth2.$STAG.$CTAG up
+    ip addr add 192.168.0.2/24 dev eth2.$STAG.$CTAG
+    ip route del default
+    ip route add default via 192.168.0.1
diff --git a/cord-deprecated/README.md b/cord-deprecated/README.md
new file mode 100644
index 0000000..64075d9
--- /dev/null
+++ b/cord-deprecated/README.md
@@ -0,0 +1,151 @@
+# CORD development environment
+
+This configuration can be used to set up a CORD development environment.
+It does the following:
+
+* Sets up a basic dataplane for testing end-to-end packet flow between a subscriber client and the Internet
+* Brings up ONOS apps for controlling the dataplane: virtualbng, olt
+* Configures XOS with the CORD services: vCPE, vBNG, vOLT
+
+**NOTE: This configuration is stale and likely not working at present.  If you are looking to evaluate 
+and/or contribute to [CORD](http://opencord.org/), 
+you should look instead at the [cord-pod](../cord-pod) configuration. Almost
+all CORD developers have transitioned to [cord-pod](../cord-pod).**
+
+## End-to-end dataplane
+
+The configuration uses XOS to set up an end-to-end dataplane for development of the XOS services and ONOS apps
+used in CORD.  It abstracts away most of the complexity of the CORD hardware using virtual networks
+and Open vSwitch (OvS) switches.  At a high level the dataplane looks like this:
+
+```
+             olt                 virtualbng
+             ----                  ----
+             ONOS                  ONOS
+              |                     |
+client ----> CPqD ----> vCPE ----> OvS ----> Internet
+         1         2          3         4
+```
+
+On the datapath are two OvS switches, controlled by the `olt` and `virtualbng` ONOS applications.  Once all the pieces are in
+place, the client at left should be able to obtain an IP address via DHCP from the vCPE and send packets out to the Internet.
+
+All of the components in the above diagram (i.e., client, OvS switches, ONOS, and vCPE) currently run in distinct VMs
+created by XOS.  The numbers in the diagram correspond to networks set up by XOS:
+
+1. subscriber_network
+2. lan_network
+3. wan_network
+4. public_network
+
+## How to run it
+
+The configuration is intended to be run on [CloudLab](http://cloudlab.us).
+It launches an XOS container on CloudLab that runs the XOS develserver.  The container is left running in the background.
+
+To get started on CloudLab:
+* Create an experiment using the *OpenStack-CORD* profile.  (You can also use the *OpenStack* profile, but choose *Kilo*
+with two compute nodes and disable security groups.)
+* Wait until you get an email from CloudLab with title "OpenStack Instance Finished Setting Up".
+* Login to the *ctl* node of your experiment and run:
+```
+ctl:~$ git clone https://github.com/open-cloud/xos.git
+ctl:~$ cd xos/xos/configurations/cord/
+ctl:~/xos/xos/configurations/cord$ make
+```
+
+Running `make` in this directory creates the XOS Docker container and runs the TOSCA engine with `cord.yaml` to
+configure XOS with the CORD services.  In addition, a number of VMs are created:
+
+1. *Slice mysite_onos*: runs the ONOS Docker container with `virtualbng` app loaded
+1. *Slice mysite_onos*: runs the ONOS Docker container with `olt` app loaded
+1. *Slice mysite_vbng*: for running OvS with the `virtualbng` app as controller
+1. *Slice mysite_volt*: for running the CPqD switch with the `olt` app as controller
+1. *Slice mysite_clients*: a subscriber client for end-to-end testing
+1. *Slice mysite_vcpe*: runs the vCPE Docker container (if not using containers on bare metal)
+
+Once all the VMs are up and the ONOS apps are configured, XOS should be able to get an address mapping from the `virtualbng`
+ONOS app for the vCPE. To verify that it has received an IP address mapping, look at the **Routeable subnet:** field in
+the appropriate *Vbng tenant* object in XOS.  It should contain an IP address in the 10.254.0.0/24 subnet.
+
+After launching the ONOS apps, it is necessary to configure software switches along the dataplane so that ONOS can control
+them.  To do this, from the `cord` configuration directory:
+```
+ctl:~/xos/xos/configurations/cord$ cd dataplane/
+ctl:~/xos/xos/configurations/cord/dataplane$ ./gen-inventory.sh > hosts
+ctl:~/xos/xos/configurations/cord/dataplane$ ansible-playbook -i hosts dataplane.yaml
+```
+
+To setup the dataplane for containers on bare metal, perform these steps in addition to the above (note: make sure to sudo when running the playbook):
+```
+ctl:~/xos/xos/configurations/cord/dataplane$ ./generate-bm.sh > hosts-bm   
+ctl:~/xos/xos/configurations/cord/dataplane$ sudo ansible-playbook -i hosts-bm dataplane-bm.yaml
+```
+
+Check that the vCPE container has started by going into the XOS UI and selecting 'Services', then 'service_vcpe', then 'Administration', then 'Vcpe Tenants'; there should be a green icon next to the vCPE.
+
+If the vCPE Tenant is still red, the Instance may be in exponential backoff due to errors that occurred while it tried to sync before dataplane.yaml was run. You can reset the backoff by tracking down the vCPE Instance (Slices->mysite_vcpe->Instances; find the Instance associated with the vCPE Tenant) and hitting the save button.
+
+Now SSH into ONOS running the OLT app (see below) and activate the subscriber:
+```
+onos> add-subscriber-access of:0000000000000001 1 432
+```
+
+At this point the client should be able to get an IP address from the vCPE via
+DHCP.  To set up the IP address and default route on the client:
+```
+client:$ sudo route del default gw 10.11.10.5
+client:$ sudo dhclient br-sub
+```
+Once `dhclient` returns, the client should now be able to surf the Internet
+through the dataplane.
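+
+For example, a quick connectivity check from the client (8.8.8.8 is just an arbitrary external address):
+```
+client:$ ping -c 3 8.8.8.8
+```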
+
+## Setting up /etc/hosts
+
+To make it easy to log into the various VMs that make up the dataplane, add entries for them into `/etc/hosts` on the
+*ctl* node.  As root, run:
+```
+ctl:~/xos/xos/configurations/cord/dataplane$ ./gen-etc-hosts.sh >> /etc/hosts
+```
+For example, to log into the client:
+```
+ctl:~$ ssh ubuntu@client
+```
+
+## How to log into ONOS
+
+ONOS apps are run inside Docker containers hosted in VMs.  All ports exposed by the ONOS container are forwarded to the
+outside, and can be accessed from the *ctl* node over the `flat-lan-1-net` network.  Assuming that `/etc/hosts`
+has been configured as described above, it is possible to SSH to the ONOS running the `virtualbng` app as follows (password is *karaf*):
+
+```
+$ ssh -p 8101 karaf@onos_vbng
+Password authentication
+Password:
+Welcome to Open Network Operating System (ONOS)!
+     ____  _  ______  ____
+    / __ \/ |/ / __ \/ __/
+   / /_/ /    / /_/ /\ \
+   \____/_/|_/\____/___/
+
+
+Hit '<tab>' for a list of available commands
+and '[cmd] --help' for help on a specific command.
+Hit '<ctrl-d>' or type 'system:shutdown' or 'logout' to shutdown ONOS.
+
+onos>
+```
+
+For instance, to check the IP address mappings managed by the `virtualbng` app:
+
+```
+onos> vbngs
+   Private IP - Public IP
+   10.0.1.3 - 10.254.0.129
+```
+
+## Troubleshooting
+
+#### Problem: No external connectivity from vCPE container
+1. Make sure the hosts listed in `virtualbng.json` are the actual compute nodes used in your experiment.
+2. Try rebooting the ONOS container running the `virtualbng` app: `$ ssh ubuntu@onos-vbng "sudo docker restart ONOS"`
diff --git a/cord-deprecated/ceilometer.yaml b/cord-deprecated/ceilometer.yaml
new file mode 100644
index 0000000..464b07b
--- /dev/null
+++ b/cord-deprecated/ceilometer.yaml
@@ -0,0 +1,270 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Set up the Ceilometer monitoring service for CORD.
+
+imports:
+   - custom_types/xos.yaml
+
+node_types:
+    tosca.nodes.SFlowService:
+        derived_from: tosca.nodes.Root
+        description: >
+            XOS SFlow Collection Service
+        capabilities:
+            scalable:
+                type: tosca.capabilities.Scalable
+            service:
+                type: tosca.capabilities.xos.Service
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Type of service.
+            view_url:
+                type: string
+                required: false
+                description: URL to follow when icon is clicked in the Service Directory.
+            icon_url:
+                type: string
+                required: false
+                description: ICON to display in the Service Directory.
+            enabled:
+                type: boolean
+                default: true
+            published:
+                type: boolean
+                default: true
+                description: If True then display this Service in the Service Directory.
+            public_key:
+                type: string
+                required: false
+                description: Public key to install into Instances to allow Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
+            versionNumber:
+                type: string
+                required: false
+                description: Version number of Service.
+            sflow_port:
+              type: integer
+              required: false
+              default: 6343
+              description: sFlow listening port
+            sflow_api_port:
+              type: integer
+              required: false
+              default: 33333
+              description: sFlow publish/subscribe API listening port
+
+    tosca.nodes.CeilometerService:
+        derived_from: tosca.nodes.Root
+        description: >
+            XOS Ceilometer Service
+        capabilities:
+            scalable:
+                type: tosca.capabilities.Scalable
+            service:
+                type: tosca.capabilities.xos.Service
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Type of service.
+            view_url:
+                type: string
+                required: false
+                description: URL to follow when icon is clicked in the Service Directory.
+            icon_url:
+                type: string
+                required: false
+                description: ICON to display in the Service Directory.
+            enabled:
+                type: boolean
+                default: true
+            published:
+                type: boolean
+                default: true
+                description: If True then display this Service in the Service Directory.
+            public_key:
+                type: string
+                required: false
+                description: Public key to install into Instances to allow Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
+            versionNumber:
+                type: string
+                required: false
+                description: Version number of Service.
+            ceilometer_pub_sub_url:
+                type: string
+                required: false
+                description: REST URL of ceilometer PUB/SUB component
+
+    tosca.nodes.CeilometerTenant:
+        derived_from: tosca.nodes.Root
+        description: >
+            CORD: A Tenant of the Ceilometer Service.
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of tenant
+
+topology_template:
+  node_templates:
+    service_ceilometer:
+      type: tosca.nodes.CeilometerService
+      requirements:
+      properties:
+          view_url: /admin/ceilometer/ceilometerservice/$id$/
+          kind: ceilometer
+          ceilometer_pub_sub_url: http://10.11.10.1:4455/
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+      artifacts:
+          pubkey: /opt/xos/synchronizers/monitoring_channel/monitoring_channel_public_key
+
+#    service_sflow:
+#      type: tosca.nodes.SFlowService
+#      requirements:
+#      properties:
+#          view_url: /admin/ceilometer/sflowservice/$id$/
+#          kind: sflow
+#          sflow_port: 6343
+#          sflow_api_port: 33333
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    ceilometer_network:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: ceilometer_client_access
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_ceilometer
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_ceilometer
+              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    ceilometer-trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    m1.small:
+      type: tosca.nodes.Flavor
+
+    mysite_ceilometer:
+      description: Ceilometer Proxy Slice
+      type: tosca.nodes.Slice
+      requirements:
+          - ceilometer_service:
+              node: service_ceilometer
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: ceilometer-trusty-server-multi-nic
+                relationship: tosca.relationships.DefaultImage
+          - default_flavor:
+                node: m1.small
+                relationship: tosca.relationships.DefaultFlavor
+
+#    mysite_sflow:
+#      description: Slice for sFlow service
+#      type: tosca.nodes.Slice
+#      requirements:
+#          - sflow_service:
+#              node: service_sflow
+#              relationship: tosca.relationships.MemberOfService
+#          - site:
+#              node: mysite
+#              relationship: tosca.relationships.MemberOfSite
+
+    my_ceilometer_tenant:
+      description: Ceilometer Service default Tenant
+      type: tosca.nodes.CeilometerTenant
+      requirements:
+          - provider_service:
+              node: service_ceilometer
+              relationship: tosca.relationships.MemberOfService
+       
+    # Virtual machines
+#    sflow_service_instance:
+#      type: tosca.nodes.Compute
+#      capabilities:
+#        # Host container properties
+#        host:
+#         properties:
+#           num_cpus: 1
+#           disk_size: 10 GB
+#           mem_size: 4 GB
+#        # Guest Operating System properties
+#        os:
+#          properties:
+#            # host Operating System image properties
+#            architecture: x86_64
+#            type: linux
+#            distribution: Ubuntu
+#            version: 14.10
+#      requirements:
+#          - slice:
+#                node: mysite_sflow
+#                relationship: tosca.relationships.MemberOfSlice
+
+    Customer Care:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosDiagnostic
+
+    TruckRoll:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosTruckroll
+
+    Ceilometer:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosCeilometerDashboard
+
+
+    Tenant:
+      type: tosca.nodes.DashboardView
+      properties:
+          no-create: true
+          no-update: true
+          no-delete: true
+
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      properties:
+          firstname: XOS
+          lastname: admin
+          is_admin: true
+      requirements:
+          - customer_care_dashboard:
+              node: Customer Care
+              relationship: tosca.relationships.UsesDashboard
+          - truckroll_dashboard:
+              node: TruckRoll
+              relationship: tosca.relationships.UsesDashboard
+          - ceilometer_dashboard:
+              node: Ceilometer
+              relationship: tosca.relationships.UsesDashboard
+          - tenant_dashboard:
+              node: Tenant
+              relationship: tosca.relationships.UsesDashboard
diff --git a/cord-deprecated/cord.yaml b/cord-deprecated/cord.yaml
new file mode 100644
index 0000000..c708d8e
--- /dev/null
+++ b/cord-deprecated/cord.yaml
@@ -0,0 +1,550 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Set up CORD-related services -- vOLT, vCPE, vBNG.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    # CORD Services
+    service_vtr:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /admin/vtr/vtrservice/$id$/
+          kind: vTR
+
+    service_volt:
+      type: tosca.nodes.Service
+      requirements:
+          - vsg_tenant:
+              node: service_vsg
+              relationship: tosca.relationships.TenantOfService
+          - lan_network:
+              node: lan_network
+              relationship: tosca.relationships.UsesNetwork
+          - wan_network:
+              node: wan_network
+              relationship: tosca.relationships.UsesNetwork
+      properties:
+          view_url: /admin/cord/voltservice/$id$/
+          kind: vOLT
+
+    # Set up a pool of addresses that we can hand out for the vSG WAN.
+    public_addresses:
+      type: tosca.nodes.AddressPool
+      properties:
+          addresses: 10.123.0.0/24 10.124.0.0/24
+
+    service_vsg:
+      type: tosca.nodes.VSGService
+      requirements:
+          - vbng_tenant:
+              node: service_vbng
+              relationship: tosca.relationships.TenantOfService
+      properties:
+          view_url: /admin/cord/vsgservice/$id$/
+          backend_network_label: hpc_client
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          private_key_fn: /opt/xos/synchronizers/vcpe/vcpe_private_key
+#          node_label: label_vsg
+      artifacts:
+          pubkey: /opt/xos/synchronizers/vcpe/vcpe_public_key
+
+    service_vbng:
+      type: tosca.nodes.VBNGService
+      properties:
+          view_url: /admin/cord/vbngservice/$id$/
+# if unspecified, vbng observer will look for an ONOSApp Tenant and
+# generate a URL from its IP address
+#          vbng_url: http://10.11.10.24:8181/onos/virtualbng/
+
+    service_ONOS_vBNG:
+      type: tosca.nodes.ONOSService
+      requirements:
+      properties:
+          kind: onos
+          view_url: /admin/onos/onosservice/$id$/
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+      artifacts:
+          pubkey: /opt/xos/synchronizers/onos/onos_key.pub
+
+#
+# To actually bring up the vBNG app
+# - Set up the dataplane using the ansible script
+# - Log into the vBNG ONOS and run 'devices' to get switch dpID
+# - Change the dpID values in vBNG ONOS app in XOS GUI
+# - (Synchronizer should copy the files to ONOS container immediately)
+# - Log into service_ONOS_vBNG VM and restart ONOS Docker container
+#   (Should roll this step into a Synchronizer)
+#
+    vBNG_ONOS_app:
+      type: tosca.nodes.ONOSvBNGApp
+      requirements:
+          - onos_tenant:
+              node: service_ONOS_vBNG
+              relationship: tosca.relationships.TenantOfService
+          - vbng_service:
+              node: service_vbng
+              relationship: tosca.relationships.UsedByService
+      properties:
+          dependencies: org.onosproject.proxyarp, org.onosproject.virtualbng, org.onosproject.openflow, org.onosproject.fwd
+          config_network-cfg.json: >
+            {
+              "ports" : {
+                "of:0000000000000001/1" : {
+                  "interfaces" : [
+                    {
+                      "ips"  : [ "10.0.1.253/24" ],
+                      "mac"  : "00:00:00:00:00:99"
+                    }
+                  ]
+                },
+                "of:0000000000000001/2" : {
+                  "interfaces" : [
+                    {
+                      "ips"  : [ "10.254.0.2/24" ],
+                      "mac"  : "00:00:00:00:00:98"
+                    }
+                  ]
+                }
+              }
+            }
+          config_virtualbng.json: { get_artifact: [ SELF, virtualbng_json, LOCAL_FILE] }
+      artifacts:
+          virtualbng_json: /root/setup/virtualbng.json
+
+    service_ONOS_vOLT:
+      type: tosca.nodes.ONOSService
+      requirements:
+      properties:
+          kind: onos
+          view_url: /admin/onos/onosservice/$id$/
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          rest_onos/v1/network/configuration/: >
+            {
+              "devices" : {
+                "of:0000000000000001" : {
+                  "accessDevice" : {
+                    "uplink" : "2",
+                    "vlan"   : "222",
+                    "defaultVlan" : "1"
+                  },
+                  "basic" : {
+                    "driver" : "pmc-olt"
+                  }
+                }
+              }
+            }
+      artifacts:
+          pubkey: /opt/xos/synchronizers/onos/onos_key.pub
+
+
+    vOLT_ONOS_app:
+      type: tosca.nodes.ONOSvOLTApp
+      requirements:
+          - onos_tenant:
+              node: service_ONOS_vOLT
+              relationship: tosca.relationships.TenantOfService
+          - volt_service:
+              node: service_volt
+              relationship: tosca.relationships.UsedByService
+      properties:
+          install_dependencies: onos-ext-notifier-1.0-SNAPSHOT.oar, onos-ext-volt-event-publisher-1.0-SNAPSHOT.oar
+          dependencies: org.onosproject.openflow-base, org.onosproject.olt, org.ciena.onos.ext_notifier, org.ciena.onos.volt_event_publisher
+          component_config: >
+             {
+                "org.ciena.onos.ext_notifier.KafkaNotificationBridge":{
+                   "rabbit.user": "<rabbit_user>",
+                   "rabbit.password": "<rabbit_password>",
+                   "rabbit.host": "<rabbit_host>",
+                   "publish.kafka": "false",
+                   "publish.rabbit": "true",
+                   "volt.events.rabbit.topic": "notifications.info",
+                   "volt.events.rabbit.exchange": "voltlistener",
+                   "volt.events.opaque.info": "{project_id: <keystone_tenant_id>, user_id: <keystone_user_id>}",
+                   "publish.volt.events": "true"
+                }
+             }
+#          config_network-cfg.json: >
+#            {
+#              "devices" : {
+#                "of:0000000000000001" : {
+#                  "accessDevice" : {
+#                    "uplink" : "2",
+#                    "vlan"   : "222",
+#                    "defaultVlan" : "1"
+#                  },
+#                  "basic" : {
+#                    "driver" : "default"
+#                  }
+#                }
+#              }
+#            }
+
+    # Network templates
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    Public network hack:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          visibility: private
+          translation: NAT
+          shared_network_name: tun0-net
+
+
+    # Networks required by the CORD setup
+    lan_network:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_vsg
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_vsg
+              relationship: tosca.relationships.ConnectsToSlice
+
+    wan_network:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_vsg
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_vsg
+              relationship: tosca.relationships.ConnectsToSlice
+
+    Private-Direct:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          access: direct
+
+    Private-Indirect:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          access: indirect
+
+    subscriber_network:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_volt
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_volt
+              relationship: tosca.relationships.ConnectsToSlice
+          - connection:
+              node: mysite_clients
+              relationship: tosca.relationships.ConnectsToSlice
+
+    public_network:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+      requirements:
+          - network_template:
+              node: Public network hack
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_vbng
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_vbng
+              relationship: tosca.relationships.ConnectsToSlice
+
+
+    mysite:
+      type: tosca.nodes.Site
+
+    label_vsg:
+      type: tosca.nodes.NodeLabel
+
+    # CORD Slices
+    mysite_vsg:
+      description: vSG Controller Slice
+      type: tosca.nodes.Slice
+      requirements:
+          - vsg_service:
+              node: service_vsg
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - vsg_docker_image:
+              node: docker-vsg
+              relationship: tosca.relationships.UsesImage
+#      properties:
+#          default_isolation: container
+
+    mysite_onos_vbng:
+      description: ONOS Controller Slice for vBNG
+      type: tosca.nodes.Slice
+      requirements:
+          - ONOS:
+              node: service_ONOS_vBNG
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    mysite_onos_volt:
+      description: ONOS Controller Slice for vOLT
+      type: tosca.nodes.Slice
+      requirements:
+          - ONOS:
+              node: service_ONOS_vOLT
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    mysite_vbng:
+      description: slice running OVS controlled by vBNG
+      type: tosca.nodes.Slice
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    mysite_volt:
+      description: OVS controlled by vOLT
+      type: tosca.nodes.Slice
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    mysite_clients:
+      description: slice for clients at the subscriber
+      type: tosca.nodes.Slice
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+
+    # Virtual machines
+    onos_app_1:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 GB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: Ubuntu
+            version: 14.10
+      requirements:
+          - slice:
+                node: mysite_onos_vbng
+                relationship: tosca.relationships.MemberOfSlice
+
+    onos_app_2:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 GB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: Ubuntu
+            version: 14.10
+      requirements:
+          - slice:
+                node: mysite_onos_volt
+                relationship: tosca.relationships.MemberOfSlice
+
+    # VM for running the OVS controlled by vBNG
+    ovs_vbng:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 GB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: mysite_vbng
+                relationship: tosca.relationships.MemberOfSlice
+
+    # VM for running the OVS controlled by vOLT
+    ovs_volt:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 GB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: mysite_volt
+                relationship: tosca.relationships.MemberOfSlice
+
+    # A subscriber client VM
+    client1:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 GB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: mysite_clients
+                relationship: tosca.relationships.MemberOfSlice
+
+    # docker image for vsg containers
+    docker-vsg:
+      # TODO: need to attach this to mydeployment
+      type: tosca.nodes.Image
+      properties:
+        kind: container
+        container_format: na
+        disk_format: na
+        path: andybavier/docker-vcpe
+        tag: develop
+
+    # Let's add a user who can be administrator of the household
+    johndoe@myhouse.com:
+      type: tosca.nodes.User
+      properties:
+          password: letmein
+          firstname: john
+          lastname: doe
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    # A subscriber
+    My House:
+       type: tosca.nodes.CORDSubscriber
+       properties:
+           service_specific_id: 123
+           firewall_enable: false
+           cdn_enable: false
+           url_filter_enable: false
+           url_filter_level: R
+       requirements:
+          - house_admin:
+              node: johndoe@myhouse.com
+              relationship: tosca.relationships.AdminPrivilege
+
+    Mom's PC:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 01:02:03:04:05:06
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    Dad's PC:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 90:E2:BA:82:F9:75
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    Jack's Laptop:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 68:5B:35:9D:91:D5
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    Jill's Laptop:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 34:36:3B:C9:B6:A6
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    My Volt:
+        type: tosca.nodes.VOLTTenant
+        properties:
+            service_specific_id: 123
+            s_tag: 222
+            c_tag: 432
+        requirements:
+            - provider_service:
+                node: service_volt
+                relationship: tosca.relationships.MemberOfService
+            - subscriber:
+                node: My House
+                relationship: tosca.relationships.BelongsToSubscriber
diff --git a/cord-deprecated/dataplane/ansible.cfg b/cord-deprecated/dataplane/ansible.cfg
new file mode 100644
index 0000000..9100590
--- /dev/null
+++ b/cord-deprecated/dataplane/ansible.cfg
@@ -0,0 +1,4 @@
+[defaults]
+remote_user = ubuntu
+private_key_file = ~/.ssh/id_rsa
+host_key_checking = false
\ No newline at end of file
diff --git a/cord-deprecated/dataplane/change_controller.sh b/cord-deprecated/dataplane/change_controller.sh
new file mode 100755
index 0000000..2b961ee
--- /dev/null
+++ b/cord-deprecated/dataplane/change_controller.sh
@@ -0,0 +1,13 @@
+#! /bin/bash
+
+# put IP address of node running ONOS VTN App here
+DESIRED_CONTROLLER="tcp:130.127.133.24:6653"
+
+while true; do
+    CONTROLLER=$(ovs-vsctl get-controller br-int)
+    # If the bridge is still pointed at the Docker-internal address that the
+    # ONOS container presumably advertises, repoint it at the desired controller.
+    if [[ "$CONTROLLER" == "tcp:172.17.0.2:6653" ]]; then
+       ovs-vsctl set-controller br-int "$DESIRED_CONTROLLER"
+       echo "changed controller from $CONTROLLER to $DESIRED_CONTROLLER"
+    fi
+    sleep 10
+done
diff --git a/cord-deprecated/dataplane/cleanup.sh b/cord-deprecated/dataplane/cleanup.sh
new file mode 100755
index 0000000..91d821c
--- /dev/null
+++ b/cord-deprecated/dataplane/cleanup.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+function cleanup_network {
+  NETWORK=$1
+  SUBNETS=$(neutron net-show "$NETWORK" | grep -i subnets | awk '{print $4}')
+  if [[ $SUBNETS != "" ]]; then
+      PORTS=$(neutron port-list | grep -i "$SUBNETS" | awk '{print $2}')
+      for PORT in $PORTS; do
+          echo "Deleting port $PORT"
+          neutron port-delete "$PORT"
+      done
+  fi
+  neutron net-delete "$NETWORK"
+}
+
+source ../../setup/admin-openrc.sh
+
+echo "Deleting VMs"
+# Delete all VMs
+VMS=$( nova list --all-tenants|grep mysite|awk '{print $2}' )
+for VM in $VMS
+do
+    nova delete $VM
+done
+
+echo "Waiting 5 seconds..."
+sleep 5
+
+cleanup_network lan_network
+cleanup_network wan_network
+cleanup_network mysite_vsg-private
+cleanup_network mysite_vsg-access
+cleanup_network management
+
+echo "Deleting networks"
+# Delete all networks beginning with mysite_
+NETS=$( neutron net-list --all-tenants|grep mysite|awk '{print $2}' )
+for NET in $NETS
+do
+    neutron net-delete $NET
+done
+
+neutron net-delete lan_network || true
+neutron net-delete subscriber_network || true
+neutron net-delete public_network || true
+neutron net-delete hpc_client_network || true
+neutron net-delete ceilometer_network || true
+neutron net-delete management || true
+neutron net-delete mysite_vsg-access || true
diff --git a/cord-deprecated/dataplane/dataplane-bm.yaml b/cord-deprecated/dataplane/dataplane-bm.yaml
new file mode 100644
index 0000000..e1e78ee
--- /dev/null
+++ b/cord-deprecated/dataplane/dataplane-bm.yaml
@@ -0,0 +1,36 @@
+---
+- hosts: switch_volt
+  sudo: yes
+  tasks:
+  - name: Create tunnel port on br-lan
+    openvswitch_port:
+      bridge=br-lan
+      port={{ item }}
+      state=present
+    with_items: "grenames.split(' ')"
+
+  - name: Set up GRE tunnel to vCPE
+    shell: ovs-vsctl set Interface {{ item.0 }} type=gre options:remote_ip={{ item.1 }}
+    with_together:
+      - "grenames.split(' ')"
+      - "bm_ips.split(' ')"
+
+- hosts: baremetal
+
+  user: root
+  sudo: no
+  tasks:
+  - name: Create br-lan
+    openvswitch_bridge:
+      bridge=br-lan
+      state=present
+
+  - name: Create tunnel port
+    openvswitch_port:
+      bridge=br-lan
+      port={{ grename }}
+      state=present
+
+  - name: Configure GRE tunnel to vOLT switch
+    shell: ovs-vsctl set Interface {{ grename }} type=gre options:remote_ip={{ volt_addr }}
+
diff --git a/cord-deprecated/dataplane/dataplane-vtn.yaml b/cord-deprecated/dataplane/dataplane-vtn.yaml
new file mode 100644
index 0000000..f6561b5
--- /dev/null
+++ b/cord-deprecated/dataplane/dataplane-vtn.yaml
@@ -0,0 +1,31 @@
+---
+- hosts: baremetal:nm
+
+  user: root
+  sudo: no
+  tasks:
+  - name: stop neutron openvswitch agent
+    service: name=neutron-plugin-openvswitch-agent state=stopped
+
+  - name: Update openvswitch config
+    lineinfile:
+      dest=/usr/share/openvswitch/scripts/ovs-ctl
+      insertafter="set ovsdb-server.*DB_FILE.*"
+      regexp=".*set.*--remote=ptcp.*"
+      line="        set \"$@\" --remote=ptcp:6641"
+      state=present
+
+  - name: Restart openvswitch
+    service: name=openvswitch-switch state=restarted
+
+  - name: Delete br-int
+    shell: ovs-vsctl show | grep -i br-int > /dev/null && ovs-vsctl del-br br-int
+    ignore_errors: yes
+
+- hosts: nm
+
+  user: root
+  sudo: no
+  tasks:
+  - name: stop neutron dhcp agent
+    service: name=neutron-dhcp-agent state=stopped
diff --git a/cord-deprecated/dataplane/dataplane.yaml b/cord-deprecated/dataplane/dataplane.yaml
new file mode 100644
index 0000000..3ca3bbe
--- /dev/null
+++ b/cord-deprecated/dataplane/dataplane.yaml
@@ -0,0 +1,256 @@
+---
+- hosts: switch_vbng
+  sudo: yes
+  vars:
+    controller_ip: "{{ hostvars['onos_vbng']['ansible_ssh_host'] }}"
+    controller_port: 6653
+    ovs_dpid: "0000000000000001"
+  tags:
+  - vbng
+  tasks:
+  - name: Fix /etc/hosts
+    lineinfile:
+      dest=/etc/hosts
+      regexp="127.0.0.1 localhost"
+      line="127.0.0.1 localhost {{ ansible_hostname }}"
+
+  - name: Install packages
+    apt: name={{ item }}
+      state=latest
+      update_cache=yes
+    with_items:
+    - openvswitch-switch
+    - python-netifaces
+
+  - name: Create br-vbng
+    openvswitch_bridge:
+      bridge=br-vbng
+      state=present
+
+  - name: Find wan_network interface
+    script: scripts/if_from_ip.py {{ wan_ip }}
+    register: wan_net
+
+  - name: Find public_network interface
+    script: scripts/if_from_ip.py {{ public_ip }}
+    register: public_net
+
+  - name: Hook up wan-network to br-vbng
+    openvswitch_port:
+      bridge=br-vbng
+      port={{ wan_net.stdout }}
+      state=present
+
+  - name: Hook up public-network to OvS
+    openvswitch_port:
+      bridge=br-vbng
+      port={{ public_net.stdout }}
+      state=present
+
+  - name: Remove IP address on public_network
+    command: /sbin/ifconfig {{ public_net.stdout }} 0.0.0.0
+    when: public_net.stdout
+
+  - name: Change datapath ID of bridge to match config file
+    command: /usr/bin/ovs-vsctl set bridge br-vbng other-config:datapath-id={{ ovs_dpid }}
+
+  - name: Add controller to switch
+    command: /usr/bin/ovs-vsctl set-controller br-vbng tcp:{{ controller_ip }}:{{ controller_port }}
+
+- hosts: switch_volt
+  sudo: yes
+  vars:
+    controller_ip: "{{ hostvars['onos_volt']['ansible_ssh_host'] }}"
+    controller_port: 6653
+    vcpe_lan_ip: "{{ hostvars['vcpe']['lan_ip'] }}"
+  tags:
+  - volt
+  tasks:
+
+  - name: Fix /etc/hosts
+    lineinfile:
+      dest=/etc/hosts
+      regexp="127.0.0.1 localhost"
+      line="127.0.0.1 localhost {{ ansible_hostname }}"
+
+  - name: Install packages
+    apt: name={{ item }} state=present update_cache=yes
+    with_items:
+    - git
+    - python-netifaces
+    - openvswitch-switch
+
+  - name: Checkout the Mininet repo
+    git: repo=https://github.com/mininet/mininet.git
+      dest=/tmp/mininet
+
+  - name: Install the CPqD switch using Mininet install script
+    shell: /tmp/mininet/util/install.sh -3f
+      creates=/usr/local/bin/ofdatapath
+    ignore_errors: true
+
+  - name: Find subscriber_network interface
+    script: scripts/if_from_ip.py {{ subscriber_ip }}
+    register: subscriber_net
+
+  - name: Create bridge br-sub
+    openvswitch_bridge:
+      bridge=br-sub
+      state=present
+
+  - name: Add subscriber_net to br-sub
+    openvswitch_port:
+      bridge=br-sub
+      port={{ subscriber_net.stdout }}
+      state=present
+
+  # The CPqD switch is expecting that packets coming from the client have
+  # VLAN tag 1.  However Neutron's OvS configuration eats VLAN-tagged packets.
+  # So tag them with VLAN 1 here before sending to CPqD.
+  #
+  # Note that the VLAN tag is 0 in the real-world setup, but the CPqD switch
+  # seems to have a problem with these packets.
+
+  # Using OvS to tag packets with VLAN ID 1 is not quite working for some reason.
+  # The packets from the client get tagged OK, but only the first packet from the
+  # VCPE gets its tag stripped off.  Very weird.  That's why we are using veth
+  # devices instead.
+  #- name: Add tag 1 to br-sub port
+  #  shell: ovs-vsctl set port {{ subscriber_net.stdout }} tag=1
+
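+  # The "ifconfig X || ..." guards below make these shell tasks idempotent on replay.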
+  - name: Create a pair of veth devices
+    shell: ifconfig veth0 >> /dev/null || ip link add veth0 type veth peer name veth1
+
+  - name: Create veth0.1
+    shell: ifconfig veth0.1 >> /dev/null || ip link add link veth0 name veth0.1 type vlan id 1
+
+  - name: Bring the interfaces up
+    shell: ip link set {{ item }} up
+    with_items:
+    - veth0
+    - veth1
+    - veth0.1
+
+  - name: Add veth0.1 to br-sub
+    openvswitch_port:
+      bridge=br-sub
+      port=veth0.1
+      state=present
+
+  - name: Create bridge br-lan
+    openvswitch_bridge:
+      bridge=br-lan
+      state=present
+
+  - name: Create tunnel port on br-lan
+    openvswitch_port:
+      bridge=br-lan
+      port=gre0
+      state=present
+
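+  # vcpe_lan_ip comes from the inventory written by gen-inventory.sh (lan_ip on the vcpe host).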
+  - name: Set up GRE tunnel to vCPE
+    shell: ovs-vsctl set Interface gre0 type=gre options:remote_ip={{ vcpe_lan_ip }}
+
+  - name: Check if br-lan has an IPv6 address
+    shell: ip addr show br-lan|grep inet6|awk '{print $2}'
+    register: ipv6
+
+  - name: Remove br-lan IPv6 address if present
+    shell: ifconfig br-lan inet6 del {{ ipv6.stdout }}
+    when: ipv6.stdout != ""
+
+  - name: Check if veth1 has an IPv6 address
+    shell: ip addr show veth1|grep inet6|awk '{print $2}'
+    register: ipv6
+
+  - name: Remove veth1 IPv6 address if present
+    shell: ifconfig veth1 inet6 del {{ ipv6.stdout }}
+    when: ipv6.stdout != ""
+
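+  # CPqD userspace switch: ofdatapath does the forwarding, ofprotocol speaks OpenFlow to the controller.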
+  - name: Run the datapath
+    command: /usr/local/bin/ofdatapath -i veth1,br-lan punix:/tmp/s1 -d 000000000001 --no-slicing -D -P
+      creates=/usr/local/var/run/ofdatapath.pid
+
+  - name: Run the control program
+    command: /usr/local/bin/ofprotocol unix:/tmp/s1 tcp:{{ controller_ip }}:{{ controller_port }} --fail=closed --listen=punix:/tmp/s1.listen -D -P
+      creates=/usr/local/var/run/ofprotocol.pid
+
+- hosts: client
+  sudo: yes
+  tags:
+  - client
+  tasks:
+
+  - name: Fix /etc/hosts
+    lineinfile:
+      dest=/etc/hosts
+      regexp="127.0.0.1 localhost"
+      line="127.0.0.1 localhost {{ ansible_hostname }}"
+
+  - name: Install packages
+    apt: name={{ item }}
+      state=latest
+      update_cache=yes
+    with_items:
+    - openvswitch-switch
+    - python-netifaces
+
+  - name: Create br-sub
+    openvswitch_bridge:
+      bridge=br-sub
+      state=present
+
+  - name: Find subscriber_network interface
+    script: scripts/if_from_ip.py {{ subscriber_ip }}
+    register: client_net
+
+  - name: Hook up subscriber-network to OvS
+    openvswitch_port:
+      bridge=br-sub
+      port={{ client_net.stdout }}
+      state=present
+
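+  # MTU 1400 presumably leaves headroom for the GRE encapsulation; TSO/TX offloads are disabled on the bridge device.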
+  - name: Configure br-sub (clear IP, set MTU 1400, disable offloads)
+    shell: "{{ item }}"
+    with_items:
+    - ifconfig br-sub 0.0.0.0 mtu 1400 up
+    - ethtool -K br-sub tso off
+    - ethtool -K br-sub tx off
+
+  # Run dhclient on br-sub internal interface to issue DHCP request to vCPE
+
+#
+# Need to set up a tunnel between vCPE and vOLT to keep VLAN-tagged
+# packets from being swallowed by the network.
+#
+- hosts: vcpe
+  sudo: yes
+  vars:
+    volt_lan_ip: "{{ hostvars['switch_volt']['lan_ip'] }}"
+  tags:
+  - vcpe
+  tasks:
+
+  - name: Install packages
+    apt: name={{ item }}
+      state=latest
+      update_cache=yes
+    with_items:
+    - openvswitch-switch
+
+  - name: Create br-lan
+    openvswitch_bridge:
+      bridge=br-lan
+      state=present
+
+  - name: Create tunnel port
+    openvswitch_port:
+      bridge=br-lan
+      port=gre0
+      state=present
+
+  - name: Configure GRE tunnel to vOLT switch
+    shell: ovs-vsctl set Interface gre0 type=gre options:remote_ip={{ volt_lan_ip }}
+
+  - name: Restart vCPEs
+    script: scripts/restart-vcpes.sh
diff --git a/cord-deprecated/dataplane/gen-etc-hosts.sh b/cord-deprecated/dataplane/gen-etc-hosts.sh
new file mode 100755
index 0000000..0d49706
--- /dev/null
+++ b/cord-deprecated/dataplane/gen-etc-hosts.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# set -x
+
+source ../../setup/admin-openrc.sh
+
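+# get_ip LABEL NETWORK: print the IP that nova assigned to instance LABEL on NETWORK (scraped from "nova list").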
+get_ip () {
+    LABEL=$1
+    NETWORK=$2
+    nova list --all-tenants|grep $LABEL|sed "s/^.*$NETWORK=//g"|sed 's/; .*$//g'|awk '{print $1}'
+}
+
+cat <<EOF
+$( get_ip mysite_onos_vbng flat-lan-1-net) onos_vbng
+$( get_ip mysite_vbng flat-lan-1-net) switch_vbng
+$( get_ip mysite_onos_volt flat-lan-1-net) onos_volt
+$( get_ip mysite_volt flat-lan-1-net) switch_volt
+$( get_ip mysite_clients flat-lan-1-net) client
+$( get_ip mysite_vsg flat-lan-1-net) vcpe
+EOF
diff --git a/cord-deprecated/dataplane/gen-inventory.sh b/cord-deprecated/dataplane/gen-inventory.sh
new file mode 100755
index 0000000..bacd2dd
--- /dev/null
+++ b/cord-deprecated/dataplane/gen-inventory.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# set -x
+
+source ../../setup/admin-openrc.sh
+
+get_ip () {
+    LABEL=$1
+    NETWORK=$2
+    nova list --all-tenants|grep $LABEL|sed "s/^.*$NETWORK=//g"|sed 's/; .*$//g'|awk '{print $1}'
+}
+
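+# Emit an Ansible inventory; the extra host vars (wan_ip, public_ip, subscriber_ip, lan_ip) feed dataplane.yaml.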
+cat <<EOF
+onos_vbng    ansible_ssh_host=$( get_ip mysite_onos_vbng flat-lan-1-net)
+switch_vbng  ansible_ssh_host=$( get_ip mysite_vbng flat-lan-1-net) wan_ip=$( get_ip mysite_vbng wan_network) public_ip=$( get_ip mysite_vbng tun0-net )
+
+onos_volt    ansible_ssh_host=$( get_ip mysite_onos_volt flat-lan-1-net)
+switch_volt  ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) subscriber_ip=$( get_ip mysite_volt subscriber_network) lan_ip=$( get_ip mysite_volt lan_network)
+
+client       ansible_ssh_host=$( get_ip mysite_clients flat-lan-1-net) subscriber_ip=$( get_ip mysite_clients subscriber_network)
+vcpe         ansible_ssh_host=$( get_ip mysite_vsg flat-lan-1-net) lan_ip=$( get_ip mysite_vsg lan_network)
+EOF
diff --git a/cord-deprecated/dataplane/generate-bm.sh b/cord-deprecated/dataplane/generate-bm.sh
new file mode 100755
index 0000000..f9b8787
--- /dev/null
+++ b/cord-deprecated/dataplane/generate-bm.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+source ../../setup/admin-openrc.sh
+
+get_ip () {
+    LABEL=$1
+    NETWORK=$2
+    nova list --all-tenants|grep $LABEL|sed "s/^.*$NETWORK=//g"|sed 's/; .*$//g'|awk '{print $1}'
+}
+
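+# GRE_NAMES/BM_IPS become the "grenames"/"bm_ips" host vars consumed by dataplane-bm.yaml.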
+GRE_NAMES=()
+BM_IPS=()
+
+NODES=`sudo bash -c "source ../../setup/admin-openrc.sh ; nova hypervisor-list" |grep enabled|awk '{print $4}'`
+I=1
+for NODE in $NODES; do
+    BM_SSH_IP=`getent hosts $NODE | awk '{ print $1 }'`
+    IFS=. read BM_NAME BM_REMAINDER <<< $NODE
+    BM_IP=`sudo grep -i $BM_NAME /root/setup/data-hosts.flat-lan-1 | awk '{print $1}'`
+
+    GRE_NAMES+=("gre-bm-$I")
+    BM_IPS+=("$BM_IP")
+
+    #echo switch_volt$I    ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) grename=gre-bm-$I bm_addr=$BM_IP
+    echo bm$I           ansible_ssh_host=$BM_SSH_IP grename=gre-bm-$I volt_addr=$( get_ip mysite_volt flat-lan-1-net)  ansible_ssh_private_key_file=/root/.ssh/id_rsa
+    I=$(( I+1 ))
+done
+
+GRE_NAMES=${GRE_NAMES[@]}
+BM_IPS=${BM_IPS[@]}
+
+echo switch_volt ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) grenames=\"$GRE_NAMES\" bm_ips=\"$BM_IPS\"
+
+NM=`grep "^nm" /root/setup/fqdn.map | awk '{ print $2 }'`
+echo "nm1 ansible_ssh_host=$NM ansible_ssh_private_key_file=/root/.ssh/id_rsa"
+
+echo "[baremetal]"
+I=1
+for NODE in $NODES; do
+    echo bm$I
+    I=$((I+1))
+done
+
+# now for the network management node
+echo "[nm]"
+echo "nm1"
diff --git a/cord-deprecated/dataplane/scripts/if_from_ip.py b/cord-deprecated/dataplane/scripts/if_from_ip.py
new file mode 100644
index 0000000..28524fe
--- /dev/null
+++ b/cord-deprecated/dataplane/scripts/if_from_ip.py
@@ -0,0 +1,14 @@
+#!/usr/bin/python
+
+import sys
+import netifaces
+
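+# Usage: if_from_ip.py <ipv4-address> -- prints the name of the interface bound to that address.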
+def main(argv):
+    addr = argv[0]
+    for iface in netifaces.interfaces():
+        addrs = netifaces.ifaddresses(iface)
+        # AF_INET == 2; only the first IPv4 address on each interface is checked
+        if netifaces.AF_INET in addrs and addrs[netifaces.AF_INET][0]['addr'] == addr:
+            sys.stdout.write(iface)
+
+if __name__ == "__main__":
+    main(sys.argv[1:])
diff --git a/cord-deprecated/dataplane/scripts/restart-vcpes.sh b/cord-deprecated/dataplane/scripts/restart-vcpes.sh
new file mode 100644
index 0000000..d1c9fce
--- /dev/null
+++ b/cord-deprecated/dataplane/scripts/restart-vcpes.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
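+# Each vCPE container found by "docker ps" is assumed to have an init service of the same name.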
+for VCPE in $( docker ps|grep vcpe|awk '{print $NF}' )
+do
+  service $VCPE stop
+  sleep 1
+  service $VCPE start
+done
diff --git a/cord-deprecated/docker-compose.yml b/cord-deprecated/docker-compose.yml
new file mode 100644
index 0000000..28eeeb4
--- /dev/null
+++ b/cord-deprecated/docker-compose.yml
@@ -0,0 +1,106 @@
+xos_db:
+    image: xosproject/xos-postgres
+    expose:
+        - "5432"
+
+xos_synchronizer_openstack:
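+    # The "sleep 120" in each synchronizer gives xos_db and the XOS core time to come up first.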
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/openstack/xos-synchronizer.py"
+    image: xosproject/xos-synchronizer-openstack
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: openstack
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+    volumes:
+        - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+        - ./xos_cord_config:/opt/xos/xos_configuration/xos_cord_config:ro
+        - ../setup:/root/setup:ro
+
+xos_synchronizer_onos:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/onos/onos-synchronizer.py -C /opt/xos/synchronizers/onos/onos_synchronizer_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: onos
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+    volumes:
+        - ../setup/id_rsa:/opt/xos/synchronizers/onos/onos_key:ro  # private key
+
+xos_synchronizer_vcpe:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/vcpe/vcpe-synchronizer.py -C /opt/xos/synchronizers/vcpe/vcpe_synchronizer_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: vcpe
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+    volumes:
+        - ../setup/id_rsa:/opt/xos/synchronizers/vcpe/vcpe_private_key:ro  # private key
+        - ../setup:/root/setup:ro
+
+xos_synchronizer_vbng:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/vbng/vbng-synchronizer.py -C /opt/xos/synchronizers/vbng/vbng_synchronizer_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: vbng
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+
+xos_synchronizer_monitoring_channel:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/monitoring_channel/monitoring_channel_synchronizer.py -C /opt/xos/synchronizers/monitoring_channel/monitoring_channel_synchronizer_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: monitoring_channel
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+    volumes:
+        - ../setup/id_rsa:/opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key:ro  # private key
+
+xos_synchronizer_vtr:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/vtr/vtr-synchronizer.py -C /opt/xos/synchronizers/vtr/vtr_synchronizer_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: vtr
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+    volumes:
+        - ../setup/id_rsa:/opt/xos/synchronizers/vtr/vcpe_private_key:ro  # private key
+        - ../setup:/root/setup:ro
+
+# FUTURE
+#xos_swarm_synchronizer:
+#    image: xosproject/xos-swarm-synchronizer
+#    labels:
+#        org.xosproject.kind: synchronizer
+#        org.xosproject.target: swarm
+
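+# The XOS UI is published on host port 9999; "xosRestPort" in virtualbng.json points here.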
+xos:
+    image: xosproject/xos
+    command: python /opt/xos/manage.py runserver 0.0.0.0:8000 --insecure --makemigrations
+    ports:
+        - "9999:8000"
+    links:
+        - xos_db
+    volumes:
+        - ../setup:/root/setup:ro
+        - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+        - ./xos_cord_config:/opt/xos/xos_configuration/xos_cord_config:ro
+        - ../setup/id_rsa.pub:/opt/xos/synchronizers/onos/onos_key.pub:ro
+        - ../setup/id_rsa.pub:/opt/xos/synchronizers/vcpe/vcpe_public_key:ro
+        - ../setup/id_rsa.pub:/opt/xos/synchronizers/monitoring_channel/monitoring_channel_public_key:ro
diff --git a/cord-deprecated/make-virtualbng-json.sh b/cord-deprecated/make-virtualbng-json.sh
new file mode 100644
index 0000000..993643c
--- /dev/null
+++ b/cord-deprecated/make-virtualbng-json.sh
@@ -0,0 +1,38 @@
+FN=$SETUPDIR/virtualbng.json
+
+rm -f $FN
+
+cat >> $FN <<EOF
+{
+    "localPublicIpPrefixes" : [
+        "10.254.0.128/25"
+    ],
+    "nextHopIpAddress" : "10.254.0.1",
+    "publicFacingMac" : "00:00:00:00:00:66",
+    "xosIpAddress" : "10.11.10.1",
+    "xosRestPort" : "9999",
+    "hosts" : {
+EOF
+
+NODES=$( sudo bash -c "source $SETUPDIR/admin-openrc.sh ; nova hypervisor-list" |grep -v ID|grep -v +|awk '{print $4}' )
+
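+# Count the nodes first so the final "hosts" entry can omit the trailing comma.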
+NODECOUNT=0
+for NODE in $NODES; do
+    ((NODECOUNT++))
+done
+
+I=0
+for NODE in $NODES; do
+    echo $NODE
+    ((I++))
+    if [[ "$I" -lt "$NODECOUNT" ]]; then
+        echo "      \"$NODE\" : \"of:0000000000000001/1\"," >> $FN
+    else
+        echo "      \"$NODE\" : \"of:0000000000000001/1\"" >> $FN
+    fi
+done
+
+cat >> $FN <<EOF
+    }
+}
+EOF
diff --git a/cord-deprecated/make-vtn-networkconfig-json.sh b/cord-deprecated/make-vtn-networkconfig-json.sh
new file mode 100644
index 0000000..77b855d
--- /dev/null
+++ b/cord-deprecated/make-vtn-networkconfig-json.sh
@@ -0,0 +1,88 @@
+FN=$SETUPDIR/vtn-network-cfg.json
+
+echo "Writing to $FN"
+
+rm -f $FN
+
+cat >> $FN <<EOF
+{
+    "apps" : {
+        "org.onosproject.cordvtn" : {
+            "cordvtn" : {
+                "privateGatewayMac" : "00:00:00:00:00:01",
+                "localManagementIp": "172.27.0.1/24",
+                "ovsdbPort": "6641",
+                "sshPort": "22",
+                "sshUser": "root",
+                "sshKeyFile": "/root/node_key",
+                "publicGateways": [
+                    {
+                        "gatewayIp": "10.123.0.1",
+                        "gatewayMac": "00:8c:fa:5b:09:d8"
+                    }
+                ],
+                "nodes" : [
+EOF
+
+NODES=$( sudo bash -c "source $SETUPDIR/admin-openrc.sh ; nova hypervisor-list" |grep -v ID|grep -v +|awk '{print $4}' )
+
+# XXX disabled - we don't need or want the nm node at this time
+# also configure ONOS to manage the nm node
+# NM=`grep "^nm" /root/setup/fqdn.map | awk '{ print $2 }'`
+# NODES="$NODES $NM"
+
+NODECOUNT=0
+for NODE in $NODES; do
+    ((NODECOUNT++))
+done
+
+I=0
+for NODE in $NODES; do
+    echo $NODE
+    NODEIP=`getent hosts $NODE | awk '{ print $1 }'`
+
+    # This part is CloudLab-specific. It examines the flat-lan-1 network and extracts
+    # the eth device and IP address that were assigned to flat-lan-1.
+    sudo scp root@$NODE:/root/setup/info.flat-lan-1 $SETUPDIR/flat-lan-$NODE
+    PHYPORT=`bash -c "source $SETUPDIR/flat-lan-$NODE; echo \\\$DATADEV"`
+    LOCALIP=`bash -c "source $SETUPDIR/flat-lan-$NODE; echo \\\$DATAIP"`
+
+    ((I++))
+    cat >> $FN <<EOF
+                    {
+                      "hostname": "$NODE",
+                      "hostManagementIp": "$NODEIP/24",
+                      "bridgeId": "of:000000000000000$I",
+                      "dataPlaneIntf": "$PHYPORT",
+                      "dataPlaneIp": "$LOCALIP/24"
+EOF
+    if [[ "$I" -lt "$NODECOUNT" ]]; then
+        echo "                    }," >> $FN
+    else
+        echo "                    }" >> $FN
+    fi
+done
+
+# get the openstack admin password and username
+source $SETUPDIR/admin-openrc.sh
+
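+# Neutron and Keystone are assumed to run on this head node, so both IPs resolve from the local hostname.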
+HOSTNAME=`hostname`
+NEUTRONIP=`getent hosts $HOSTNAME | awk '{ print $1 }'`
+KEYSTONEIP=`getent hosts $HOSTNAME | awk '{ print $1 }'`
+
+cat >> $FN <<EOF
+                ]
+            }
+        },
+        "org.onosproject.openstackinterface" : {
+            "openstackinterface" : {
+                 "do_not_push_flows" : "true",
+                 "neutron_server" : "http://$NEUTRONIP:9696/v2.0/",
+                 "keystone_server" : "http://$KEYSTONEIP:5000/v2.0/",
+                 "user_name" : "$OS_USERNAME",
+                 "password" : "$OS_PASSWORD"
+             }
+        }
+    }
+}
+EOF
diff --git a/cord-deprecated/xos_cord_config b/cord-deprecated/xos_cord_config
new file mode 100644
index 0000000..a5448f7
--- /dev/null
+++ b/cord-deprecated/xos_cord_config
@@ -0,0 +1,6 @@
+[gui]
+branding_name=CORD
+#branding_css=/static/cord.css
+branding_icon=/static/cord-logo.png
+branding_favicon=/static/cord-favicon.png
+branding_bg=/static/cord-bg.jpg