move over configurations from xos repo

Change-Id: I66583bdaea582986d8f16a29066a79c6687b97fb
diff --git a/cord-pod/Makefile b/cord-pod/Makefile
new file mode 100644
index 0000000..f006c51
--- /dev/null
+++ b/cord-pod/Makefile
@@ -0,0 +1,173 @@
+include ../common/Makedefs
+
+CONFIG_DIR:=$(shell pwd)
+DOCKER_COMPOSE_YML=./onboarding-docker-compose/docker-compose.yml
+BOOTSTRAP_YML=./docker-compose-bootstrap.yml
+DOCKER_PROJECT=cordpod
+BOOTSTRAP_PROJECT=cordpodbs
+XOS_BOOTSTRAP_PORT=81
+XOS_UI_PORT=80
+ADMIN_USERNAME=padmin@vicci.org
+ADMIN_PASSWORD=letmein
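+# Helpers that submit a TOSCA recipe to the XOS instance listening on the given port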
+RUN_TOSCA_BOOTSTRAP=python ../common/run_tosca.py $(XOS_BOOTSTRAP_PORT) $(ADMIN_USERNAME) $(ADMIN_PASSWORD)
+RUN_TOSCA=python ../common/run_tosca.py $(XOS_UI_PORT) $(ADMIN_USERNAME) $(ADMIN_PASSWORD)
+
+.PHONY: xos
+xos: prereqs dirs download_services bootstrap onboarding podconfig
+
+prereqs:
+	sudo make -f ../common/Makefile.prereqs
+
+dirs:
+	# if this directory doesn't exist, then docker-compose will create it with root permission
+	mkdir -p key_import
+	mkdir -p onboarding-docker-compose
+
+bootstrap:
+	echo "[BOOTSTRAP]"
+	sudo rm -f onboarding-docker-compose/docker-compose.yml
+	sudo CONFIG_DIR=$(CONFIG_DIR) docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) up -d
+	bash ../common/wait_for_xos_port.sh 81
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run xos_bootstrap_ui python /opt/xos/tosca/run.py none - < ../common/fixtures.yaml
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run xos_bootstrap_ui python /opt/xos/tosca/run.py none - < ../common/mydeployment.yaml
+	$(RUN_TOSCA_BOOTSTRAP) xos.yaml
+
+download_services:
+	make -f ../common/Makefile.services
+
+update_services:
+	make -f ../common/Makefile.services update
+
+onboarding:
+	echo "[ONBOARDING]"
+	# on-board any services here
+	bash ../common/wait_for_onboarding_ready.sh 81 xos
+	$(RUN_TOSCA_BOOTSTRAP) ../common/disable-onboarding.yaml
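+	# copy the POD keypair in under the name each service's onboarding recipe expects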
+	sudo cp id_rsa key_import/vsg_rsa
+	sudo cp id_rsa.pub key_import/vsg_rsa.pub
+	sudo cp id_rsa key_import/volt_rsa
+	sudo cp id_rsa.pub key_import/volt_rsa.pub
+	sudo cp id_rsa key_import/onos_rsa
+	sudo cp id_rsa.pub key_import/onos_rsa.pub
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/vrouter/xos/vrouter-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/olt/xos/volt-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/vsg/xos/vsg-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/vtn/xos/vtn-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/onos-service/xos/onos-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/fabric/xos/fabric-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/vtr/xos/vtr-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) synchronizers.yaml
+	$(RUN_TOSCA_BOOTSTRAP) ../common/enable-onboarding.yaml
+	bash ../common/wait_for_onboarding_ready.sh 81 services/vrouter
+	bash ../common/wait_for_onboarding_ready.sh 81 services/volt
+	bash ../common/wait_for_onboarding_ready.sh 81 services/vsg
+	bash ../common/wait_for_onboarding_ready.sh 81 services/vtn
+	bash ../common/wait_for_onboarding_ready.sh 81 services/onos
+	bash ../common/wait_for_onboarding_ready.sh 81 services/fabric
+	bash ../common/wait_for_onboarding_ready.sh 81 services/vtr
+	bash ../common/wait_for_onboarding_ready.sh 81 xos
+	bash ../common/wait_for_xos_port.sh 80
+
+podconfig: nodes.yaml images.yaml
+	echo "[PODCONFIG]"
+	$(RUN_TOSCA) setup.yaml
+	$(RUN_TOSCA) nodes.yaml
+	$(RUN_TOSCA) images.yaml
+
+vtn: vtn-external.yaml
+	$(RUN_TOSCA) vtn-external.yaml
+
+fabric: fabric.yaml
+	$(RUN_TOSCA) fabric.yaml
+
+cord: vsg_custom_images
+	$(RUN_TOSCA) mgmt-net.yaml
+	$(RUN_TOSCA) cord-vtn-vsg.yaml
+	$(RUN_TOSCA) cord-volt-devices.yaml
+
+clean-nodes:
+	rm -f nodes.yaml
+
+update-nodes: nodes.yaml
+	$(RUN_TOSCA) nodes.yaml
+
+new-nodes: clean-nodes update-nodes vtn
+
+exampleservice: onboard-exampleservice
+	$(RUN_TOSCA) pod-exampleservice.yaml
+
+onboard-exampleservice:
+	sudo cp id_rsa key_import/exampleservice_rsa
+	sudo cp id_rsa.pub key_import/exampleservice_rsa.pub
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/exampleservice/exampleservice-onboard.yaml
+	bash ../common/wait_for_onboarding_ready.sh 81 services/exampleservice
+	bash ../common/wait_for_onboarding_ready.sh 81 xos
+	bash ../common/wait_for_xos_port.sh 80
+
+cord-ceilometer: ceilometer_custom_images cord onboard-ceilometer
+	$(RUN_TOSCA) ceilometer.yaml
+
+onboard-ceilometer: download-ceilometer
+	sudo cp id_rsa key_import/monitoring_channel_rsa
+	sudo cp id_rsa.pub key_import/monitoring_channel_rsa.pub
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/monitoring/xos/ceilometer-onboard.yaml
+	bash ../common/wait_for_onboarding_ready.sh 81 services/ceilometer
+	bash ../common/wait_for_onboarding_ready.sh 81 xos
+	bash ../common/wait_for_xos_port.sh 80
+
+download-ceilometer:
+	make -f ../common/Makefile.services monitoring_services
+
+nodes.yaml:
+	export SETUPDIR=.; bash ../common/make-nodes-yaml.sh
+
+images.yaml:
+	export SETUPDIR=.; bash ../common/make-images-yaml.sh
+
+vtn-external.yaml:
+	export SETUPDIR=.; bash ./make-vtn-external-yaml.sh
+
+fabric.yaml:
+	export SETUPDIR=.; bash ./make-fabric-yaml.sh
+
+virtualbng_json:
+	export SETUPDIR=.; bash ./make-virtualbng-json.sh
+
+vtn_network_cfg_json:
+	export SETUPDIR=.; bash ./make-vtn-networkconfig-json.sh
+
+stop:
+	test ! -s $(DOCKER_COMPOSE_YML) || sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) stop
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) stop
+
+rm:
+	test ! -s $(DOCKER_COMPOSE_YML) || sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) rm
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) rm
+
+showlogs:
+	sudo MYIP=$(MYIP) docker-compose logs
+
+cleanup: stop rm
+	./cleanup.sh
+	bash -c "source ./admin-openrc.sh; nova list --all-tenants; neutron net-list"
+
+ceilometer_custom_images: images/ceilometer-trusty-server-multi-nic.img
+	bash -c "source ./admin-openrc.sh; glance image-show ceilometer-trusty-server-multi-nic || glance image-create --name ceilometer-trusty-server-multi-nic --disk-format qcow2 --file ./images/ceilometer-trusty-server-multi-nic.img --container-format bare"
+
+vsg_custom_images: images/vsg-1.0.img
+	bash -c "source ./admin-openrc.sh; glance image-show vsg-1.0 || glance image-create --name vsg-1.0 --disk-format qcow2 --file ./images/vsg-1.0.img --container-format bare"
+
+images/ceilometer-trusty-server-multi-nic.img: images
+	wget http://www.vicci.org/cord/ceilometer-trusty-server-multi-nic.compressed.qcow2 -P ./images
+	mv ./images/ceilometer-trusty-server-multi-nic.compressed.qcow2 ./images/ceilometer-trusty-server-multi-nic.img
+
+images/vsg-1.0.img: images
+	wget http://www.vicci.org/cord/vsg-1.0.img -P ./images
+
+images:
+	mkdir -p ./images
+
+.PHONY: local_containers
+local_containers:
+	make -f ../common/Makefile.containers update_certs xos_devel synchronizer onboarding_synchronizer
+
diff --git a/cord-pod/NOTES.txt b/cord-pod/NOTES.txt
new file mode 100644
index 0000000..d832f2b
--- /dev/null
+++ b/cord-pod/NOTES.txt
@@ -0,0 +1,37 @@
+Notes on setup
+
+Requirements:
+* admin-openrc.sh: Admin credentials for your OpenStack cloud
+* id_rsa[.pub]: Keypair for use by the various services
+* node_key: Private key that allows root login to the compute nodes
+
+Steps for bringing up the POD:
+
+OpenStack
+* Configure management net
+  - mgmtbr on head nodes
+  - dnsmasq on head1 using cord config file
+* Install OpenStack using the openstack-cluster-install repo
+
+VTN
+* onos-cord VM is created by openstack-cluster-install
+* Bring up ONOS
+  # cd cord; docker-compose up -d
+* On each compute node it's necessary to perform a few manual steps (FIX ME)
+  - Disable neutron-plugin-openvswitch-agent. As root:
+    # service neutron-plugin-openvswitch-agent stop
+    # echo manual > /etc/init/neutron-plugin-openvswitch-agent.override
+  - Clean up OVS: delete br-int and any other bridges, e.g. as root:
+    # ovs-vsctl del-br br-int
+  - Listen for connections from VTN:
+    # ovs-appctl -t ovsdb-server ovsdb-server/add-remote ptcp:6641
+
+XOS
+* xos VM is created by openstack-cluster-install
+  - requirements listed above should already be satisfied by install
+* cd xos/xos/configurations/cord-pod
+* Bring up XOS cord-pod configuration
+  # make
+  # make vtn
+  # make cord
+* Log in to XOS at http://xos
+  - padmin@vicci.org / letmein
diff --git a/cord-pod/README-Tutorial.md b/cord-pod/README-Tutorial.md
new file mode 100644
index 0000000..9f8c9e9
--- /dev/null
+++ b/cord-pod/README-Tutorial.md
@@ -0,0 +1,182 @@
+# Setting up the XOS Tutorial
+
+The XOS Tutorial demonstrates how to add a new subscriber-facing
+service to CORD.  
+
+## Prepare the development POD
+
+This tutorial runs on a single-node CORD POD development environment.
+For best results, prepare a clean Ubuntu 14.04
+LTS installation on a server with at least 48GB RAM and 12 CPU cores.
+Update the packages to the latest versions.
+
+To set up the POD, run
+[this script](https://github.com/open-cloud/openstack-cluster-setup/blob/master/scripts/single-node-pod.sh)
+with the `-e` option:
+
+```
+ubuntu@pod:~$ wget https://raw.githubusercontent.com/open-cloud/openstack-cluster-setup/master/scripts/single-node-pod.sh
+ubuntu@pod:~$ bash single-node-pod.sh -e
+```
+
+> NOTE: The above script can also automatically perform all tutorial steps if run as `bash single-node-pod.sh -e -t`.
+
+Be patient... it will take **at least one hour** to fully set up the single-node POD.
+
+## Include ExampleService in XOS
+
+On the POD, SSH into the XOS VM: `$ ssh ubuntu@xos`.  You will see the XOS repository
+checked out under `~/xos/`.
+
+Change the XOS code as described in the
+[ExampleService Tutorial](http://guide.xosproject.org/devguide/exampleservice/)
+under the **Install the Service in Django** heading, and rebuild the XOS containers as
+follows:
+
+```
+ubuntu@xos:~$ cd xos/xos/configurations/cord-pod
+ubuntu@xos:~/xos/xos/configurations/cord-pod$ make local_containers
+```
+
+Modify the `docker-compose.yml` file in the `cord-pod` directory to include the synchronizer
+for ExampleService:
+
+```yaml
+xos_synchronizer_exampleservice:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/exampleservice/exampleservice-synchronizer.py -C /root/setup/files/exampleservice_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: exampleservice
+    links:
+        - xos_db
+    volumes:
+        - .:/root/setup:ro
+        - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+        - ./id_rsa:/opt/xos/synchronizers/exampleservice/exampleservice_private_key:ro
+```
+
+Also, add ExampleService's public key to the `volumes` section of the `xos` docker container:
+
+```yaml
+xos:
+    ...
+    volumes:
+        ...
+        - ./id_rsa.pub:/opt/xos/synchronizers/exampleservice/exampleservice_public_key:ro 
+```
+
+## Bring up XOS
+
+Run the `make` commands described in the [Bringing up XOS](https://github.com/open-cloud/xos/blob/master/xos/configurations/cord-pod/README.md#bringing-up-xos)
+section of the README.md file.
+
+## Configure ExampleService in XOS
+
+The TOSCA file `pod-exampleservice.yaml` contains the service declaration.
+Tell XOS to process it by running:
+
+```
+ubuntu@xos:~/xos/xos/configurations/cord-pod$ make exampleservice
+```
+
+This will add the ExampleService to XOS.  It will also create an ExampleTenant,
+which causes a VM to be created with Apache running inside.
+
+
+## Set up a Subscriber Device
+
+The single-node POD does not include a virtual OLT, but a device at the
+subscriber’s premises can be simulated by an LXC container running on the
+nova-compute node.
+
+In the nova-compute VM:
+
+```
+ubuntu@nova-compute:~$ sudo apt-get install lxc
+```
+
+Next, edit `/etc/lxc/default.conf` and change the default bridge name to `databr`:
+
+```
+  lxc.network.link = databr
+```
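+
+If you prefer to script this step, a one-line edit such as the following should
+work (assuming the stock `lxc.network.link = lxcbr0` entry from the Ubuntu `lxc`
+package):
+
+```
+ubuntu@nova-compute:~$ sudo sed -i 's/^lxc.network.link.*/lxc.network.link = databr/' /etc/lxc/default.conf
+```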
+
+Create the client container and attach to it:
+
+```
+ubuntu@nova-compute:~$ sudo lxc-create -t ubuntu -n testclient
+ubuntu@nova-compute:~$ sudo lxc-start -n testclient
+ubuntu@nova-compute:~$ sudo lxc-attach -n testclient
+```
+
+(The `lxc-start` command may print an error, but it appears to be harmless.)
+
+Finally, inside the container set up an interface so that outgoing traffic
+is tagged with the s-tag (222) and c-tag (111) configured for the
+sample subscriber:
+
+```
+root@testclient:~# ip link add link eth0 name eth0.222 type vlan id 222
+root@testclient:~# ip link add link eth0.222 name eth0.222.111 type vlan id 111
+root@testclient:~# ifconfig eth0.222 up
+root@testclient:~# ifconfig eth0.222.111 up
+root@testclient:~# dhclient eth0.222.111
+```
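+
+If `dhclient` fails to get a lease, a quick sanity check is to confirm the VLAN
+tags on the new interfaces:
+
+```
+root@testclient:~# ip -d link show eth0.222.111
+```
+
+The output should include `vlan protocol 802.1Q id 111`.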
+
+If the vSG is up and everything is working correctly, the eth0.222.111
+interface should acquire an IP address via DHCP and have external connectivity.
+
+## Access ExampleService from the Subscriber Device
+
+To test that the subscriber device can access the ExampleService, find the IP
+address of the ExampleService Instance in the XOS GUI, and then curl this
+address from inside the testclient container:
+
+```
+root@testclient:~# apt-get install curl
+root@testclient:~# curl 10.168.1.3
+ExampleService
+ Service Message: "service_message"
+ Tenant Message: "tenant_message"
+```
+
+Hooray!  This shows that the subscriber (1) has external connectivity, and
+(2) can access the new service via the vSG.
+
+## Troubleshooting
+
+Sometimes the ExampleService instance comes up with the wrong default route.  If the 
+ExampleService instance is active but the `curl` command does not work, SSH to the
+instance and check its default gateway.  Assuming the management address of the `mysite_exampleservice`
+VM is 172.27.0.2:
+
+```
+ubuntu@pod:~$ ssh-agent bash
+ubuntu@pod:~$ ssh-add
+ubuntu@pod:~$ ssh -A ubuntu@nova-compute
+ubuntu@nova-compute:~$ ssh ubuntu@172.27.0.2
+ubuntu@mysite-exampleservice-2:~$ route -n
+Kernel IP routing table
+Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
+0.0.0.0         172.27.0.1      0.0.0.0         UG    0      0        0 eth1
+10.168.1.0      0.0.0.0         255.255.255.0   U     0      0        0 eth0
+172.27.0.0      0.0.0.0         255.255.255.0   U     0      0        0 eth1
+```
+
+If the default gateway is not `10.168.1.1`, manually set it to this value.
+
+```
+ubuntu@mysite-exampleservice-2:~$ sudo bash
+root@mysite-exampleservice-2:~# route del default gw 172.27.0.1
+root@mysite-exampleservice-2:~# route add default gw 10.168.1.1
+root@mysite-exampleservice-2:~# route -n
+Kernel IP routing table
+Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
+0.0.0.0         10.168.1.1      0.0.0.0         UG    0      0        0 eth0
+10.168.1.0      0.0.0.0         255.255.255.0   U     0      0        0 eth0
+172.27.0.0      0.0.0.0         255.255.255.0   U     0      0        0 eth1
+```
+
+Now the VM should have Internet connectivity and XOS will start downloading Apache. 
+A short while later the `curl` test should complete.
diff --git a/cord-pod/README.md b/cord-pod/README.md
new file mode 100644
index 0000000..8813d3e
--- /dev/null
+++ b/cord-pod/README.md
@@ -0,0 +1,200 @@
+# XOS Configuration for CORD development POD
+
+## Introduction
+
+This directory holds files that are used to configure a development POD for
+CORD.  For more information on the CORD project, check out
+[the CORD website](http://cord.onosproject.org/).
+
+XOS is composed of several core services:
+
+  * A database backend (postgres)
+  * A webserver front end (django)
+  * A synchronizer daemon that interacts with the openstack backend
+  * A synchronizer for each configured XOS service
+
+Each service runs in a separate Docker container.  The containers are built
+automatically by [Docker Hub](https://hub.docker.com/u/xosproject/) using
+the HEAD of the XOS repository.
+
+## How to bring up CORD
+
+Installing a CORD POD involves these steps:
+ 1. Install OpenStack on a cluster
+ 2. Set up the ONOS VTN app and configure OVS on the nova-compute nodes to be
+    controlled by VTN
+ 3. Set up external connectivity for VMs (if not using the CORD fabric)
+ 4. Bring up XOS with the CORD services
+
+### Install OpenStack
+
+To set up OpenStack, follow the instructions in the
+[README.md](https://github.com/open-cloud/openstack-cluster-setup/blob/master/README.md)
+file of the [open-cloud/openstack-cluster-setup](https://github.com/open-cloud/openstack-cluster-setup/)
+repository.  If you're just getting started with CORD, it's probably best to begin with the
+single-node CORD test environment to familiarize yourself with the overall setup.
+
+**NOTE: In order to use the cord-pod configuration, you must set up OpenStack using the above recipe.**
+
+### Set up ONOS VTN
+
+The OpenStack installer above creates a VM called *onos-cord* on the head node.
+To bring up ONOS in this VM, log into the head node and run:
+```
+$ ssh ubuntu@onos-cord
+ubuntu@onos-cord:~$ cd cord; sudo docker-compose up -d
+```
+
+### Set up external connectivity for VMs
+
+The CORD fabric is responsible for providing external (Internet) connectivity
+for VMs created on CORD.  If you are running on CloudLab (or another development
+environment) and want external connectivity without the fabric, download [this script](https://raw.githubusercontent.com/open-cloud/openstack-cluster-setup/master/scripts/compute-ext-net.sh)
+and run it on the Nova compute node(s) as root:
+```
+$ sudo bash compute-ext-net.sh
+```
+
+The script creates a bridge (*databr*) on the node as well as a veth pair
+(*veth0/veth1*).  The *veth0* interface is added as a port on *databr* and
+VTN is configured to use *veth1* as its data plane interface.  Traffic coming
+from *databr* is NAT'ed to the external network via `iptables`.  The configuration
+assumes that *databr* takes the MAC address of *veth0* when it is added as a port
+-- this seems to always be the case (though we are not sure why).
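+
+For reference, a minimal sketch of what such a script does (reconstructed from
+the description above; the real script may differ in interface names, subnet,
+and iptables details):
+
+```
+#!/bin/bash
+# create the data-plane bridge and a veth pair; veth1 is left for VTN
+brctl addbr databr
+ip link add veth0 type veth peer name veth1
+brctl addif databr veth0
+ip link set databr up; ip link set veth0 up; ip link set veth1 up
+# NAT traffic from the databr subnet out to the external network
+iptables -t nat -A POSTROUTING -s 10.168.0.0/24 ! -d 10.168.0.0/24 -j MASQUERADE
+```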
+
+Note that setting up the full fabric is beyond the scope of this README.
+
+### Bringing up XOS
+
+The OpenStack installer above creates a VM called *xos* on the head node.
+To bring up XOS in this VM, first log into the head node and run:
+```
+$ ssh ubuntu@xos
+ubuntu@xos:~$ cd xos/xos/configurations/cord-pod
+```
+
+Next, check that the following files exist in this directory
+(they will have been put there for you by the cluster installation scripts):
+
+ * *admin-openrc.sh*: Admin credentials for your OpenStack cloud
+ * *id_rsa[.pub]*: A keypair that will be used by the various services
+ * *node_key*: A private key that allows root login to the compute nodes
+
+XOS can then be brought up for CORD by running a few `make` commands.
+First, run:
+
+```
+ubuntu@xos:~/xos/xos/configurations/cord-pod$ make
+```
+
+Before proceeding, you should verify that objects in XOS are
+being synced with OpenStack. [Log in to the XOS GUI](#logging-into-xos-on-cloudlab-or-any-remote-host)
+and select *Users* at left.  Make sure there is a green check next to `padmin@vicci.org`.
+
+> If you are **not** building the single-node development POD, the next
+> step is to create and edit the VTN configuration.  Run `make vtn-external.yaml`,
+> then edit the resulting `vtn-external.yaml` TOSCA file.  The `rest_hostname:`
+> field points to the host where ONOS should run the VTN app.  The
+> fields in the `service_vtn` and the objects of type `tosca.nodes.Tag`
+> correspond to the VTN fields listed
+> on [the CORD VTN page on the ONOS Wiki](https://wiki.onosproject.org/display/ONOS/CORD+VTN),
+> under the **ONOS Settings** heading; refer there for the fields'
+> meanings.  
+
+Then run:
+
+```
+ubuntu@xos:~/xos/xos/configurations/cord-pod$ make vtn
+```
+The above step configures the ONOS VTN app by generating a configuration
+and pushing it to ONOS.  You can view and modify the configuration
+via the GUI as follows:
+
+* To see the generated configuration, go to *http://xos/admin/onos/onosapp/* 
+([caveat](#logging-into-xos-on-cloudlab-or-any-remote-host)), select
+*VTN_ONOS_app*, then the *Attributes* tab, and look for the
+`rest_onos/v1/network/configuration/` attribute.  
+
+* To change the VTN configuration, modify the fields of the VTN Service object
+and the Tag objects associated with Nodes.  Don't forget to select *Save*.
+
+* After modifying the above fields, delete the `rest_onos/v1/network/configuration/` attribute
+in the *VTN_ONOS_app* and select *Save*.  The attribute will be regenerated using the new information.
+
+* Alternatively, if you want to load your own VTN configuration manually, you can delete the
+`autogenerate` attribute from the *VTN_ONOS_app*, edit the configuration in the
+`rest_onos/v1/network/configuration/` attribute, and select *Save*.
+
+Before proceeding, check that the VTN app is controlling Open vSwitch on the compute nodes.  Log
+into ONOS and run the `cordvtn-nodes` command:
+
+```
+$ ssh -p 8101 karaf@onos-cord   # password is karaf
+onos> cordvtn-nodes
+hostname=nova-compute, hostMgmtIp=192.168.122.177/24, dpIp=192.168.199.1/24, br-int=of:0000000000000001, dpIntf=veth1, init=COMPLETE
+Total 1 nodes
+```
+The important part is the `init=COMPLETE` at the end.  If you do not see this, refer to
+[the CORD VTN page on the ONOS Wiki](https://wiki.onosproject.org/display/ONOS/CORD+VTN) for
+help fixing the problem.  This must be working to bring up VMs on the POD.
+
+> If you are **not** building the single-node development POD, modify `cord-vtn-vsg.yaml`
+> and change `addresses_vsg` so that it contains the IP address block,
+> gateway IP, and gateway MAC of the CORD fabric.  
+
+Then run:
+
+```
+ubuntu@xos:~/xos/xos/configurations/cord-pod$ make cord
+```
+
+
+### Inspecting the vSG
+
+The above series of `make` commands will spin up a vSG for a sample subscriber.  The
+vSG is implemented as a Docker container (using the
+[andybavier/docker-vcpe](https://hub.docker.com/r/andybavier/docker-vcpe/) image
+hosted on Docker Hub) running inside an Ubuntu VM.  Once the VM is created, you
+can log in as the `ubuntu` user at the management network IP (172.27.0.x) on the compute node
+hosting the VM, using the private key generated on the head node by the install process.
+For example, in the single-node development POD configuration, you can log in to the VM
+with management IP 172.27.0.2 using a ProxyCommand as follows:
+
+```
+ubuntu@pod:~$ ssh -o ProxyCommand="ssh -W %h:%p ubuntu@nova-compute" ubuntu@172.27.0.2
+```
+
+Alternatively, you could copy the generated private key to the compute node
+and login from there:
+
+```
+ubuntu@pod:~$ scp ~/.ssh/id_rsa ubuntu@nova-compute:~/.ssh
+ubuntu@pod:~$ ssh ubuntu@nova-compute
+ubuntu@nova-compute:~$ ssh ubuntu@172.27.0.2
+```
+
+Once logged in to the VM, you can run `sudo docker ps` to see the running
+vSG containers:
+
+```
+ubuntu@mysite-vsg-1:~$ sudo docker ps
+CONTAINER ID        IMAGE                    COMMAND             CREATED             STATUS              PORTS               NAMES
+2b0bfb3662c7        andybavier/docker-vcpe   "/sbin/my_init"     5 days ago          Up 5 days                               vcpe-222-111
+```
+
+### Logging into XOS on CloudLab (or any remote host)
+
+The XOS service is accessible on the POD at `http://xos/`, but `xos` maps to a private IP address
+on the management network.  If you install CORD on CloudLab 
+you will not be able to directly access the XOS GUI.
+In order to log into the XOS GUI in the browser on your local machine (desktop or laptop), 
+you can set up an SSH tunnel to your CloudLab node.  Assuming that 
+`<your-cloudlab-node>` is the DNS name of the CloudLab node hosting your experiment,
+run the following on your local machine to create the tunnel:
+
+```
+$ ssh -L 8888:xos:80 <your-cloudlab-node>
+```
+
+Then you should be able to access the XOS GUI by pointing your browser to
+`http://localhost:8888`.  Default username/password is `padmin@vicci.org/letmein`.
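+
+To confirm the tunnel is up, a quick check from your local machine should return
+an HTTP status line:
+
+```
+$ curl -sI http://localhost:8888 | head -1
+```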
diff --git a/cord-pod/admin-openrc.sh b/cord-pod/admin-openrc.sh
new file mode 100644
index 0000000..bfc9eab
--- /dev/null
+++ b/cord-pod/admin-openrc.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+# Replace with the OpenStack admin credentials for your cluster
+export OS_TENANT_NAME=admin
+export OS_USERNAME=admin
+export OS_PASSWORD=admin
+export OS_AUTH_URL=http://localhost:35357/v2.0
+
diff --git a/cord-pod/cdn/README.md b/cord-pod/cdn/README.md
new file mode 100644
index 0000000..be8c184
--- /dev/null
+++ b/cord-pod/cdn/README.md
@@ -0,0 +1,77 @@
+## Set up a new CDN
+
+### CDN on VTN - headnode
+
+1. nova flavor-create --is-public true m1.cdnnode auto 8192 110 4
+2. in XOS create flavor m1.cdnnode and add to deployment
+
+### CDN on VTN - CMI
+
+1. Make sure plenty of glance space on ctl node
+2. Make sure plenty of instance space on compute nodes
+3. Install cmi-0.3.img into XOS images/ directory
+4. Install CentOS-6-cdnnode-0.3.img into XOS images/ directory
+5. Wait for these two images to be loaded into glance (check glance image-list for status)
+6. XOS UI: Add cmi and CentOS images to MyDeployment
+7. Run recipe xos/configurations/cord-pod/pod-cdn.yaml
+       * this will create mysite_cdn slice, cdn-public network, and add management and cdn-public networks to slice
+8. Instantiate CMI instance in mysite_cdn
+       * flavor: m1.large
+       * image: cmi-0.3.img
+9. edit configurations/cord-pod/cdn/cmi-settings.sh
+       * update COMPUTE_NODE and MGMT_IP to match CMI instance
+       * update NODE_KEY to match ssh key for root @ the compute node
+       * do not change VM_KEY; the pubkey is baked into the instance
+10. edit configurations/cord-pod/cdn/cmi.yaml
+       * update gateway_ip and gateway_mac to reflect public internet gateway CMI will use
+11. copy the keygen and allkeys.template to the private/ directory
+12. copy cmi_id_rsa
+13. run setup-cmi.sh
+       * this will SSH into the CMI and run setup, then modify some settings.
+       * it may take a long time, 10-20 minutes or more
+       * the takeover script will be saved to takeovers/ and will be used in the next phase
+
+### CDN on VTN - cdnnode
+
+1. Instantiate cdnnode instance in mysite_cdn
+       * flavor: m1.cdnnode
+       * image: CentOS-6-cdnnode-0.3.img
+2. Log into the compute node and attach the disk
+       * virsh attach-disk <instance_name> /dev/sdc vdc --cache none
+       * (make sure this disk wasn't used anywhere else!)
+3. log into cdnnode VM
+       * make sure default gateway is good (check public connectivity)
+       * make sure arp table is good
+       * make sure CMI is reachable from cdnnode
+       * run the takeover script that was created by the CMI
+       * (I suggest commenting out the final `reboot -f` and making sure the rest of it worked before rebooting)
+       * Node will take a long time to install
+4. log into cdnnode
+       * to SSH into the cdnnode, go into the CMI, run `vserver coplc`, `cd /etc/planetlab`, and use debug_ssh_key.rsa with the root user
+       * check default gateway
+       * fix arp entry for default gateway
+
+### CDN on VTN - cmi part 2
+
+1. run setup-cmi-logicalinterfaces.sh
+
+### CDN on VTN - important notes
+
+We manually edited synchronizers/vcpe/templates/dnsmasq_safe_servers.j2 inside the vcpe synchronizer VM:
+
+    # temporary for ONS demo
+    address=/z.cdn.turner.com/207.141.192.134
+    address=/cnn-vh.akamaihd.net/207.141.192.134
+
+### Test Commands
+
+* First, make sure the vSG is the only DNS server available in the test client. 
+* Second, make sure cdn_enable bit is set in CordSubscriber object for your vSG.
+* curl -L -vvvv http://downloads.onosproject.org/vm/onos-tutorial-1.1.0r220-ovf.zip > /dev/null
+* curl -L -vvvv http://onlab.vicci.org/onos-videos/Nov-planning-day1/Day1+00+Bill+-+Community+Growth.mp4 > /dev/null
+* curl -L -vvvv http://downloads.onosproject.org/release/onos-1.2.0.zip > /dev/null
+
+## Restart CDN after power-down
+
+To do...
diff --git a/cord-pod/cdn/cmi-logicalinterfaces.yaml b/cord-pod/cdn/cmi-logicalinterfaces.yaml
new file mode 100644
index 0000000..d45b63a
--- /dev/null
+++ b/cord-pod/cdn/cmi-logicalinterfaces.yaml
@@ -0,0 +1,11 @@
+---
+- hosts: cmi
+  connection: ssh
+  user: root
+  tasks:
+  - name: copy over cmi logical interface template
+    template: src=templates/setup_cmi_logicalinterfaces.sh dest=/vservers/coplc/root/setup_cmi_logicalinterfaces.sh
+
+  - name: run logical interface script
+    command: vserver coplc exec onevsh /root/setup_cmi_logicalinterfaces.sh
+
diff --git a/cord-pod/cdn/cmi-settings.sh b/cord-pod/cdn/cmi-settings.sh
new file mode 100644
index 0000000..db6c5f3
--- /dev/null
+++ b/cord-pod/cdn/cmi-settings.sh
@@ -0,0 +1,12 @@
+# This holds the connection information necessary to talk to your CMI.
+# It will be used by setup-cmi.sh and ssh-cmi.sh
+
+#COMPUTE_NODE=cp-2.smbaker-xos-vtn.xos-pg0.clemson.cloudlab.us
+#MGMT_IP=172.27.0.22
+#NODE_KEY=/root/setup/id_rsa
+#VM_KEY=cmi_id_rsa
+
+COMPUTE_NODE=10.90.0.65
+MGMT_IP=172.27.0.17
+NODE_KEY=cord_pod_node_key
+VM_KEY=cmi_id_rsa
diff --git a/cord-pod/cdn/cmi.yaml b/cord-pod/cdn/cmi.yaml
new file mode 100644
index 0000000..62abe01
--- /dev/null
+++ b/cord-pod/cdn/cmi.yaml
@@ -0,0 +1,69 @@
+---
+- hosts: cmi
+  connection: ssh
+  user: root
+  vars:
+    eth_device: eth0
+    eth_mac: 02:42:CF:8D:C0:82
+    cmi_password: XOScdn123$
+    cmi_hostname: xos-cloudlab-cmi-vtn.opencloud.us
+    cmi_dns: 8.8.8.8
+    cdn_site: CoBlitz Test
+    cdn_short_name: cobtest
+    cdn_name: CoBlitz
+#    gateway_ip: 10.124.0.1
+#    gateway_mac: 00:8c:fa:5b:09:d8
+    gateway_ip: 207.141.192.129
+    gateway_mac: a4:23:05:45:56:79
+    node_hostname: xos-cloudlab-node1-vtn.opencloud.us
+  tasks:
+  - name: fix the networking
+    shell: "{{ item }}"
+    with_items:
+      - ifconfig {{ eth_device }} hw ether {{ eth_mac }}
+      - ip route del default || true
+      - ip route add default via {{ gateway_ip }}
+      - arp -s {{ gateway_ip }} {{ gateway_mac }}
+
+  - name: copy over setup answers
+    template: src=templates/setup_answers.txt dest=/root/setup_answers.txt
+
+  - name: run the setup script
+    shell: /a/sbin/setup.sh < /root/setup_answers.txt
+    args:
+        creates: /a/var/log/setup.log
+
+  - name: fix onevapi CDNPrefix bug
+    shell: sed -i 's/hostname/str/g' /vservers/coplc/usr/share/cob_api/COB/PublicObjects/CDNPrefix.py
+
+  - name: fix onevapi OriginServer bug
+    shell: sed -i 's/attrToCheck = "edge_hosttype"/attrToCheck = "edge_hosttype_broken"/g' /vservers/coplc/usr/share/cob_api/COB/PublicObjects/OriginServer.py
+
+  - name: copy over cmi setup template
+    template: src=templates/setup_cmi_onevsh.sh dest=/vservers/coplc/root/setup_cmi_onevsh.sh
+
+  - name: run cmi setup script
+    command: vserver coplc exec onevsh /root/setup_cmi_onevsh.sh
+
+  - name: copy over cmi node setup template
+    template: src=templates/setup_cmi_node.sh dest=/vservers/coplc/root/setup_cmi_node.sh
+
+  - name: run node setup script
+    command: vserver coplc exec plcsh /root/setup_cmi_node.sh
+    args:
+        creates: /vservers/coplc/root/takeover-{{ node_hostname }}
+
+  - name: retrieve node takeover script
+    fetch: src=/vservers/coplc/root/takeover-{{ node_hostname }} dest=takeovers/takeover-{{ node_hostname }}
+
+  - name: update all keys script
+    copy: src=private/allkeys.template dest=/vservers/coplc/etc/onevantage/services/HPC/templates/usr/local/CoBlitz/var/allkeys.template
+
+  - name: install keygen
+    copy: src=private/keygen dest=/vservers/coplc/etc/onevantage/services/HPC/templates/usr/local/CoBlitz/var/keygen mode=0755
+
+  - name: download socat
+    get_url: url=http://pkgs.repoforge.org/socat/socat-1.7.2.1-1.el6.rf.x86_64.rpm dest=/root/socat-1.7.2.1-1.el6.rf.x86_64.rpm
+
+  - name: install socat
+    yum: name=/root/socat-1.7.2.1-1.el6.rf.x86_64.rpm state=present
diff --git a/cord-pod/cdn/cmi_id_rsa.pub b/cord-pod/cdn/cmi_id_rsa.pub
new file mode 100644
index 0000000..4acc08f
--- /dev/null
+++ b/cord-pod/cdn/cmi_id_rsa.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+4THhqOmPNnFa/C/WbR7+BOvjJAZPRmB5d2c81CNHzkyk7OLaibEQ84Hkcaaj+KLQPKcyWhpyCLlXiaOufHQUqI4rKpFceEPpuaGRGvFrs6JRK1m3X+fj2Xw9Odg+SnJ+zHyJqwyh+8QTubFZfPXx0Gti5P6utkgzWqUmh0XuqC0JLVXBGs5M3ViIq7NemlUPcWPCLfsCzDMHMMvSeJfnT7+LB76YXqMNpmwyq9Dwv/MRd/8lV6C4q3ZmlwBBuXl4JxdUha6LtecLO+2Wdjoin+g3otCmgCnVZKAh2H1BLcZkHSy9ILs+vj22m8oB2ufyefP+R6Xsyne+G3kpJqWV smbaker@fc16-64.lan
diff --git a/cord-pod/cdn/private/README b/cord-pod/cdn/private/README
new file mode 100644
index 0000000..e5cfbc1
--- /dev/null
+++ b/cord-pod/cdn/private/README
@@ -0,0 +1 @@
+Stuff in here is private and will not be uploaded to GitHub.
diff --git a/cord-pod/cdn/setup-cmi-logicalinterfaces.sh b/cord-pod/cdn/setup-cmi-logicalinterfaces.sh
new file mode 100644
index 0000000..b1acd65
--- /dev/null
+++ b/cord-pod/cdn/setup-cmi-logicalinterfaces.sh
@@ -0,0 +1,18 @@
+#! /bin/bash
+
+source cmi-settings.sh
+
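+# Build a throwaway ansible config that reaches the CMI VM by proxying SSH
+# through the compute node (NODE_KEY) to the CMI's management IP (VM_KEY)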
+echo "[ssh_connection]" > cmi.conf
+echo "ssh_args = -o \"ProxyCommand ssh -q -i $NODE_KEY -o StrictHostKeyChecking=no root@$COMPUTE_NODE nc $MGMT_IP 22\"" >> cmi.conf
+echo "scp_if_ssh = True" >> cmi.conf
+echo "pipelining = True" >> cmi.conf
+echo >> cmi.conf
+echo "[defaults]" >> cmi.conf
+echo "host_key_checking = False" >> cmi.conf
+
+echo "cmi ansible_ssh_private_key_file=$VM_KEY" > cmi.hosts
+
+export ANSIBLE_CONFIG=cmi.conf
+export ANSIBLE_HOSTS=cmi.hosts
+
+ansible-playbook -v --step cmi-logicalinterfaces.yaml
diff --git a/cord-pod/cdn/setup-cmi.sh b/cord-pod/cdn/setup-cmi.sh
new file mode 100644
index 0000000..8cfe11c
--- /dev/null
+++ b/cord-pod/cdn/setup-cmi.sh
@@ -0,0 +1,20 @@
+#! /bin/bash
+
+source cmi-settings.sh
+
+#apt-get -y install sshpass
+
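+# Build a throwaway ansible config that reaches the CMI VM by proxying SSH
+# through the compute node (NODE_KEY) to the CMI's management IP (VM_KEY)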
+echo "[ssh_connection]" > cmi.conf
+echo "ssh_args = -o \"ProxyCommand ssh -q -i $NODE_KEY -o StrictHostKeyChecking=no root@$COMPUTE_NODE nc $MGMT_IP 22\"" >> cmi.conf
+echo "scp_if_ssh = True" >> cmi.conf
+echo "pipelining = True" >> cmi.conf
+echo >> cmi.conf
+echo "[defaults]" >> cmi.conf
+echo "host_key_checking = False" >> cmi.conf
+
+echo "cmi ansible_ssh_private_key_file=$VM_KEY" > cmi.hosts
+
+export ANSIBLE_CONFIG=cmi.conf
+export ANSIBLE_HOSTS=cmi.hosts
+
+ansible-playbook -v cmi.yaml
diff --git a/cord-pod/cdn/ssh-cmi.sh b/cord-pod/cdn/ssh-cmi.sh
new file mode 100644
index 0000000..15a0408
--- /dev/null
+++ b/cord-pod/cdn/ssh-cmi.sh
@@ -0,0 +1,5 @@
+#! /bin/bash
+
+source ./cmi-settings.sh
+
+ssh -i $VM_KEY -o "ProxyCommand ssh -q -i $NODE_KEY -o StrictHostKeyChecking=no root@$COMPUTE_NODE nc $MGMT_IP 22" root@cmi
diff --git a/cord-pod/cdn/templates/setup_answers.txt b/cord-pod/cdn/templates/setup_answers.txt
new file mode 100644
index 0000000..1c20be9
--- /dev/null
+++ b/cord-pod/cdn/templates/setup_answers.txt
@@ -0,0 +1,18 @@
+y
+{{ cmi_password }}
+{{ cmi_password }}
+n
+{{ eth_device }}
+y
+{{ cmi_hostname }}
+{{ eth_device }}
+
+
+{{ cdn_site }}
+{{ cdn_short_name }}
+{{ cmi_dns }}
+
+{{ cdn_name }}
+{{ cmi_password }}
+{{ cmi_password }}
+y
diff --git a/cord-pod/cdn/templates/setup_cmi_logicalinterfaces.sh b/cord-pod/cdn/templates/setup_cmi_logicalinterfaces.sh
new file mode 100644
index 0000000..2ac8422
--- /dev/null
+++ b/cord-pod/cdn/templates/setup_cmi_logicalinterfaces.sh
@@ -0,0 +1,14 @@
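+# Executed inside the CMI via "vserver coplc exec onevsh": for each node's primary
+# IP, add an "External" LogicalInterface for the HyperCache and RequestRouter services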
+lab="External"
+for service in ["HyperCache", "RequestRouter"]:
+    for node in ListAll("Node"):
+        node_id = node["node_id"]
+        for interface_id in node["interface_ids"]:
+            iface=Read("Interface", interface_id)
+            if iface["is_primary"] and len(iface["ip_address_ids"])==1:
+                ip_id = iface["ip_address_ids"][0]
+                if ListAll("LogicalInterface", {"node_id": node_id, "ip_address_ids": [ip_id], "label": lab, "service": service}):
+                    print "External label exists for node", node_id, "ip", ip_id, "service", service
+                else:
+                    print "Adding external label for node", node_id, "ip", ip_id, "service", service
+                    li = Create("LogicalInterface", {"node_id": node_id, "label": lab, "service": service})
+                    Bind("LogicalInterface", li, "IpAddress", ip_id)
diff --git a/cord-pod/cdn/templates/setup_cmi_node.sh b/cord-pod/cdn/templates/setup_cmi_node.sh
new file mode 100644
index 0000000..93435a3
--- /dev/null
+++ b/cord-pod/cdn/templates/setup_cmi_node.sh
@@ -0,0 +1,20 @@
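+# Executed inside the CMI via "vserver coplc exec plcsh": registers the cdnnode,
+# adds the CDN slices to it, and writes out its takeover script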
+site_id=GetSites()[0]["site_id"]
+nodeinfo = {'hostname': "{{ node_hostname }}", 'dns': "8.8.8.8"}
+n_id = AddNode(site_id, nodeinfo)
+mac = "DE:AD:BE:EF:00:01"
+interfacetemplate = {'mac': mac, 'kind': 'physical', 'method': 'static', 'is_primary': True, 'if_name': 'eth0'}
+i_id = AddInterface(n_id, interfacetemplate)
+ip_addr = "169.254.169.1" # TO DO: get this from Neutron in the future
+netmask = "255.255.255.254" # TO DO: get this from Neutron in the future
+ipinfo = {'ip_addr': ip_addr, 'netmask': netmask, 'type': 'ipv4'}
+ip_id = AddIpAddress(i_id, ipinfo)
+routeinfo = {'interface_id': i_id, 'next_hop': "127.0.0.127", 'subnet': '0.0.0.0', 'metric': 1}
+r_id = AddRoute(n_id, routeinfo)
+hpc_slice_id = GetSlices({"name": "co_coblitz"})[0]["slice_id"]
+AddSliceToNodes(hpc_slice_id, [n_id])
+dnsdemux_slice_id = GetSlices({"name": "co_dnsdemux"})[0]["slice_id"]
+dnsredir_slice_id = GetSlices({"name": "co_dnsredir_coblitz"})[0]["slice_id"]
+AddSliceToNodes(dnsdemux_slice_id, [n_id])
+AddSliceToNodes(dnsredir_slice_id, [n_id])
+takeoverscript=GetBootMedium(n_id, "node-cloudinit", '')
+file("/root/takeover-{{ node_hostname }}","w").write(takeoverscript)
diff --git a/cord-pod/cdn/templates/setup_cmi_onevsh.sh b/cord-pod/cdn/templates/setup_cmi_onevsh.sh
new file mode 100644
index 0000000..c517780
--- /dev/null
+++ b/cord-pod/cdn/templates/setup_cmi_onevsh.sh
@@ -0,0 +1,19 @@
+def CreateOrFind(kind, args):
+    objs=ListAll(kind, args.copy())
+    if objs:
+        id_name = {"ServiceProvider": "service_provider_id",
+                   "ContentProvider": "content_provider_id",
+                   "OriginServer": "origin_server_id",
+                   "CDNPrefix": "cdn_prefix_id"}
+        print kind, "exists with args", args
+        return objs[0].get(id_name[kind])
+    else:
+        print "create", kind, "with args", args
+        return Create(kind, args)
+sp=CreateOrFind("ServiceProvider", {"account": "cord", "name": "cord", "enabled": True})
+cp=CreateOrFind("ContentProvider", {"account": "test", "name": "test", "enabled": True, "service_provider_id": sp})
+ors=CreateOrFind("OriginServer", {"url": "http://www.cs.arizona.edu", "content_provider_id": cp, "service_type": "HyperCache"})
+pre=CreateOrFind("CDNPrefix", {"service": "HyperCache", "enabled": True, "content_provider_id": cp, "cdn_prefix": "test.vicci.org", "default_origin_server": "http://www.cs.arizona.edu"})
+cp=CreateOrFind("ContentProvider", {"account": "onlab", "name": "onlab", "enabled": True, "service_provider_id": sp})
+ors=CreateOrFind("OriginServer", {"url": "http://onlab.vicci.org", "content_provider_id": cp, "service_type": "HyperCache"})
+pre=CreateOrFind("CDNPrefix", {"service": "HyperCache", "enabled": True, "content_provider_id": cp, "cdn_prefix": "onlab.vicci.org", "default_origin_server": "http://onlab.vicci.org"})
diff --git a/cord-pod/ceilometer.yaml b/cord-pod/ceilometer.yaml
new file mode 100644
index 0000000..07b163e
--- /dev/null
+++ b/cord-pod/ceilometer.yaml
@@ -0,0 +1,263 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Set up the CORD monitoring services -- Ceilometer and sFlow.
+
+imports:
+   - custom_types/xos.yaml
+
+node_types:
+    tosca.nodes.SFlowService:
+        derived_from: tosca.nodes.Root
+        description: >
+            XOS SFlow Collection Service
+        capabilities:
+            scalable:
+                type: tosca.capabilities.Scalable
+            service:
+                type: tosca.capabilities.xos.Service
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Type of service.
+            view_url:
+                type: string
+                required: false
+                description: URL to follow when icon is clicked in the Service Directory.
+            icon_url:
+                type: string
+                required: false
+                description: ICON to display in the Service Directory.
+            enabled:
+                type: boolean
+                default: true
+            published:
+                type: boolean
+                default: true
+                description: If True then display this Service in the Service Directory.
+            public_key:
+                type: string
+                required: false
+                description: Public key to install into Instances to allow Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
+            versionNumber:
+                type: string
+                required: false
+                description: Version number of Service.
+            sflow_port:
+              type: integer
+              required: false
+              default: 6343
+              description: sFlow listening port
+            sflow_api_port:
+              type: integer
+              required: false
+              default: 33333
+              description: sFlow publish subscribe api listening port
+
+    tosca.nodes.CeilometerService:
+        derived_from: tosca.nodes.Root
+        description: >
+            XOS Ceilometer Service
+        capabilities:
+            scalable:
+                type: tosca.capabilities.Scalable
+            service:
+                type: tosca.capabilities.xos.Service
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Type of service.
+            view_url:
+                type: string
+                required: false
+                description: URL to follow when icon is clicked in the Service Directory.
+            icon_url:
+                type: string
+                required: false
+                description: ICON to display in the Service Directory.
+            enabled:
+                type: boolean
+                default: true
+            published:
+                type: boolean
+                default: true
+                description: If True then display this Service in the Service Directory.
+            public_key:
+                type: string
+                required: false
+                description: Public key to install into Instances to allow Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
+            versionNumber:
+                type: string
+                required: false
+                description: Version number of Service.
+            ceilometer_pub_sub_url:
+                type: string
+                required: false
+                description: REST URL of ceilometer PUB/SUB component
+
+    tosca.nodes.CeilometerTenant:
+        derived_from: tosca.nodes.Root
+        description: >
+            CORD: A Tenant of the Ceilometer Service.
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of tenant
+
+topology_template:
+  node_templates:
+    service_ceilometer:
+      type: tosca.nodes.CeilometerService
+      requirements:
+      properties:
+          view_url: /admin/ceilometer/ceilometerservice/$id$/
+          kind: ceilometer
+          ceilometer_pub_sub_url: http://10.11.10.1:4455/
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          private_key_fn: /opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key
+      artifacts:
+          pubkey: /opt/xos/synchronizers/monitoring_channel/monitoring_channel_public_key
+
+#    service_sflow:
+#      type: tosca.nodes.SFlowService
+#      requirements:
+#      properties:
+#          view_url: /admin/ceilometer/sflowservice/$id$/
+#          kind: sflow
+#          sflow_port: 6343
+#          sflow_api_port: 33333
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    management:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          no-create: true
+          no-delete: true
+          no-update: true
+
+#    ceilometer_network:
+#      type: tosca.nodes.network.Network.XOS
+#      properties:
+#          ip_version: 4
+#          labels: ceilometer_client_access
+#      requirements:
+#          - network_template:
+#              node: Private
+#              relationship: tosca.relationships.UsesNetworkTemplate
+#          - owner:
+#              node: mysite_ceilometer
+#              relationship: tosca.relationships.MemberOfSlice
+#          - connection:
+#              node: mysite_ceilometer
+#              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    ceilometer-trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    m1.small:
+      type: tosca.nodes.Flavor
+
+    mysite_ceilometer:
+      description: Ceilometer Proxy Slice
+      type: tosca.nodes.Slice
+      requirements:
+          - ceilometer_service:
+              node: service_ceilometer
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: ceilometer-trusty-server-multi-nic
+                relationship: tosca.relationships.DefaultImage
+          - management:
+              node: management
+              relationship: tosca.relationships.ConnectsToNetwork
+          - m1.small:
+              node: m1.small
+              relationship: tosca.relationships.DefaultFlavor
+
+#    mysite_sflow:
+#      description: Slice for sFlow service
+#      type: tosca.nodes.Slice
+#      requirements:
+#          - sflow_service:
+#              node: service_sflow
+#              relationship: tosca.relationships.MemberOfService
+#          - site:
+#              node: mysite
+#              relationship: tosca.relationships.MemberOfSite
+
+    my_ceilometer_tenant:
+      description: Ceilometer Service default Tenant
+      type: tosca.nodes.CeilometerTenant
+      requirements:
+          - provider_service:
+              node: service_ceilometer
+              relationship: tosca.relationships.MemberOfService
+
+    # Virtual machines
+#    sflow_service_instance:
+#      type: tosca.nodes.Compute
+#      capabilities:
+#        # Host container properties
+#        host:
+#         properties:
+#           num_cpus: 1
+#           disk_size: 10 GB
+#           mem_size: 4 MB
+#        # Guest Operating System properties
+#        os:
+#          properties:
+#            # host Operating System image properties
+#            architecture: x86_64
+#            type: linux
+#            distribution: Ubuntu
+#            version: 14.10
+#      requirements:
+#          - slice:
+#                node: mysite_sflow
+#                relationship: tosca.relationships.MemberOfSlice
+
+    Ceilometer:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosCeilometerDashboard
+    Tenant:
+      type: tosca.nodes.DashboardView
+      properties:
+          no-create: true
+          no-update: true
+          no-delete: true
+
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      properties:
+          firstname: XOS
+          lastname: admin
+          is_admin: true
+      requirements:
+          - tenant_dashboard:
+              node: Tenant
+              relationship: tosca.relationships.UsesDashboard
+          - ceilometer_dashboard:
+              node: Ceilometer
+              relationship: tosca.relationships.UsesDashboard
diff --git a/cord-pod/cleanup.sh b/cord-pod/cleanup.sh
new file mode 100755
index 0000000..704cacb
--- /dev/null
+++ b/cord-pod/cleanup.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+function cleanup_network {
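+  # delete any ports still attached to the network's subnet, then the network itself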
+  NETWORK=$1
+  SUBNETS=`neutron net-show $NETWORK | grep -i subnets | awk '{print $4}'`
+  if [[ $SUBNETS != "" ]]; then
+      PORTS=`neutron port-list | grep -i $SUBNETS | awk '{print $2}'`
+      for PORT in $PORTS; do
+          echo "Deleting port $PORT"
+          neutron port-delete $PORT
+      done
+  fi
+  neutron net-delete $NETWORK
+}
+
+source ./admin-openrc.sh
+
+echo "Deleting VMs"
+# Delete all VMs
+VMS=$( nova list --all-tenants|grep mysite|awk '{print $2}' )
+for VM in $VMS
+do
+    nova delete $VM
+done
+
+echo "Waiting 5 seconds..."
+sleep 5
+
+cleanup_network lan_network
+cleanup_network wan_network
+cleanup_network mysite_vcpe-private
+cleanup_network mysite_vsg-access
+cleanup_network management
+
+echo "Deleting networks"
+# Delete all networks beginning with mysite_
+NETS=$( neutron net-list --all-tenants|grep mysite|awk '{print $2}' )
+for NET in $NETS
+do
+    neutron net-delete $NET
+done
+
+neutron net-delete lan_network || true
+neutron net-delete subscriber_network || true
+neutron net-delete public_network || true
+neutron net-delete hpc_client_network || true
+neutron net-delete ceilometer_network || true
+neutron net-delete management || true
+neutron net-delete mysite_vsg-access || true
+neutron net-delete exampleservice-public || true
diff --git a/cord-pod/cord-volt-devices.yaml b/cord-pod/cord-volt-devices.yaml
new file mode 100644
index 0000000..8b41623
--- /dev/null
+++ b/cord-pod/cord-volt-devices.yaml
@@ -0,0 +1,47 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Just enough Tosca to get vOLT devices and access agents onto the CORD POD
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    service#volt:
+      type: tosca.nodes.VOLTService
+      properties:
+          no-create: True
+          no-delete: True
+          no-update: True
+
+    voltdev-1:
+      type: tosca.nodes.VOLTDevice
+      properties:
+            driver: pmc-olt
+            openflow_id: of:1000000000000001
+            access_devices: >
+              2 222,
+              3 223,
+              4 224
+      requirements:
+          - volt_service:
+              node: service#volt
+              relationship: tosca.relationships.MemberOfService
+          - access_agent:
+              node: agent-1
+              relationship: tosca.relationships.UsesAgent
+
+    agent-1:
+      type: tosca.nodes.AccessAgent
+      properties:
+          mac: AA:BB:CC:DD:EE:FF
+          port_mappings: >
+            of:0000000000000002/2 DE:AD:BE:EF:BA:11,
+            of:0000000000000002/3 BE:EF:DE:AD:BE:EF
+      requirements:
+          - volt_service:
+              node: service#volt
+              relationship: tosca.relationships.MemberOfService
+
+
+
diff --git a/cord-pod/cord-vtn-vsg.yaml b/cord-pod/cord-vtn-vsg.yaml
new file mode 100644
index 0000000..4f8b9ec
--- /dev/null
+++ b/cord-pod/cord-vtn-vsg.yaml
@@ -0,0 +1,257 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Just enough Tosca to get the vSG slice running on the CORD POD
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    # CORD Services
+    service#vtr:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /admin/vtr/vtrservice/$id$/
+          kind: vTR
+          replaces: service_vtr
+
+    service#volt:
+      type: tosca.nodes.VOLTService
+      requirements:
+          - vsg_tenant:
+              node: service#vsg
+              relationship: tosca.relationships.TenantOfService
+      properties:
+          view_url: /admin/volt/voltservice/$id$/
+          kind: vOLT
+          replaces: service_volt
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          private_key_fn: /opt/xos/services/volt/keys/volt_rsa
+      artifacts:
+          pubkey: /opt/xos/services/volt/keys/volt_rsa.pub
+
+    addresses_vsg:
+      type: tosca.nodes.AddressPool
+      properties:
+          addresses: 10.168.0.0/24
+          gateway_ip: 10.168.0.1
+          gateway_mac: 02:42:0a:a8:00:01
+
+    addresses_exampleservice-public:
+      type: tosca.nodes.AddressPool
+      properties:
+          addresses: 10.168.1.0/24
+          gateway_ip: 10.168.1.1
+          gateway_mac: 02:42:0a:a8:00:01
+
+    service#vsg:
+      type: tosca.nodes.VSGService
+      requirements:
+          - vrouter_tenant:
+              node: service#vrouter
+              relationship: tosca.relationships.TenantOfService
+      properties:
+          view_url: /admin/vsg/vsgservice/$id$/
+          backend_network_label: hpc_client
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          private_key_fn: /opt/xos/services/vsg/keys/vsg_rsa
+#          node_label: label_vsg
+          replaces: service_vsg
+      artifacts:
+          pubkey: /opt/xos/services/vsg/keys/vsg_rsa.pub
+
+    service#vrouter:
+      type: tosca.nodes.VRouterService
+      properties:
+          view_url: /admin/vrouter/vrouterservice/$id$/
+          replaces: service_vrouter
+      requirements:
+          - addresses_vsg:
+              node: addresses_vsg
+              relationship: tosca.relationships.ProvidesAddresses
+          - addresses_service1:
+              node: addresses_exampleservice-public
+              relationship: tosca.relationships.ProvidesAddresses
+
+
+    service#ONOS_CORD:
+      type: tosca.nodes.ONOSService
+      properties:
+          no-delete: true
+          no-create: true
+          no-update: true
+
+    service#ONOS_Fabric:
+      type: tosca.nodes.ONOSService
+      properties:
+          no-delete: true
+          no-create: true
+          no-update: true
+
+    vOLT_ONOS_app:
+      type: tosca.nodes.ONOSvOLTApp
+      requirements:
+          - onos_tenant:
+              node: service#ONOS_CORD
+              relationship: tosca.relationships.TenantOfService
+          - volt_service:
+              node: service#volt
+              relationship: tosca.relationships.UsedByService
+      properties:
+          install_dependencies: onos-ext-notifier-1.0-SNAPSHOT.oar, onos-ext-volt-event-publisher-1.0-SNAPSHOT.oar
+          dependencies: org.onosproject.openflow-base, org.onosproject.olt, org.ciena.onos.ext_notifier, org.ciena.onos.volt_event_publisher
+          autogenerate: volt-network-cfg
+
+    vRouter_ONOS_app:
+      type: tosca.nodes.ONOSvRouterApp
+      requirements:
+          - onos_tenant:
+              node: service#ONOS_Fabric
+              relationship: tosca.relationships.TenantOfService
+          - vrouter_service:
+              node: service#vrouter
+              relationship: tosca.relationships.UsedByService
+      properties:
+          dependencies: org.onosproject.vrouter
+          autogenerate: vrouter-network-cfg
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    management:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          no-create: true
+          no-delete: true
+          no-update: true
+
+    image#vsg-1.0:
+      type: tosca.nodes.Image
+
+    mysite:
+      type: tosca.nodes.Site
+
+    label_vsg:
+      type: tosca.nodes.NodeLabel
+
+    # Networks required by the CORD setup
+    mysite_vsg-access:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_vsg
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_vsg
+              relationship: tosca.relationships.ConnectsToSlice
+
+    # CORD Slices
+    mysite_vsg:
+      description: vSG Controller Slice
+      type: tosca.nodes.Slice
+      properties:
+          network: noauto
+      requirements:
+          - vsg_service:
+              node: service#vsg
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - management:
+              node: management
+              relationship: tosca.relationships.ConnectsToNetwork
+          - image:
+              node: image#vsg-1.0
+              relationship: tosca.relationships.DefaultImage
+
+    # Let's add a user who can be administrator of the household
+    johndoe@myhouse.com:
+      type: tosca.nodes.User
+      properties:
+          password: letmein
+          firstname: john
+          lastname: doe
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - dependency:
+                node: mysite_vsg
+                relationship: tosca.relationships.DependsOn
+
+    # A subscriber
+    My House:
+       type: tosca.nodes.CORDSubscriber
+       properties:
+           service_specific_id: 123
+           firewall_enable: false
+           cdn_enable: false
+           url_filter_enable: false
+           url_filter_level: R
+       requirements:
+          - house_admin:
+              node: johndoe@myhouse.com
+              relationship: tosca.relationships.AdminPrivilege
+
+    Mom's PC:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 01:02:03:04:05:06
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    Dad's PC:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 90:E2:BA:82:F9:75
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    Jack's Laptop:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 68:5B:35:9D:91:D5
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    Jill's Laptop:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 34:36:3B:C9:B6:A6
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    My Volt:
+        type: tosca.nodes.VOLTTenant
+        properties:
+            service_specific_id: 123
+            s_tag: 222
+            c_tag: 111
+        requirements:
+            - provider_service:
+                node: service#volt
+                relationship: tosca.relationships.MemberOfService
+            - subscriber:
+                node: My House
+                relationship: tosca.relationships.BelongsToSubscriber
+            - dependency:
+                node: mysite_vsg
+                relationship: tosca.relationships.DependsOn
diff --git a/cord-pod/docker-compose-bootstrap.yml b/cord-pod/docker-compose-bootstrap.yml
new file mode 100644
index 0000000..78a014e
--- /dev/null
+++ b/cord-pod/docker-compose-bootstrap.yml
@@ -0,0 +1,61 @@
+xos_db:
+    image: xosproject/xos-postgres
+    expose:
+        - "5432"
+
+xos_synchronizer_onboarding:
+    image: xosproject/xos-synchronizer-onboarding
+    command: bash -c "cd /opt/xos/synchronizers/onboarding; ./run.sh"
+    #command: sleep 86400
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: onboarding
+    links:
+        - xos_db
+    volumes:
+        - /var/run/docker.sock:/var/run/docker.sock
+        - ./key_import:/opt/xos/key_import:ro
+        - ./onboarding-docker-compose:/opt/xos/synchronizers/onboarding/docker-compose
+        - ../../xos_services:/opt/xos_services
+    log_driver: "json-file"
+    log_opt:
+            max-size: "100k"
+            max-file: "5"
+
+xos_synchronizer_openstack:
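+    # The sleep is presumably a startup-ordering workaround: it gives
+    # xos_db and the initial migrations time to finish before the
+    # synchronizer starts.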
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/openstack/xos-synchronizer.py"
+    image: xosproject/xos-synchronizer-openstack
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: openstack
+    links:
+        - xos_db
+    volumes:
+        - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+        - ./xos_cord_config:/opt/xos/xos_configuration/xos_cord_config:ro
+        - .:/root/setup:ro
+        - ../vtn/files/xos_vtn_config:/opt/xos/xos_configuration/xos_vtn_config:ro
+        - ./images:/opt/xos/images:ro
+    log_driver: "json-file"
+    log_opt:
+            max-size: "100k"
+            max-file: "5"
+
+xos_bootstrap_ui:
+    command: python /opt/xos/manage.py runserver 0.0.0.0:81 --insecure --makemigrations
+    environment:
+        - CONFIG_DIR
+    image: xosproject/xos
+    links:
+        - xos_db
+    ports:
+        - "81:81"
+    volumes:
+        - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+        - ./xos_cord_config:/opt/xos/xos_configuration/xos_cord_config:ro
+        - ../vtn/files/xos_vtn_config:/opt/xos/xos_configuration/xos_vtn_config:ro
+        - ../../xos_services:/opt/xos_services
+    log_driver: "json-file"
+    log_opt:
+            max-size: "100k"
+            max-file: "5"
diff --git a/cord-pod/files/exampleservice_config b/cord-pod/files/exampleservice_config
new file mode 100644
index 0000000..823e31d
--- /dev/null
+++ b/cord-pod/files/exampleservice_config
@@ -0,0 +1,29 @@
+# Required by XOS
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+# Required by XOS
+[api]
+nova_enabled=True
+
+# Sets options for the synchronizer
+[observer]
+name=exampleservice
+dependency_graph=/opt/xos/synchronizers/exampleservice/model-deps
+steps_dir=/opt/xos/synchronizers/exampleservice/steps
+sys_dir=/opt/xos/synchronizers/exampleservice/sys
+logfile=/var/log/xos_backend.log
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+proxy_ssh=True
+proxy_ssh_key=/root/setup/node_key
+proxy_ssh_user=root
+
+[networking]
+use_vtn=True
+
diff --git a/cord-pod/files/monitoring_channel_synchronizer_config b/cord-pod/files/monitoring_channel_synchronizer_config
new file mode 100644
index 0000000..fb3f22a
--- /dev/null
+++ b/cord-pod/files/monitoring_channel_synchronizer_config
@@ -0,0 +1,43 @@
+
+[plc]
+name=plc
+deployment=VICCI
+
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+[api]
+host=128.112.171.237
+port=8000
+ssl_key=None
+ssl_cert=None
+ca_ssl_cert=None
+ratelimit_enabled=0
+omf_enabled=0
+mail_support_address=support@localhost
+nova_enabled=True
+
+[observer]
+name=monitoring_channel
+dependency_graph=/opt/xos/synchronizers/monitoring_channel/model-deps
+steps_dir=/opt/xos/synchronizers/monitoring_channel/steps
+sys_dir=/opt/xos/synchronizers/monitoring_channel/sys
+deleters_dir=/opt/xos/synchronizers/monitoring_channel/deleters
+log_file=console
+driver=None
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+full_setup=True
+# For CORD_POD config, set proxy_ssh to True even on cloudlab
+proxy_ssh=True
+proxy_ssh_key=/root/setup/node_key
+proxy_ssh_user=root
+
+[feefie]
+client_id='vicci_dev_central'
+user_id='pl'
diff --git a/cord-pod/files/vcpe_synchronizer_config b/cord-pod/files/vcpe_synchronizer_config
new file mode 100644
index 0000000..9da6ede
--- /dev/null
+++ b/cord-pod/files/vcpe_synchronizer_config
@@ -0,0 +1,47 @@
+
+[plc]
+name=plc
+deployment=VICCI
+
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+[api]
+host=128.112.171.237
+port=8000
+ssl_key=None
+ssl_cert=None
+ca_ssl_cert=None
+ratelimit_enabled=0
+omf_enabled=0
+mail_support_address=support@localhost
+nova_enabled=True
+
+[observer]
+name=vcpe
+dependency_graph=/opt/xos/synchronizers/vsg/model-deps
+steps_dir=/opt/xos/synchronizers/vsg/steps
+sys_dir=/opt/xos/synchronizers/vsg/sys
+deleters_dir=/opt/xos/synchronizers/vsg/deleters
+log_file=console
+# alternatively: log_file=/var/log/hpc.log
+driver=None
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+full_setup=True
+# set proxy_ssh to False when running on CloudLab
+proxy_ssh=True
+proxy_ssh_key=/root/setup/node_key
+proxy_ssh_user=root
+
+[networking]
+use_vtn=True
+
+[feefie]
+client_id='vicci_dev_central'
+user_id='pl'
diff --git a/cord-pod/files/vtr_synchronizer_config b/cord-pod/files/vtr_synchronizer_config
new file mode 100644
index 0000000..223ab00
--- /dev/null
+++ b/cord-pod/files/vtr_synchronizer_config
@@ -0,0 +1,47 @@
+
+[plc]
+name=plc
+deployment=VICCI
+
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+[api]
+host=128.112.171.237
+port=8000
+ssl_key=None
+ssl_cert=None
+ca_ssl_cert=None
+ratelimit_enabled=0
+omf_enabled=0
+mail_support_address=support@localhost
+nova_enabled=True
+
+[observer]
+name=vtr
+dependency_graph=/opt/xos/synchronizers/vtr/model-deps
+steps_dir=/opt/xos/synchronizers/vtr/steps
+sys_dir=/opt/xos/synchronizers/vtr/sys
+deleters_dir=/opt/xos/synchronizers/vtr/deleters
+log_file=console
+# alternatively: log_file=/var/log/hpc.log
+driver=None
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+full_setup=True
+# set proxy_ssh to False when running on CloudLab
+proxy_ssh=True
+proxy_ssh_key=/root/setup/node_key
+proxy_ssh_user=root
+
+[networking]
+use_vtn=True
+
+[feefie]
+client_id='vicci_dev_central'
+user_id='pl'
diff --git a/cord-pod/files/xos_vtn_config b/cord-pod/files/xos_vtn_config
new file mode 100644
index 0000000..5dfd459
--- /dev/null
+++ b/cord-pod/files/xos_vtn_config
@@ -0,0 +1,2 @@
+[networking]
+use_vtn=True
diff --git a/cord-pod/images/.gitignore b/cord-pod/images/.gitignore
new file mode 100644
index 0000000..6949d1f
--- /dev/null
+++ b/cord-pod/images/.gitignore
@@ -0,0 +1,3 @@
+*.img
+*.qcow2
+*.qcow
diff --git a/cord-pod/images/README.md b/cord-pod/images/README.md
new file mode 100644
index 0000000..aca55a9
--- /dev/null
+++ b/cord-pod/images/README.md
@@ -0,0 +1,5 @@
+# VM images for XOS
+
+Any cloud image files placed in this directory (with the suffix `.img`) will be
+automatically imported by XOS and added to Glance (OpenStack's image repository).
+For instance, the file `trusty-server-multi-nic.img` will be imported with the
+name `trusty-server-multi-nic`.
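+
+For example, to stage an image for import (hypothetical download URL):
+
+```shell
+cd cord-pod/images
+wget http://example.com/images/trusty-server-multi-nic.img
+# after the next XOS sync pass, the image should appear in Glance
+# as "trusty-server-multi-nic"
+```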
diff --git a/cord-pod/make-fabric-yaml.sh b/cord-pod/make-fabric-yaml.sh
new file mode 100644
index 0000000..a829690
--- /dev/null
+++ b/cord-pod/make-fabric-yaml.sh
@@ -0,0 +1,71 @@
+FN=$SETUPDIR/fabric.yaml
+
+rm -f $FN
+
+cat >> $FN <<EOF
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+imports:
+   - custom_types/xos.yaml
+
+description: generate fabric configuration
+
+topology_template:
+  node_templates:
+
+    service#ONOS_Fabric:
+      type: tosca.nodes.ONOSService
+      properties:
+          kind: onos
+          view_url: /admin/onos/onosservice/\$id\$/
+          no_container: true
+          rest_hostname: onos-fabric
+          replaces: service_ONOS_Fabric
+
+    service#fabric:
+      type: tosca.nodes.FabricService
+      properties:
+          view_url: /admin/fabric/fabricservice/\$id\$/
+          replaces: service_fabric
+
+
+EOF
+
+NODES=$( bash -c "source $SETUPDIR/admin-openrc.sh ; nova host-list" |grep compute|awk '{print $2}' )
+for NODE in $NODES; do
+    echo $NODE
+    cat >> $FN <<EOF
+    $NODE:
+      type: tosca.nodes.Node
+
+    # Fabric location field for node $NODE
+    ${NODE}_location_tag:
+      type: tosca.nodes.Tag
+      properties:
+          name: location
+          value: of:0000000000000001/1
+      requirements:
+          - target:
+              node: $NODE
+              relationship: tosca.relationships.TagsObject
+          - service:
+              node: service#ONOS_Fabric
+              relationship: tosca.relationships.MemberOfService
+EOF
+done
+
+cat >> $FN <<EOF
+    Fabric_ONOS_app:
+      type: tosca.nodes.ONOSApp
+      requirements:
+          - onos_tenant:
+              node: service#ONOS_Fabric
+              relationship: tosca.relationships.TenantOfService
+          - fabric_service:
+              node: service#fabric
+              relationship: tosca.relationships.UsedByService
+      properties:
+          dependencies: org.onosproject.lldpprovider, org.onosproject.hostprovider, org.onosproject.openflow-base, org.onosproject.openflow, org.onosproject.drivers, org.onosproject.segmentrouting
+EOF
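+
+# Usage sketch (assumptions: SETUPDIR points at the pod setup directory
+# containing admin-openrc.sh, and the nova CLI is installed), e.g.:
+#   SETUPDIR=/root/setup bash make-fabric-yaml.sh
+# Note that every node is given the same location tag value
+# (of:0000000000000001/1); edit the generated fabric.yaml so each node's
+# tag matches its actual fabric switch port before loading it into XOS.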
diff --git a/cord-pod/make-virtualbng-json.sh b/cord-pod/make-virtualbng-json.sh
new file mode 100644
index 0000000..993643c
--- /dev/null
+++ b/cord-pod/make-virtualbng-json.sh
@@ -0,0 +1,38 @@
+FN=$SETUPDIR/virtualbng.json
+
+rm -f $FN
+
+cat >> $FN <<EOF
+{
+    "localPublicIpPrefixes" : [
+        "10.254.0.128/25"
+    ],
+    "nextHopIpAddress" : "10.254.0.1",
+    "publicFacingMac" : "00:00:00:00:00:66",
+    "xosIpAddress" : "10.11.10.1",
+    "xosRestPort" : "9999",
+    "hosts" : {
+EOF
+
+NODES=$( sudo bash -c "source $SETUPDIR/admin-openrc.sh ; nova hypervisor-list" |grep -v ID|grep -v +|awk '{print $4}' )
+
+NODECOUNT=0
+for NODE in $NODES; do
+    ((NODECOUNT++))
+done
+
+I=0
+for NODE in $NODES; do
+    echo $NODE
+    ((I++))
+    if [[ "$I" -lt "$NODECOUNT" ]]; then
+        echo "      \"$NODE\" : \"of:0000000000000001/1\"," >> $FN
+    else
+        echo "      \"$NODE\" : \"of:0000000000000001/1\"" >> $FN
+    fi
+done
+
+cat >> $FN <<EOF
+    }
+}
+EOF
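+
+# Sketch of the generated virtualbng.json for a two-hypervisor pod
+# (hostnames are hypothetical):
+# {
+#     "localPublicIpPrefixes" : [
+#         "10.254.0.128/25"
+#     ],
+#     "nextHopIpAddress" : "10.254.0.1",
+#     "publicFacingMac" : "00:00:00:00:00:66",
+#     "xosIpAddress" : "10.11.10.1",
+#     "xosRestPort" : "9999",
+#     "hosts" : {
+#       "node1.example.org" : "of:0000000000000001/1",
+#       "node2.example.org" : "of:0000000000000001/1"
+#     }
+# }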
diff --git a/cord-pod/make-vtn-external-yaml.sh b/cord-pod/make-vtn-external-yaml.sh
new file mode 100644
index 0000000..71437d5
--- /dev/null
+++ b/cord-pod/make-vtn-external-yaml.sh
@@ -0,0 +1,110 @@
+FN=$SETUPDIR/vtn-external.yaml
+
+rm -f $FN
+
+cat >> $FN <<EOF
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+imports:
+   - custom_types/xos.yaml
+
+description: autogenerated node tags file for VTN configuration
+
+topology_template:
+  node_templates:
+
+    service#ONOS_CORD:
+      type: tosca.nodes.ONOSService
+      properties:
+          kind: onos
+          view_url: /admin/onos/onosservice/\$id\$/
+          no_container: true
+          rest_hostname: onos-cord
+          replaces: service_ONOS_CORD
+
+    service#vtn:
+      type: tosca.nodes.VTNService
+      properties:
+          view_url: /admin/vtn/vtnservice/\$id\$/
+          privateGatewayMac: 00:00:00:00:00:01
+          localManagementIp: 172.27.0.1/24
+          ovsdbPort: 6641
+          sshUser: root
+          sshKeyFile: /root/node_key
+          sshPort: 22
+          xosEndpoint: http://xos/
+          xosUser: padmin@vicci.org
+          xosPassword: letmein
+          replaces: service_vtn
+
+EOF
+
+NODES=$( bash -c "source $SETUPDIR/admin-openrc.sh ; nova host-list" |grep compute|awk '{print $2}' )
+for NODE in $NODES; do
+    echo $NODE
+    cat >> $FN <<EOF
+    $NODE:
+      type: tosca.nodes.Node
+
+    # VTN bridgeId field for node $NODE
+    ${NODE}_bridgeId_tag:
+      type: tosca.nodes.Tag
+      properties:
+          name: bridgeId
+          value: of:0000000000000001
+      requirements:
+          - target:
+              node: $NODE
+              relationship: tosca.relationships.TagsObject
+          - service:
+              node: service#ONOS_CORD
+              relationship: tosca.relationships.MemberOfService
+
+    # VTN dataPlaneIntf field for node $NODE
+    ${NODE}_dataPlaneIntf_tag:
+      type: tosca.nodes.Tag
+      properties:
+          name: dataPlaneIntf
+          value: fabric
+      requirements:
+          - target:
+              node: $NODE
+              relationship: tosca.relationships.TagsObject
+          - service:
+              node: service#ONOS_CORD
+              relationship: tosca.relationships.MemberOfService
+
+    # VTN dataPlaneIp field for node $NODE
+    ${NODE}_dataPlaneIp_tag:
+      type: tosca.nodes.Tag
+      properties:
+          name: dataPlaneIp
+          value: 10.168.0.253/24
+      requirements:
+          - target:
+              node: $NODE
+              relationship: tosca.relationships.TagsObject
+          - service:
+              node: service#ONOS_CORD
+              relationship: tosca.relationships.MemberOfService
+
+EOF
+done
+
+cat >> $FN <<EOF
+    VTN_ONOS_app:
+      type: tosca.nodes.ONOSVTNApp
+      requirements:
+          - onos_tenant:
+              node: service#ONOS_CORD
+              relationship: tosca.relationships.TenantOfService
+          - vtn_service:
+              node: service#vtn
+              relationship: tosca.relationships.UsedByService
+      properties:
+          install_dependencies: http://new-host:8080/repository/org/opencord/cord-config/1.0-SNAPSHOT/cord-config-1.0-SNAPSHOT.oar,http://new-host:8080/repository/org/opencord/vtn/1.0-SNAPSHOT/vtn-1.0-SNAPSHOT.oar
+          dependencies: org.onosproject.drivers, org.onosproject.drivers.ovsdb, org.onosproject.openflow-base, org.onosproject.ovsdb-base, org.onosproject.dhcp
+          autogenerate: vtn-network-cfg
+EOF
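+
+# Usage sketch (same assumptions as make-fabric-yaml.sh: SETUPDIR set,
+# admin-openrc.sh present, nova CLI available):
+#   SETUPDIR=/root/setup bash make-vtn-external-yaml.sh
+# The per-node bridgeId, dataPlaneIntf, and dataPlaneIp tag values above
+# are pod-wide defaults; adjust them per node in the generated
+# vtn-external.yaml if your compute nodes differ.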
diff --git a/cord-pod/mgmt-net.yaml b/cord-pod/mgmt-net.yaml
new file mode 100644
index 0000000..2bd0173
--- /dev/null
+++ b/cord-pod/mgmt-net.yaml
@@ -0,0 +1,40 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Set up management network for CORD POD
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+
+    management_template:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          visibility: private
+          translation: none
+
+    management:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+          cidr: 172.27.0.0/24
+      requirements:
+          - network_template:
+              node: management_template
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_management
+              relationship: tosca.relationships.MemberOfSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+    mysite_management:
+      description: This slice exists solely to own the management network
+      type: tosca.nodes.Slice
+      properties:
+          network: noauto
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
diff --git a/cord-pod/pod-cdn.yaml b/cord-pod/pod-cdn.yaml
new file mode 100644
index 0000000..2229686
--- /dev/null
+++ b/cord-pod/pod-cdn.yaml
@@ -0,0 +1,52 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Set up the CDN on the pod
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    management:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          no-create: true
+          no-delete: true
+          no-update: true
+
+    cdn-public:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+          cidr: 207.141.192.128/28
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_cdn
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_cdn
+              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+    mysite_cdn:
+      description: This slice holds the CDN
+      type: tosca.nodes.Slice
+      properties:
+          network: noauto
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - management:
+              node: management
+              relationship: tosca.relationships.ConnectsToNetwork
+
diff --git a/cord-pod/pod-exampleservice.yaml b/cord-pod/pod-exampleservice.yaml
new file mode 100644
index 0000000..0182a59
--- /dev/null
+++ b/cord-pod/pod-exampleservice.yaml
@@ -0,0 +1,94 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Set up the ExampleService on the pod
+
+imports:
+   - custom_types/xos.yaml
+   - custom_types/exampleservice.yaml
+
+topology_template:
+  node_templates:
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    management:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          no-create: true
+          no-delete: true
+          no-update: true
+
+    service#vrouter:
+      type: tosca.nodes.Service
+      properties:
+          no-create: true
+          no-delete: true
+          no-update: true
+
+    exampleservice-public:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_exampleservice
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_exampleservice
+              relationship: tosca.relationships.ConnectsToSlice
+          - vrouter_tenant:
+              node: service#vrouter
+              relationship: tosca.relationships.TenantOfService
+
+    mysite:
+      type: tosca.nodes.Site
+
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    mysite_exampleservice:
+      description: This slice holds the ExampleService
+      type: tosca.nodes.Slice
+      properties:
+          network: noauto
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - management:
+              node: management
+              relationship: tosca.relationships.ConnectsToNetwork
+          - exampleservice:
+              node: service#exampleservice
+              relationship: tosca.relationships.MemberOfService
+          - image:
+              node: trusty-server-multi-nic
+              relationship: tosca.relationships.DefaultImage
+
+    service#exampleservice:
+      type: tosca.nodes.ExampleService
+      requirements:
+          - management:
+              node: management
+              relationship: tosca.relationships.UsesNetwork
+      properties:
+          view_url: /admin/exampleservice/exampleservice/$id$/
+          kind: exampleservice
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          private_key_fn: /opt/xos/services/exampleservice/keys/exampleservice_rsa
+          service_message: hello
+      artifacts:
+          pubkey: /opt/xos/services/exampleservice/keys/exampleservice_rsa.pub
+
+    tenant#exampletenant1:
+      type: tosca.nodes.ExampleTenant
+      properties:
+          tenant_message: world
+      requirements:
+          - tenant:
+              node: service#exampleservice
+              relationship: tosca.relationships.TenantOfService
diff --git a/cord-pod/setup.yaml b/cord-pod/setup.yaml
new file mode 100644
index 0000000..c13f0eb
--- /dev/null
+++ b/cord-pod/setup.yaml
@@ -0,0 +1,61 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >
+    * Adds OpenCloud Sites, Deployments, and Controllers.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+
+    MyDeployment:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+
+    MyOpenStack:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: MyDeployment
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Kilo
+          auth_url: { get_script_env: [ SELF, adminrc, OS_AUTH_URL, LOCAL_FILE] }
+          admin_user: { get_script_env: [ SELF, adminrc, OS_USERNAME, LOCAL_FILE] }
+          admin_password: { get_script_env: [ SELF, adminrc, OS_PASSWORD, LOCAL_FILE] }
+          admin_tenant: { get_script_env: [ SELF, adminrc, OS_TENANT_NAME, LOCAL_FILE] }
+          domain: Default
+      artifacts:
+          adminrc: /root/setup/admin-openrc.sh
+
+    mysite:
+      type: tosca.nodes.Site
+      properties:
+          display_name: MySite
+          site_url: http://xosproject.org/
+      requirements:
+          - deployment:
+               node: MyDeployment
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: MyOpenStack
+                       relationship: tosca.relationships.UsesController
+
+    # This user already exists in XOS with this password
+    # It's an example of how to create new users
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: XOS
+          lastname: admin
+          password: letmein
diff --git a/cord-pod/synchronizers.yaml b/cord-pod/synchronizers.yaml
new file mode 100644
index 0000000..02035e3
--- /dev/null
+++ b/cord-pod/synchronizers.yaml
@@ -0,0 +1,19 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: This recipe provides additional configuration for the onboarded services.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    servicecontroller#vsg:
+      type: tosca.nodes.ServiceController
+      properties:
+        no-create: true
+        synchronizer_config: /root/setup/files/vcpe_synchronizer_config
+    servicecontroller#vtr:
+      type: tosca.nodes.ServiceController
+      properties:
+        no-create: true
+        synchronizer_config: /root/setup/files/vtr_synchronizer_config
diff --git a/cord-pod/xos.yaml b/cord-pod/xos.yaml
new file mode 100644
index 0000000..33374c0
--- /dev/null
+++ b/cord-pod/xos.yaml
@@ -0,0 +1,86 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Configure the XOS container for the CORD POD
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    xos:
+      type: tosca.nodes.XOS
+      properties:
+        ui_port: 80
+        bootstrap_ui_port: 81
+        docker_project_name: cordpod
+        db_container_name: cordpodbs_xos_db_1
+
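+    # Each XOSVolume node below mounts a host path into the XOS container.
+    # The path_join intrinsic presumably joins the CONFIG_DIR environment
+    # variable with the given relative path to form the host-side path.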
+    /opt/xos/xos_configuration/xos_common_config:
+      type: tosca.nodes.XOSVolume
+      properties:
+          host_path: { path_join: [ SELF, CONFIG_DIR, ../common/xos_common_config, ENV_VAR ] }
+          read_only: true
+      requirements:
+          - xos:
+             node: xos
+             relationship: tosca.relationships.UsedByXOS
+
+    /opt/xos/xos_configuration/xos_cord_config:
+      type: tosca.nodes.XOSVolume
+      properties:
+          host_path: { path_join: [ SELF, CONFIG_DIR, xos_cord_config, ENV_VAR ] }
+          read_only: true
+      requirements:
+          - xos:
+             node: xos
+             relationship: tosca.relationships.UsedByXOS
+
+    /opt/xos/xos_configuration/xos_vtn_config:
+      type: tosca.nodes.XOSVolume
+      properties:
+          host_path: { path_join: [ SELF, CONFIG_DIR, files/xos_vtn_config, ENV_VAR ] }
+          read_only: true
+      requirements:
+          - xos:
+              node: xos
+              relationship: tosca.relationships.UsedByXOS
+
+    /root/setup:
+      type: tosca.nodes.XOSVolume
+      properties:
+          host_path: { path_join: [ SELF, CONFIG_DIR, ., ENV_VAR ] }
+          read_only: true
+      requirements:
+          - xos:
+             node: xos
+             relationship: tosca.relationships.UsedByXOS
+
+#    /opt/xos/synchronizers/onos/onos_key.pub:
+#      type: tosca.nodes.XOSVolume
+#      properties:
+#          host_path: { path_join: [ SELF, CONFIG_DIR, id_rsa.pub, ENV_VAR ] }
+#          read_only: true
+#      requirements:
+#          - xos:
+#             node: xos
+#             relationship: tosca.relationships.UsedByXOS
+
+#    /opt/xos/synchronizers/vcpe/vcpe_public_key:
+#      type: tosca.nodes.XOSVolume
+#      properties:
+#          host_path: { path_join: [ SELF, CONFIG_DIR, id_rsa.pub, ENV_VAR ] }
+#          read_only: true
+#      requirements:
+#          - xos:
+#             node: xos
+#             relationship: tosca.relationships.UsedByXOS
+
+    /opt/xos/synchronizers/monitoring_channel/monitoring_channel_public_key:
+      type: tosca.nodes.XOSVolume
+      properties:
+          host_path: { path_join: [ SELF, CONFIG_DIR, id_rsa.pub, ENV_VAR ] }
+          read_only: true
+      requirements:
+          - xos:
+             node: xos
+             relationship: tosca.relationships.UsedByXOS
diff --git a/cord-pod/xos_cord_config b/cord-pod/xos_cord_config
new file mode 100644
index 0000000..a5448f7
--- /dev/null
+++ b/cord-pod/xos_cord_config
@@ -0,0 +1,6 @@
+[gui]
+branding_name=CORD
+#branding_css=/static/cord.css
+branding_icon=/static/cord-logo.png
+branding_favicon=/static/cord-favicon.png
+branding_bg=/static/cord-bg.jpg