Move configurations over from the xos repo

Change-Id: I66583bdaea582986d8f16a29066a79c6687b97fb
diff --git a/acord/Makefile b/acord/Makefile
new file mode 100644
index 0000000..e9b234c
--- /dev/null
+++ b/acord/Makefile
@@ -0,0 +1,94 @@
+SETUPDIR:=../setup
+MYIP:=$(shell hostname -i)
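+# MYIP is passed through to docker-compose so the containers get a "ctl"
+# extra_hosts entry pointing at this host (see docker-compose.yml).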
+
+cloudlab: common_cloudlab cloudlab_ceilometer_custom_images ceilometer_cloudlab_cord_plugins acord
+
+devstack: upgrade_pkgs common_devstack devstack_net_fix devstack_images ceilometer_devstack_cord_plugins acord
+
+cord: 
+	sudo MYIP=$(MYIP) docker-compose up -d
+	bash ../common/wait_for_xos.sh
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py none /opt/xos/configurations/common/fixtures.yaml
+	sudo docker-compose run xos python /opt/xos/tosca/run.py none /opt/xos/configurations/common/mydeployment.yaml
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/cloudlab-openstack.yaml
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /root/setup/nodes.yaml
+
+acord: cord exampleservice
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/acord/ceilometer.yaml
+
+exampleservice:
+	#Ensure exampleservice is enabled in xos/tools/xos-manage and the xos/settings.py file before uncommenting the lines below
+	#sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/acord/acord-exampleservice.yaml
+
+containers:
+	cd ../../../containers/xos; make devel
+	cd ../../../containers/synchronizer; make
+
+common_cloudlab:
+	make -C ../common -f Makefile.cloudlab
+
+common_devstack:
+	make -C ../common -f Makefile.devstack
+
+ceilometer_cloudlab_cord_plugins:
+	if [ -d ./ceilometer-plugins ]; then rm -fr ./ceilometer-plugins; fi
+	git clone https://github.com/srikanthvavila/ceilometer-plugins.git
+	sudo cp -r ceilometer-plugins/network/ext_services /usr/lib/python2.7/dist-packages/ceilometer/network/
+	sudo cp -r ceilometer-plugins/network/statistics/onos /usr/lib/python2.7/dist-packages/ceilometer/network/statistics/
+	sudo cp ceilometer-plugins/network/statistics/__init__.py /usr/lib/python2.7/dist-packages/ceilometer/network/statistics/ 
+	sudo cp ceilometer-plugins/entry_points.txt /usr/lib/python2.7/dist-packages/ceilometer-*egg-info/
+	sudo cp ceilometer-plugins/pipeline.yaml /etc/ceilometer/
+	echo "Restarting ceilometer-agent-notification"
+	sudo service ceilometer-agent-notification restart
+	echo "Restarting ceilometer-agent-central"
+	sudo service ceilometer-agent-central restart
+
+ceilometer_devstack_cord_plugins:
+	if [ -d ./ceilometer-plugins ]; then rm -fr ./ceilometer-plugins; fi
+	git clone https://github.com/srikanthvavila/ceilometer-plugins.git
+	sudo cp -r ceilometer-plugins/network/ext_services /opt/stack/ceilometer/ceilometer/network/
+	sudo cp ceilometer-plugins/entry_points.txt /opt/stack/ceilometer/ceilometer*egg-info/
+	sudo cp ceilometer-plugins/pipeline.yaml /etc/ceilometer/
+	echo "Remember to restart ceilometer-anotification ceilometer-acentral screens"
+
+ceilometer_pub_sub:
+	if [ -d ./pub-sub ]; then rm -fr ./pub-sub; fi
+	git clone https://github.com/srikanthvavila/pub-sub.git
+	echo "Starting Ceilometer PUB/SUB service...Ensure zookeeper and kafka services are launched (if required)"
+	cd pub-sub/ceilometer_pub_sub/ ; python sub_main.py & 
+	cd ../..
+
+stop:
+	sudo MYIP=$(MYIP) docker-compose stop
+
+rm: stop
+	sudo MYIP=$(MYIP) docker-compose rm
+
+showlogs:
+	sudo MYIP=$(MYIP) docker-compose logs
+
+ps:
+	sudo MYIP=$(MYIP) docker-compose ps
+
+cleanup: stop rm
+	./cleanup.sh
+	bash -c "source ../setup/admin-openrc.sh; nova list --all-tenants; neutron net-list"
+
+devstack_net_fix:
+	sudo ../common/devstack/net-fix.sh
+	sudo bash -c "source ../setup/admin-openrc.sh; neutron subnet-update private-subnet --dns-nameservers list=true 8.8.8.8 8.8.4.4"
+
+upgrade_pkgs:
+	sudo pip install httpie --upgrade
+
+rebuild_xos:
+	make -C ../../../containers/xos devel
+
+rebuild_synchronizer:
+	make -C ../../../containers/synchronizer
+
+devstack_images:
+	bash -c "source ../setup/admin-openrc.sh; glance image-show ceilometer-trusty-server-multi-nic || ! mkdir -p /opt/stack/images || ! wget http://www.vicci.org/cord/ceilometer-trusty-server-multi-nic.compressed.qcow2 -P /opt/stack/images || glance image-create --name ceilometer-trusty-server-multi-nic --disk-format qcow2 --file /opt/stack/images/ceilometer-trusty-server-multi-nic.compressed.qcow2 --container-format bare"
+
+cloudlab_ceilometer_custom_images:
+	bash -c "source ../setup/admin-openrc.sh; glance image-show ceilometer-trusty-server-multi-nic || if test -f /proj/xos-PG0/images/ceilometer-trusty-server-multi-nic.compressed.qcow2 ; then glance image-create --name ceilometer-trusty-server-multi-nic --disk-format qcow2 --file /proj/xos-PG0/images/ceilometer-trusty-server-multi-nic.compressed.qcow2 --container-format bare ; else mkdir -p /tmp/images && wget http://www.vicci.org/cord/ceilometer-trusty-server-multi-nic.compressed.qcow2 -P /tmp/images && glance image-create --name ceilometer-trusty-server-multi-nic --disk-format qcow2 --file /tmp/images/ceilometer-trusty-server-multi-nic.compressed.qcow2 --container-format bare ; fi "
diff --git a/acord/acord-exampleservice.yaml b/acord/acord-exampleservice.yaml
new file mode 100644
index 0000000..b6b23dd
--- /dev/null
+++ b/acord/acord-exampleservice.yaml
@@ -0,0 +1,56 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup the ExampleService on the ACORD setup
+
+imports:
+   - custom_types/xos.yaml
+   - custom_types/exampleservice.yaml
+
+topology_template:
+  node_templates:
+
+    mysite:
+      type: tosca.nodes.Site
+
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    m1.small:
+      type: tosca.nodes.Flavor
+
+    mysite_exampleservice:
+      description: This slice holds the ExampleService
+      type: tosca.nodes.Slice
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - exampleservice:
+              node: service_example
+              relationship: tosca.relationships.MemberOfService
+          - default_image:
+                node: trusty-server-multi-nic
+                relationship: tosca.relationships.DefaultImage
+          - m1.small:
+                node: m1.small
+                relationship: tosca.relationships.DefaultFlavor
+
+    service_example:
+      type: tosca.nodes.ExampleService
+      properties:
+          view_url: /admin/exampleservice/exampleservice/$id$/
+          kind: exampleservice
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          private_key_fn: /opt/xos/synchronizers/exampleservice/exampleservice_private_key
+          service_message: hello
+      artifacts:
+          pubkey: /opt/xos/synchronizers/exampleservice/exampleservice_public_key
+
+    exampletenant1:
+        type: tosca.nodes.ExampleTenant
+        properties:
+            tenant_message: world
+        requirements:
+          - tenant:
+              node: service_example
+              relationship: tosca.relationships.TenantOfService
diff --git a/acord/ceilometer.yaml b/acord/ceilometer.yaml
new file mode 100644
index 0000000..089837d
--- /dev/null
+++ b/acord/ceilometer.yaml
@@ -0,0 +1,262 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Set up the Ceilometer and sFlow monitoring services.
+
+imports:
+   - custom_types/xos.yaml
+
+node_types:
+    tosca.nodes.SFlowService:
+        derived_from: tosca.nodes.Root
+        description: >
+            XOS SFlow Collection Service
+        capabilities:
+            scalable:
+                type: tosca.capabilities.Scalable
+            service:
+                type: tosca.capabilities.xos.Service
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Type of service.
+            view_url:
+                type: string
+                required: false
+                description: URL to follow when icon is clicked in the Service Directory.
+            icon_url:
+                type: string
+                required: false
+                description: ICON to display in the Service Directory.
+            enabled:
+                type: boolean
+                default: true
+            published:
+                type: boolean
+                default: true
+                description: If True then display this Service in the Service Directory.
+            public_key:
+                type: string
+                required: false
+                description: Public key to install into Instances to allow Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
+            versionNumber:
+                type: string
+                required: false
+                description: Version number of Service.
+            sflow_port:
+              type: integer
+              required: false
+              default: 6343
+              description: sFlow listening port
+            sflow_api_port:
+              type: integer
+              required: false
+              default: 33333
+              description: sFlow publish/subscribe API listening port
+
+    tosca.nodes.CeilometerService:
+        derived_from: tosca.nodes.Root
+        description: >
+            XOS Ceilometer Service
+        capabilities:
+            scalable:
+                type: tosca.capabilities.Scalable
+            service:
+                type: tosca.capabilities.xos.Service
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Type of service.
+            view_url:
+                type: string
+                required: false
+                description: URL to follow when icon is clicked in the Service Directory.
+            icon_url:
+                type: string
+                required: false
+                description: ICON to display in the Service Directory.
+            enabled:
+                type: boolean
+                default: true
+            published:
+                type: boolean
+                default: true
+                description: If True then display this Service in the Service Directory.
+            public_key:
+                type: string
+                required: false
+                description: Public key to install into Instances to allow Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
+            versionNumber:
+                type: string
+                required: false
+                description: Version number of Service.
+            ceilometer_pub_sub_url:
+                type: string
+                required: false
+                description: REST URL of ceilometer PUB/SUB component
+
+    tosca.nodes.CeilometerTenant:
+        derived_from: tosca.nodes.Root
+        description: >
+            CORD: A Tenant of the Ceilometer Service.
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of tenant
+
+topology_template:
+  node_templates:
+    service_ceilometer:
+      type: tosca.nodes.CeilometerService
+      requirements:
+      properties:
+          view_url: /admin/ceilometer/ceilometerservice/$id$/
+          kind: ceilometer
+          ceilometer_pub_sub_url: http://130.127.133.58:4455/
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+      artifacts:
+          pubkey: /opt/xos/synchronizers/monitoring_channel/monitoring_channel_public_key
+
+    service_sflow:
+      type: tosca.nodes.SFlowService
+      requirements:
+      properties:
+          view_url: /admin/ceilometer/sflowservice/$id$/
+          kind: sflow
+          sflow_port: 6343
+          sflow_api_port: 33333
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+#
+#    ceilometer_network:
+#      type: tosca.nodes.network.Network.XOS
+#      properties:
+#          ip_version: 4
+#          labels: ceilometer_client_access
+#      requirements:
+#          - network_template:
+#              node: Private
+#              relationship: tosca.relationships.UsesNetworkTemplate
+#          - owner:
+#              node: mysite_ceilometer
+#              relationship: tosca.relationships.MemberOfSlice
+#          - connection:
+#              node: mysite_ceilometer
+#              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    ceilometer-trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    m1.small:
+      type: tosca.nodes.Flavor
+
+    mysite_ceilometer:
+      description: Ceilometer Proxy Slice
+      type: tosca.nodes.Slice
+      requirements:
+          - ceilometer_service:
+              node: service_ceilometer
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: ceilometer-trusty-server-multi-nic
+                relationship: tosca.relationships.DefaultImage
+          - m1.small:
+                node: m1.small
+                relationship: tosca.relationships.DefaultFlavor
+      properties:
+          max_instances: 2
+
+    mysite_sflow:
+      description: Slice for sFlow service
+      type: tosca.nodes.Slice
+      requirements:
+          - sflow_service:
+              node: service_sflow
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: trusty-server-multi-nic
+                relationship: tosca.relationships.DefaultImage
+          - m1.small:
+                node: m1.small
+                relationship: tosca.relationships.DefaultFlavor
+      properties:
+          max_instances: 2
+
+    my_ceilometer_tenant:
+      description: Ceilometer Service default Tenant
+      type: tosca.nodes.CeilometerTenant
+      requirements:
+          - provider_service:
+              node: service_ceilometer
+              relationship: tosca.relationships.MemberOfService
+       
+    # Virtual machines
+    sflow_service_instance:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: Ubuntu
+            version: 14.10
+      requirements:
+          - slice:
+                node: mysite_sflow
+                relationship: tosca.relationships.MemberOfSlice
+
+    Ceilometer:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosCeilometerDashboard
+    Tenant:
+      type: tosca.nodes.DashboardView
+      properties:
+          no-create: true
+          no-update: true
+          no-delete: true
+
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      properties:
+          firstname: XOS
+          lastname: admin
+          is_admin: true
+      requirements:
+          - tenant_dashboard:
+              node: Tenant
+              relationship: tosca.relationships.UsesDashboard
+          - ceilometer_dashboard:
+              node: Ceilometer
+              relationship: tosca.relationships.UsesDashboard
diff --git a/acord/cleanup.sh b/acord/cleanup.sh
new file mode 100755
index 0000000..dfa1438
--- /dev/null
+++ b/acord/cleanup.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
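+# Delete a Neutron network, first removing any ports on its subnet so the
+# network delete does not fail because of in-use ports.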
+function cleanup_network {
+  NETWORK=$1
+  SUBNETS=`neutron net-show $NETWORK | grep -i subnets | awk '{print $4}'`
+  if [[ $SUBNETS != "" ]]; then
+      PORTS=`neutron port-list | grep -i $SUBNETS | awk '{print $2}'`
+      for PORT in $PORTS; do
+          echo "Deleting port $PORT"
+          neutron port-delete $PORT
+      done
+  fi
+  neutron net-delete $NETWORK
+}
+
+source ../setup/admin-openrc.sh
+
+echo "Deleting VMs"
+# Delete all VMs
+VMS=$( nova list --all-tenants|grep mysite|awk '{print $2}' )
+for VM in $VMS
+do
+    nova delete $VM
+done
+
+echo "Waiting 5 seconds..."
+sleep 5
+
+cleanup_network lan_network
+cleanup_network wan_network
+cleanup_network mysite_vsg-private
+cleanup_network mysite_vsg-access
+cleanup_network management
+
+echo "Deleting networks"
+# Delete all networks beginning with mysite_
+NETS=$( neutron net-list --all-tenants|grep mysite|awk '{print $2}' )
+for NET in $NETS
+do
+    neutron net-delete $NET
+done
+
+neutron net-delete lan_network || true
+neutron net-delete subscriber_network || true
+neutron net-delete public_network || true
+neutron net-delete hpc_client_network || true
+neutron net-delete ceilometer_network || true
+neutron net-delete management || true
+neutron net-delete mysite_vsg-access || true
diff --git a/acord/docker-compose.yml b/acord/docker-compose.yml
new file mode 100644
index 0000000..da9562e
--- /dev/null
+++ b/acord/docker-compose.yml
@@ -0,0 +1,67 @@
+xos_db:
+    image: xosproject/xos-postgres
+    expose:
+        - "5432"
+
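+# Note: the synchronizer containers sleep before starting, presumably to give
+# the XOS core and database time to come up first.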
+xos_synchronizer_openstack:
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/openstack/xos-synchronizer.py"
+    image: xosproject/xos-synchronizer-openstack
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: openstack
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+    volumes:
+        - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+        - ./xos_cord_config:/opt/xos/xos_configuration/xos_cord_config:ro
+        - ../setup:/root/setup:ro
+
+xos_synchronizer_monitoring_channel:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/monitoring_channel/monitoring_channel_synchronizer.py -C /opt/xos/synchronizers/monitoring_channel/monitoring_channel_synchronizer_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: monitoring_channel
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+    volumes:
+        - ../setup/id_rsa:/opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key:ro  # private key
+
+#Ensure exampleservice is enabled in xos/tools/xos-manage and the xos/settings.py file before uncommenting the lines below
+#xos_synchronizer_exampleservice:
+#    image: xosproject/xos-synchronizer-openstack
+#    command: bash -c "sleep 120; python /opt/xos/synchronizers/exampleservice/exampleservice-synchronizer.py -C /opt/xos/synchronizers/exampleservice/exampleservice_config"
+#    labels:
+#        org.xosproject.kind: synchronizer
+#        org.xosproject.target: exampleservice
+#    links:
+#        - xos_db
+#    volumes:
+#        - ../setup:/root/setup:ro
+#        - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+#        - ../setup/id_rsa:/opt/xos/synchronizers/exampleservice/exampleservice_private_key:ro
+
+# FUTURE
+#xos_swarm_synchronizer:
+#    image: xosproject/xos-swarm-synchronizer
+#    labels:
+#        org.xosproject.kind: synchronizer
+#        org.xosproject.target: swarm
+
+xos:
+    image: xosproject/xos
+    command: python /opt/xos/manage.py runserver 0.0.0.0:8000 --insecure --makemigrations
+    ports:
+        - "9999:8000"
+    links:
+        - xos_db
+    volumes:
+      - ../setup:/root/setup:ro
+      - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+      - ./xos_cord_config:/opt/xos/xos_configuration/xos_cord_config:ro
+      - ../setup/id_rsa.pub:/opt/xos/synchronizers/monitoring_channel/monitoring_channel_public_key:ro
+#      - ../setup/id_rsa.pub:/opt/xos/synchronizers/exampleservice/exampleservice_public_key:ro
diff --git a/acord/xos_cord_config b/acord/xos_cord_config
new file mode 100644
index 0000000..a5448f7
--- /dev/null
+++ b/acord/xos_cord_config
@@ -0,0 +1,6 @@
+[gui]
+branding_name=CORD
+#branding_css=/static/cord.css
+branding_icon=/static/cord-logo.png
+branding_favicon=/static/cord-favicon.png
+branding_bg=/static/cord-bg.jpg
diff --git a/common/Dockerfile.common b/common/Dockerfile.common
new file mode 100644
index 0000000..aedd245
--- /dev/null
+++ b/common/Dockerfile.common
@@ -0,0 +1,144 @@
+FROM       ubuntu:14.04.2
+MAINTAINER Andy Bavier <acb@cs.princeton.edu>
+
+# XXX Workaround for docker bug:
+# https://github.com/docker/docker/issues/6345
+# Kernel 3.15 breaks docker, use the line below as a workaround
+# until there is a fix
+RUN ln -s -f /bin/true /usr/bin/chfn
+# XXX End workaround
+
+# Install.
+RUN apt-get update && apt-get install -y \
+    git \
+    postgresql \
+    python-psycopg2 \
+    graphviz \
+    graphviz-dev \
+    libxslt1.1 \
+    libxslt1-dev \
+    python-pip \
+    tar \
+    gcc \
+    python-httplib2 \
+    geoip-database \
+    libgeoip1 \
+    wget \
+    curl \
+    python-dev \
+    libyaml-dev \
+    pkg-config \
+    python-pycurl
+
+RUN pip install django==1.7
+RUN pip install djangorestframework==2.4.4
+RUN pip install markdown  # Markdown support for the browseable API.
+RUN pip install pyyaml    # YAML content-type support.
+RUN pip install django-filter==0.11.0  # Filtering support
+RUN pip install lxml  # XML manipulation library
+RUN pip install netaddr # IP Addr library
+RUN pip install pytz
+RUN pip install django-timezones
+RUN pip install requests
+RUN pip install python-logstash
+RUN pip install django-crispy-forms
+RUN pip install django-geoposition
+RUN pip install django-extensions
+RUN pip install django-suit==0.3a1
+RUN pip install django-bitfield
+RUN pip install django-ipware
+RUN pip install django-encrypted-fields
+RUN pip install python-keyczar
+RUN pip install pygraphviz --install-option="--include-path=/usr/include/graphviz" --install-option="--library-path=/usr/lib/graphviz/"
+RUN pip install dnslib
+
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-keystoneclient
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-novaclient
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-neutronclient
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-glanceclient
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-ceilometerclient
+
+RUN pip install django_rest_swagger
+
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-setuptools
+RUN easy_install python_gflags
+RUN easy_install --upgrade httplib2
+RUN easy_install google_api_python_client
+RUN easy_install httplib2.ca_certs_locater
+
+# Install custom Ansible
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-crypto
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-yaml
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-client
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-paramiko
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-jinja2
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-httplib2
+RUN git clone -b release1.8.2 git://github.com/ansible/ansible.git /opt/ansible
+RUN git clone -b release1.8.2 git://github.com/ansible/ansible-modules-extras.git /opt/ansible/lib/ansible/modules/extras
+RUN git clone -b release1.8.2 git://github.com/ansible/ansible-modules-extras.git /opt/ansible/v2/ansible/modules/extras
+RUN git clone git://github.com/sb98052/ansible-modules-core.git /opt/ansible/lib/ansible/modules/core
+RUN git clone git://github.com/sb98052/ansible-modules-core.git /opt/ansible/v2/ansible/modules/core
+ADD ansible-hosts /etc/ansible/hosts
+
+ADD http://code.jquery.com/jquery-1.9.1.min.js /usr/local/lib/python2.7/dist-packages/suit/static/suit/js/
+
+# For Observer
+RUN git clone git://git.planet-lab.org/fofum.git /tmp/fofum
+RUN cd /tmp/fofum; python setup.py install
+RUN rm -rf /tmp/fofum
+
+RUN mkdir -p /usr/local/share /bin
+ADD http://phantomjs.googlecode.com/files/phantomjs-1.7.0-linux-x86_64.tar.bz2 /usr/local/share/
+RUN tar jxvf /usr/local/share/phantomjs-1.7.0-linux-x86_64.tar.bz2 -C /usr/local/share/
+RUN rm -f /usr/local/share/phantomjs-1.7.0-linux-x86_64.tar.bz2
+RUN ln -s /usr/local/share/phantomjs-1.7.0-linux-x86_64 /usr/local/share/phantomjs
+RUN ln -s /usr/local/share/phantomjs/bin/phantomjs /bin/phantomjs
+
+# Supervisor
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y supervisor
+ADD observer.conf /etc/supervisor/conf.d/
+
+# Get XOS
+ADD xos /opt/xos
+
+# Initscript is broken in Ubuntu
+#ADD observer-initscript /etc/init.d/xosobserver
+
+RUN chmod +x /opt/xos/tools/xos-manage
+RUN /opt/xos/tools/xos-manage genkeys
+
+# Workaround for AUFS issue
+# https://github.com/docker/docker/issues/783#issuecomment-56013588
+RUN mkdir /etc/ssl/private-copy; mv /etc/ssl/private/* /etc/ssl/private-copy/; rm -r /etc/ssl/private; mv /etc/ssl/private-copy /etc/ssl/private; chmod -R 0700 /etc/ssl/private; chown -R postgres /etc/ssl/private
+
+# Set postgres password to match default value in settings.py
+RUN service postgresql start; sudo -u postgres psql -c "alter user postgres with password 'password';"
+
+# Turn DEBUG on so that devel server will serve static files
+#    (not necessary if --insecure is passed to 'manage.py runserver')
+# RUN sed -i 's/DEBUG = False/DEBUG = True/' /opt/xos/xos/settings.py
+
+# Cruft to workaround problems with migrations, should go away...
+RUN /opt/xos/tools/xos-manage remigrate
+
+# git clone uses cached copy, doesn't pick up latest
+RUN git -C /opt/ansible pull
+RUN git -C /opt/ansible/lib/ansible/modules/core pull
+RUN git -C /opt/ansible/v2/ansible/modules/core pull
+
+# install Tosca engine
+RUN apt-get install -y m4
+RUN pip install python-dateutil
+RUN bash /opt/xos/tosca/install_tosca.sh
+
+EXPOSE 8000
+
+# Set environment variables.
+ENV HOME /root
+
+# Define working directory.
+WORKDIR /root
+
+# Define default command.
+#CMD ["/bin/bash"]
+#CMD /opt/xos/tools/docker_start_xos
diff --git a/common/Makedefs b/common/Makedefs
new file mode 100644
index 0000000..7111b42
--- /dev/null
+++ b/common/Makedefs
@@ -0,0 +1,2 @@
+SERVICE_DIR=../../xos_services
+XOS_DIR=../../xos
diff --git a/common/Makefile.cloudlab b/common/Makefile.cloudlab
new file mode 100644
index 0000000..7ab8a54
--- /dev/null
+++ b/common/Makefile.cloudlab
@@ -0,0 +1,41 @@
+MYFLATLANIF:=$(shell netstat -i |grep "flat"|awk '{print $$1}' )
+ifndef MYFLATLANIF
+$(error MYFLATLANIF is empty)
+endif
+MYFLATLANIP:=$(shell ifconfig $(MYFLATLANIF) | grep "inet addr" | awk -F: '{print $$2}' | awk '{print $$1}' )
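+# MYFLATLANIF/MYFLATLANIP are the CloudLab flat-lan interface and its IPv4
+# address; the IP is appended to controller_settings as CONTROLLER_FLAT_LAN_IP.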
+SETUPDIR:=../setup
+
+all: prereqs admin-openrc flat_name nodes_yaml public_key private_key ceilometer_url other_keys
+
+prereqs:
+	make -f Makefile.prereqs
+	mkdir -p $(SETUPDIR)
+
+admin-openrc:
+	sudo cat /root/setup/admin-openrc.sh > $(SETUPDIR)/admin-openrc.sh
+	sudo cat /root/setup/settings > $(SETUPDIR)/controller_settings
+	echo 'CONTROLLER_FLAT_LAN_IP=$(MYFLATLANIP)' >> $(SETUPDIR)/controller_settings
+
+flat_name:
+	bash -c "source $(SETUPDIR)/admin-openrc.sh ; neutron net-list" |grep flat|awk '{printf "%s",$$4}' > $(SETUPDIR)/flat_net_name
+	[ -s $(SETUPDIR)/flat_net_name ] # throw error if flat_net_name is empty
+
+nodes_yaml:
+	export SETUPDIR=$(SETUPDIR); bash ./make-nodes-yaml.sh 
+
+ceilometer_url:
+	echo http://`hostname -i`/xosmetering/ > $(SETUPDIR)/ceilometer_url
+
+public_key: ~/.ssh/id_rsa.pub
+	cp ~/.ssh/id_rsa.pub $(SETUPDIR)
+
+private_key: ~/.ssh/id_rsa
+	cp ~/.ssh/id_rsa $(SETUPDIR)
+
+~/.ssh/id_rsa.pub:
+	cat /dev/zero | ssh-keygen -q -N ""
+
+other_keys: public_key private_key
+	sudo cat /root/setup/id_rsa > $(SETUPDIR)/node_key
+	sudo cat /root/setup/id_rsa.pub > $(SETUPDIR)/node_key.pub
+	sudo cp $(SETUPDIR)/id_rsa.pub $(SETUPDIR)/padmin_public_key
diff --git a/common/Makefile.containers b/common/Makefile.containers
new file mode 100644
index 0000000..0db49d6
--- /dev/null
+++ b/common/Makefile.containers
@@ -0,0 +1,26 @@
+include ../common/Makedefs
+
+local_containers: xos_devel synchronizer onboarding_synchronizer
+
+xos_devel:
+	cd $(XOS_DIR)/containers/xos; make devel
+
+xos_base:
+	cd $(XOS_DIR)/containers/xos; make base
+
+xos_test:
+	cd $(XOS_DIR)/containers/xos; make test
+
+synchronizer:
+	cd $(XOS_DIR)/containers/synchronizer; make
+
+onboarding_synchronizer:
+	cd $(XOS_DIR)/containers/onboarding_synchronizer; make
+
+update_certs:
+	echo "" > $(XOS_DIR)/containers/xos/local_certs.crt
+	for CRT in $$(ls /usr/local/share/ca-certificates/*) ; do \
+	        echo Adding Certificate: $$CRT ;\
+	        cat $$CRT >> $(XOS_DIR)/containers/xos/local_certs.crt ;\
+	        echo "" >> $(XOS_DIR)/containers/xos/local_certs.crt ;\
+	done
diff --git a/common/Makefile.devstack b/common/Makefile.devstack
new file mode 100644
index 0000000..0dff27c
--- /dev/null
+++ b/common/Makefile.devstack
@@ -0,0 +1,42 @@
+# This shouldn't be hardcoded
+DEVSTACK_ROOT:=~/devstack
+SETUPDIR:=../setup
+
+all: prereqs admin-openrc flat_name nodes_yaml public_key private_key ceilometer_url other_keys net_fix
+
+prereqs:
+	make -f Makefile.prereqs
+	sudo pip install httpie --upgrade
+	mkdir -p $(SETUPDIR)
+
+admin-openrc:
+	bash ./devstack-creds.sh $(DEVSTACK_ROOT) > $(SETUPDIR)/admin-openrc.sh
+	touch $(SETUPDIR)/controller_settings
+
+flat_name:
+	echo private|tr -d '\n' > $(SETUPDIR)/flat_net_name
+	bash -c "source $(SETUPDIR)/admin-openrc.sh; neutron net-update private --shared"
+
+nodes_yaml:
+	export SETUPDIR=$(SETUPDIR); bash ./make-nodes-yaml.sh
+
+ceilometer_url:
+	echo http://`hostname -i`/xosmetering/ > $(SETUPDIR)/ceilometer_url
+
+public_key: ~/.ssh/id_rsa.pub
+	cp ~/.ssh/id_rsa.pub $(SETUPDIR)
+
+private_key: ~/.ssh/id_rsa
+	cp ~/.ssh/id_rsa $(SETUPDIR)
+
+~/.ssh/id_rsa.pub:
+	cat /dev/zero | ssh-keygen -q -N ""
+
+other_keys: public_key private_key
+	cp $(SETUPDIR)/id_rsa $(SETUPDIR)/node_key
+	cp $(SETUPDIR)/id_rsa.pub $(SETUPDIR)/node_key.pub
+	cp $(SETUPDIR)/id_rsa.pub $(SETUPDIR)/padmin_public_key
+
+net_fix:
+	sudo devstack/net-fix.sh
+	bash -c "source $(SETUPDIR)/admin-openrc.sh; neutron subnet-update private-subnet --dns-nameservers list=true 8.8.8.8 8.8.4.4"
diff --git a/common/Makefile.opencloud b/common/Makefile.opencloud
new file mode 100644
index 0000000..8cf9f29
--- /dev/null
+++ b/common/Makefile.opencloud
@@ -0,0 +1,40 @@
+MYFLATLANIF:=$(shell netstat -i |grep "flat"|awk '{print $$1}' )
+MYFLATLANIP:=$(shell ifconfig $(MYFLATLANIF) | grep "inet addr" | awk -F: '{print $$2}' | awk '{print $$1}' )
+SETUPDIR:=../setup
+
+all: prereqs admin-openrc flat_name nodes_yaml public_key private_key ceilometer_url node_key
+
+prereqs:
+	make -f Makefile.prereqs
+	mkdir -p $(SETUPDIR)
+
+admin-openrc:
+	sudo cat /root/setup/admin-openrc.sh > $(SETUPDIR)/admin-openrc.sh
+	sudo cat /root/setup/settings > $(SETUPDIR)/controller_settings
+	echo 'CONTROLLER_FLAT_LAN_IP=$(MYFLATLANIP)' >> $(SETUPDIR)/controller_settings
+
+flat_name:
+	sudo bash -c "source /root/setup/admin-openrc.sh ; neutron net-list" |grep flat|awk '{printf "%s",$$4}' > $(SETUPDIR)/flat_net_name
+	[ -s $(SETUPDIR)/flat_net_name ] # throw error if flat_net_name is empty
+
+nodes_yaml:
+	bash ./make-nodes-yaml.sh > $(SETUPDIR)/nodes.yaml
+
+ceilometer_url:
+	echo http://`hostname -i`/xosmetering/ > $(SETUPDIR)/ceilometer_url
+
+public_key: ~/.ssh/id_rsa.pub
+	cp ~/.ssh/id_rsa.pub $(SETUPDIR)
+
+private_key: ~/.ssh/id_rsa
+	cp ~/.ssh/id_rsa $(SETUPDIR)
+
+~/.ssh/id_rsa.pub:
+	cat /dev/zero | ssh-keygen -q -N ""
+
+node_key:
+	sudo cat /root/setup/id_rsa > $(SETUPDIR)/node_key
+	sudo cat /root/setup/id_rsa.pub > $(SETUPDIR)/node_key.pub
+
+images:
+	source admin-openrc.sh; glance image-show trusty-server-multi-nic || glance image-create --name trusty-server-multi-nic --disk-format qcow2 --file /proj/xos-PG0/acb/images/trusty-server-multi-nic.img --container-format bare
diff --git a/common/Makefile.prereqs b/common/Makefile.prereqs
new file mode 100644
index 0000000..fce51be
--- /dev/null
+++ b/common/Makefile.prereqs
@@ -0,0 +1,41 @@
+UBUNTU:=$(shell which apt > /dev/null 2>&1; echo $$?)
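+# UBUNTU is 0 when apt is available (Debian/Ubuntu), non-zero on other distros.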
+
+ifeq ($(UBUNTU),0)
+
+# ******************* apt-based distros ***************************
+prereqs: /usr/bin/http docker /usr/bin/curl /usr/local/bin/docker-compose
+
+/usr/bin/http:
+	sudo apt-get -y install httpie
+
+/usr/bin/curl:
+	sudo apt-get -y install curl
+
+docker:
+	which docker > /dev/null || wget -qO- https://get.docker.com/ | sh
+	sudo usermod -aG docker $(shell whoami)
+
+else
+
+# ****************** RPM-based distros ******************
+
+# (untested / work-in-progress)
+
+prereqs: /usr/bin/pip /usr/bin/http docker /usr/local/bin/docker-compose
+
+/usr/bin/pip:
+	curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py"
+	sudo python ./get-pip.py
+
+docker:
+	which docker > /dev/null || wget -qO- https://get.docker.com/ | sh
+	sudo usermod -aG docker $(shell whoami)
+
+/usr/bin/http:
+	sudo pip install httpie
+
+endif
+
+/usr/local/bin/docker-compose:
+	sudo bash -c "curl -L https://github.com/docker/compose/releases/download/1.5.2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose"
+	sudo chmod +x /usr/local/bin/docker-compose
diff --git a/common/Makefile.services b/common/Makefile.services
new file mode 100644
index 0000000..6b62a1d
--- /dev/null
+++ b/common/Makefile.services
@@ -0,0 +1,64 @@
+include ../common/Makedefs
+
+services: $(SERVICE_DIR) \
+          $(SERVICE_DIR)/exampleservice \
+          $(SERVICE_DIR)/olt \
+	  $(SERVICE_DIR)/vsg \
+          $(SERVICE_DIR)/vtn \
+	  $(SERVICE_DIR)/vrouter \
+	  $(SERVICE_DIR)/vtr \
+	  $(SERVICE_DIR)/onos-service \
+	  $(SERVICE_DIR)/fabric
+
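+# Each $(SERVICE_DIR)/<name> target below clones the corresponding service
+# repository from gerrit.opencord.org the first time it is needed; "update"
+# pulls the latest changes into each clone.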
+monitoring_services: $(SERVICE_DIR)/monitoring
+
+xos_core: $(XOS_DIR)
+
+$(SERVICE_DIR):
+	mkdir -p $(SERVICE_DIR)
+
+$(SERVICE_DIR)/exampleservice:
+	git -C $(SERVICE_DIR) clone https://gerrit.opencord.org/p/exampleservice.git
+
+$(SERVICE_DIR)/olt:
+	git -C $(SERVICE_DIR) clone https://gerrit.opencord.org/p/olt.git
+
+$(SERVICE_DIR)/vsg:
+	git -C $(SERVICE_DIR) clone https://gerrit.opencord.org/p/vsg.git
+
+$(SERVICE_DIR)/vtn:
+	git -C $(SERVICE_DIR) clone https://gerrit.opencord.org/p/vtn.git
+
+$(SERVICE_DIR)/vrouter:
+	git -C $(SERVICE_DIR) clone https://gerrit.opencord.org/p/vrouter.git
+
+$(SERVICE_DIR)/vtr:
+	git -C $(SERVICE_DIR) clone https://gerrit.opencord.org/p/vtr.git
+
+$(SERVICE_DIR)/onos-service:
+	git -C $(SERVICE_DIR) clone https://gerrit.opencord.org/p/onos-service.git
+
+$(SERVICE_DIR)/fabric:
+	git -C $(SERVICE_DIR) clone https://gerrit.opencord.org/p/fabric.git
+
+$(SERVICE_DIR)/monitoring:
+	git -C $(SERVICE_DIR) clone https://gerrit.opencord.org/p/monitoring.git
+
+cleanup:
+	rm -rf $(SERVICE_DIR)/*
+
+update:
+	git -C $(SERVICE_DIR)/exampleservice pull
+	git -C $(SERVICE_DIR)/olt pull
+	git -C $(SERVICE_DIR)/vsg pull
+	git -C $(SERVICE_DIR)/vtn pull
+	git -C $(SERVICE_DIR)/vrouter pull
+	git -C $(SERVICE_DIR)/vtr pull
+	git -C $(SERVICE_DIR)/onos-service pull
+	git -C $(SERVICE_DIR)/fabric pull
+
+$(XOS_DIR):
+	git -C ../.. clone https://gerrit.opencord.org/p/xos.git
+
+update_xos:
+	git -C $(XOS_DIR) pull
diff --git a/common/base.yaml b/common/base.yaml
new file mode 100644
index 0000000..3638dd9
--- /dev/null
+++ b/common/base.yaml
@@ -0,0 +1,89 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+# Note:
+#   assumes the following have been created and filled with appropriate data:
+#       /root/setup/admin_openrc
+#       /root/setup/flat_net_name
+#       /root/setup/padmin_public_key
+
+description: >
+    * Adds OpenCloud Sites, Deployments, and Controllers.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+      properties:
+         disk_format: QCOW2
+         container_format: BARE
+
+    MyDeployment:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+      requirements:
+          - image:
+              node: trusty-server-multi-nic
+              relationship: tosca.relationships.SupportsImage
+
+    CloudLab:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: MyDeployment
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Kilo
+          auth_url: { get_script_env: [ SELF, adminrc, OS_AUTH_URL, LOCAL_FILE] }
+          admin_user: { get_script_env: [ SELF, adminrc, OS_USERNAME, LOCAL_FILE] }
+          admin_password: { get_script_env: [ SELF, adminrc, OS_PASSWORD, LOCAL_FILE] }
+          admin_tenant: { get_script_env: [ SELF, adminrc, OS_TENANT_NAME, LOCAL_FILE] }
+          rabbit_user: { get_script_env: [ SELF, controller_settings, RABBIT_USER, LOCAL_FILE] }
+          rabbit_password: { get_script_env: [ SELF, controller_settings, RABBIT_PASS, LOCAL_FILE] }
+          rabbit_host: { get_script_env: [ SELF, controller_settings, CONTROLLER_FLAT_LAN_IP, LOCAL_FILE] }
+          domain: Default
+      artifacts:
+          adminrc: /root/setup/admin-openrc.sh
+          controller_settings: /root/setup/controller_settings
+
+    mysite:
+      type: tosca.nodes.Site
+      properties:
+          display_name: MySite
+          site_url: http://opencloud.us/
+      requirements:
+          - deployment:
+               node: MyDeployment
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: CloudLab
+                       relationship: tosca.relationships.UsesController
+
+    Public shared IPv4:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          visibility: private
+          translation: NAT
+          shared_network_name: { get_artifact: [ SELF, flat_net_name, LOCAL_FILE] }
+      artifacts:
+          flat_net_name: /root/setup/flat_net_name
+
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE ] }
+          is_admin: true
+          is_active: true
+          firstname: XOS
+          lastname: admin
+      artifacts:
+          pubkey: /root/setup/padmin_public_key
diff --git a/common/cloudlab-openstack.yaml b/common/cloudlab-openstack.yaml
new file mode 100644
index 0000000..969f84c
--- /dev/null
+++ b/common/cloudlab-openstack.yaml
@@ -0,0 +1,89 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+# Note:
+#   assumes mydeployment.yaml has already been run, and the following exist:
+#       MyDeployment
+#       mysite
+#       padmin@vicci.org
+#       Public Shared IPv4
+#   assumes the following have been created and filled with appropriate data:
+#       /root/setup/admin_openrc
+#       /root/setup/flat_net_name
+#       /root/setup/padmin_public_key
+
+description: >
+    * Adds OpenCloud Sites, Deployments, and Controllers.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+      properties:
+         disk_format: QCOW2
+         container_format: BARE
+
+    MyDeployment:
+      type: tosca.nodes.Deployment
+      properties:
+          no-create: True
+          no-delete: True
+      requirements:
+          - image:
+              node: trusty-server-multi-nic
+              relationship: tosca.relationships.SupportsImage
+
+    CloudLab:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: MyDeployment
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Kilo
+          auth_url: { get_script_env: [ SELF, adminrc, OS_AUTH_URL, LOCAL_FILE] }
+          admin_user: { get_script_env: [ SELF, adminrc, OS_USERNAME, LOCAL_FILE] }
+          admin_password: { get_script_env: [ SELF, adminrc, OS_PASSWORD, LOCAL_FILE] }
+          admin_tenant: { get_script_env: [ SELF, adminrc, OS_TENANT_NAME, LOCAL_FILE] }
+          rabbit_user: { get_script_env: [ SELF, controller_settings, RABBIT_USER, LOCAL_FILE] }
+          rabbit_password: { get_script_env: [ SELF, controller_settings, RABBIT_PASS, LOCAL_FILE] }
+          rabbit_host: { get_script_env: [ SELF, controller_settings, CONTROLLER_FLAT_LAN_IP, LOCAL_FILE] }
+          domain: Default
+      artifacts:
+          adminrc: /root/setup/admin-openrc.sh
+          controller_settings: /root/setup/controller_settings
+
+    mysite:
+      type: tosca.nodes.Site
+      properties:
+          no-create: True
+          no-delete: True
+      requirements:
+          - deployment:
+               node: MyDeployment
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: CloudLab
+                       relationship: tosca.relationships.UsesController
+
+    Public shared IPv4:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          no-create: True
+          no-delete: True
+          shared_network_name: { get_artifact: [ SELF, flat_net_name, LOCAL_FILE] }
+      artifacts:
+          flat_net_name: /root/setup/flat_net_name
+
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      properties:
+          no-create: True
+          no-delete: True
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE ] }
+      artifacts:
+          pubkey: /root/setup/padmin_public_key
diff --git a/common/devstack-creds.sh b/common/devstack-creds.sh
new file mode 100644
index 0000000..b90e6ec
--- /dev/null
+++ b/common/devstack-creds.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+
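+# Source the devstack admin credentials and re-emit them as export statements,
+# so the caller can redirect the output into admin-openrc.sh.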
+DEVSTACK_ROOT=$1
+
+source $DEVSTACK_ROOT/openrc admin admin
+echo export OS_TENANT_NAME=$OS_TENANT_NAME
+echo export OS_USERNAME=$OS_USERNAME
+echo export OS_PASSWORD=$OS_PASSWORD
+echo export OS_AUTH_URL=$OS_AUTH_URL
diff --git a/common/devstack/local.conf b/common/devstack/local.conf
new file mode 100644
index 0000000..15a95fb
--- /dev/null
+++ b/common/devstack/local.conf
@@ -0,0 +1,32 @@
+# A single node devstack configuration for use with XOS
+[[local|localrc]]
+
+DOWNLOAD_DEFAULT_IMAGES=false
+IMAGE_URLS="http://www.planet-lab.org/cord/trusty-server-multi-nic.img,"
+#IMAGE_URLS+="http://www.vicci.org/cord/ceilometer-trusty-server-multi-nic.compressed.qcow2"
+LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
+# Append the git branch name if you wish to download ceilometer from a specific branch
+#enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer 
+
+disable_service n-net
+enable_service q-svc
+enable_service q-agt
+enable_service q-dhcp
+enable_service q-l3
+enable_service q-meta
+# Optional, to enable tempest configuration as part of devstack
+enable_service tempest
+
+# Uncomment below lines if you are installing kilo version of ceilometer
+#enable_service ceilometer-acentral
+#enable_service ceilometer-anotification
+#enable_service ceilometer-collector
+#enable_service ceilometer-alarm-singleton
+#enable_service ceilometer-alarm-notifier
+#enable_service ceilometer-alarm-evaluator
+#enable_service ceilometer-api
+#enable_service ceilometer-acompute
+#CEILOMETER_BACKEND=mongodb
+
+## Neutron options
+Q_USE_SECGROUP=False
diff --git a/common/devstack/net-fix.sh b/common/devstack/net-fix.sh
new file mode 100755
index 0000000..5b486bd
--- /dev/null
+++ b/common/devstack/net-fix.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
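+# Masquerade traffic from devstack's floating-IP range (172.24.4.0/24) out the
+# default-route interface; the -C check avoids adding a duplicate rule.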
+PRIMARY=$( route | grep default | awk '{print $NF}' )
+RULE="POSTROUTING -t nat -o $PRIMARY -s 172.24.4.0/24 -j MASQUERADE"
+
+iptables -C $RULE || iptables -A $RULE
diff --git a/common/devstack/setup-devstack.sh b/common/devstack/setup-devstack.sh
new file mode 100644
index 0000000..bfbb8f8
--- /dev/null
+++ b/common/devstack/setup-devstack.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+# If running on a CloudLab node, set up extra disk space
+if [ -e /usr/testbed/bin/mkextrafs ]
+then
+    sudo mkdir -p /opt/stack
+    sudo /usr/testbed/bin/mkextrafs -f /opt/stack
+fi
+
+cd ~
+git clone https://github.com/open-cloud/xos.git
+git clone https://git.openstack.org/openstack-dev/devstack
+cd ~/devstack
+git checkout stable/kilo
+cp ~/xos/xos/configurations/common/devstack/local.conf .
+./stack.sh
diff --git a/common/disable-onboarding.yaml b/common/disable-onboarding.yaml
new file mode 100644
index 0000000..acb75c8
--- /dev/null
+++ b/common/disable-onboarding.yaml
@@ -0,0 +1,16 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Disable the XOS onboarding build process
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    xos:
+      type: tosca.nodes.XOS
+      properties:
+        no-create: true
+        no-delete: true
+        enable_build: false
+
diff --git a/common/enable-onboarding.yaml b/common/enable-onboarding.yaml
new file mode 100644
index 0000000..98e433c
--- /dev/null
+++ b/common/enable-onboarding.yaml
@@ -0,0 +1,16 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Enable the XOS onboarding build process
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    xos:
+      type: tosca.nodes.XOS
+      properties:
+        no-create: true
+        no-delete: true
+        enable_build: true
+
diff --git a/common/fixtures.yaml b/common/fixtures.yaml
new file mode 100644
index 0000000..198f5d2
--- /dev/null
+++ b/common/fixtures.yaml
@@ -0,0 +1,135 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Some basic fixtures
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+
+    xos:
+      type: tosca.nodes.XOS
+
+
+# -----------------------------------------------------------------------------
+# Network Parameter Types
+# -----------------------------------------------------------------------------
+
+    s_tag:
+      type: tosca.nodes.NetworkParameterType
+
+    c_tag:
+      type: tosca.nodes.NetworkParameterType
+
+    next_hop:
+      type: tosca.nodes.NetworkParameterType
+
+    device:
+      type: tosca.nodes.NetworkParameterType
+
+    bridge:
+      type: tosca.nodes.NetworkParameterType
+
+    neutron_port_name:
+      type: tosca.nodes.NetworkParameterType
+
+# ----------------------------------------------------------------------------
+# Roles
+# ----------------------------------------------------------------------------
+
+    siterole#admin:
+      type: tosca.nodes.SiteRole
+
+    siterole#pi:
+      type: tosca.nodes.SiteRole
+
+    siterole#tech:
+      type: tosca.nodes.SiteRole
+
+    tenantrole#admin:
+      type: tosca.nodes.TenantRole
+
+    tenantrole#access:
+      type: tosca.nodes.TenantRole
+
+    deploymentrole#admin:
+      type: tosca.nodes.DeploymentRole
+
+    slicerole#admin:
+      type: tosca.nodes.SliceRole
+
+    slicerole#access:
+      type: tosca.nodes.SliceRole
+
+# -----------------------------------------------------------------------------
+# Flavors
+# -----------------------------------------------------------------------------
+
+    m1.small:
+      type: tosca.nodes.Flavor
+
+    m1.medium:
+      type: tosca.nodes.Flavor
+
+    m1.large:
+      type: tosca.nodes.Flavor
+
+# -----------------------------------------------------------------------------
+# Dashboard Views
+# -----------------------------------------------------------------------------
+
+# Temporary removed, waiting for a new Angular Base Implementation
+#    xsh:
+#      type: tosca.nodes.DashboardView
+#      properties:
+#          url: template:xsh
+
+    Customize:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:customize
+
+    Tenant:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosTenant
+
+    Developer:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosDeveloper
+
+    Services Grid:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosServiceGrid
+
+# -----------------------------------------------------------------------------
+# Network Templates
+# -----------------------------------------------------------------------------
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          visibility: private
+          translation: none
+
+    Public shared IPv4:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          visibility: private
+          translation: NAT
+          shared_network_name: nat-net
+
+    Public dedicated IPv4:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          visibility: public
+          translation: none
+          shared_network_name: ext-net
+
+
+
+
+
diff --git a/common/make-images-yaml.sh b/common/make-images-yaml.sh
new file mode 100644
index 0000000..6321a9d
--- /dev/null
+++ b/common/make-images-yaml.sh
@@ -0,0 +1,48 @@
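+# Generate $SETUPDIR/images.yaml: a TOSCA recipe that registers every image
+# reported by "glance image-list" and attaches each one to MyDeployment.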
+FN=$SETUPDIR/images.yaml
+
+rm -f $FN
+
+cat >> $FN <<EOF
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+imports:
+   - custom_types/xos.yaml
+
+description: autogenerated nodes file
+
+topology_template:
+  node_templates:
+    mysite:
+        type: tosca.nodes.Site
+
+EOF
+
+IMAGES=$( bash -c "source $SETUPDIR/admin-openrc.sh ; glance image-list" |grep -v ID|grep -v +|awk '{print $4}' )
+I=0
+for IMAGE in $IMAGES; do
+    echo $IMAGE
+    cat >> $FN <<EOF
+    $IMAGE:
+      type: tosca.nodes.Image
+      properties:
+         disk_format: QCOW2
+         container_format: BARE
+
+EOF
+done
+
+cat >> $FN <<EOF
+    MyDeployment:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+      requirements:
+EOF
+
+for IMAGE in $IMAGES; do
+    cat >> $FN <<EOF
+          - image:
+              node: $IMAGE
+              relationship: tosca.relationships.SupportsImage
+EOF
+done
diff --git a/common/make-nodes-yaml.sh b/common/make-nodes-yaml.sh
new file mode 100644
index 0000000..65e16bb
--- /dev/null
+++ b/common/make-nodes-yaml.sh
@@ -0,0 +1,36 @@
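+# Generate $SETUPDIR/nodes.yaml: a TOSCA recipe that registers each compute
+# host reported by "nova host-list" as a Node in mysite/MyDeployment.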
+FN=$SETUPDIR/nodes.yaml
+
+rm -f $FN
+
+cat >> $FN <<EOF
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+imports:
+   - custom_types/xos.yaml
+
+description: autogenerated nodes file
+
+topology_template:
+  node_templates:
+    MyDeployment:
+        type: tosca.nodes.Deployment
+    mysite:
+        type: tosca.nodes.Site
+EOF
+
+NODES=$( bash -c "source $SETUPDIR/admin-openrc.sh ; nova host-list" |grep compute|awk '{print $2}' )
+I=0
+for NODE in $NODES; do
+    echo $NODE
+    cat >> $FN <<EOF
+    $NODE:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: mysite
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: MyDeployment
+            relationship: tosca.relationships.MemberOfDeployment
+EOF
+done
diff --git a/common/mydeployment.yaml b/common/mydeployment.yaml
new file mode 100644
index 0000000..c81fd93
--- /dev/null
+++ b/common/mydeployment.yaml
@@ -0,0 +1,75 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Some basic fixtures
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    m1.large:
+      type: tosca.nodes.Flavor
+
+    m1.medium:
+      type: tosca.nodes.Flavor
+
+    m1.small:
+      type: tosca.nodes.Flavor
+
+    m1.xlarge:
+      type: tosca.nodes.Flavor
+
+    MyDeployment:
+      type: tosca.nodes.Deployment
+      requirements:
+          - m1.xlarge:
+             node: m1.xlarge
+             relationship: tosca.relationships.SupportsFlavor
+          - m1.large:
+             node: m1.large
+             relationship: tosca.relationships.SupportsFlavor
+          - m1.medium:
+             node: m1.medium
+             relationship: tosca.relationships.SupportsFlavor
+          - m1.small:
+             node: m1.small
+             relationship: tosca.relationships.SupportsFlavor
+
+    mysite:
+      type: tosca.nodes.Site
+      properties:
+          display_name: MySite
+      requirements:
+          - deployment:
+               node: MyDeployment
+               relationship: tosca.relationships.SiteDeployment
+
+    # Attach the Tenant view to the MyDeployment deployment
+    Tenant:
+      type: tosca.nodes.DashboardView
+      properties:
+          no-create: true
+          no-delete: true
+      requirements:
+          - deployment:
+              node: MyDeployment
+              relationship: tosca.relationships.SupportsDeployment
+
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      properties:
+          password: letmein
+#          encrypted_password: pbkdf2_sha256$12000$Qufx9iqtaYma$xs0YurPOcj9qYQna/Qrb3K+im9Yr2XEVr0J4Kqek7AE=
+          firstname: XOS
+          lastname: admin
+          is_admin: true
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - tenant_dashboard:
+              node: Tenant
+              relationship: tosca.relationships.UsesDashboard
+
+
+
diff --git a/common/run_tosca.py b/common/run_tosca.py
new file mode 100755
index 0000000..35a0fb7
--- /dev/null
+++ b/common/run_tosca.py
@@ -0,0 +1,44 @@
+#! /usr/bin/env python
+
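+# Post a TOSCA recipe to the XOS REST API (/api/utility/tosca/run/) and print
+# any log messages returned in the response.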
+import json
+import os
+import requests
+import sys
+import traceback
+
+def main():
+    global opencloud_auth
+
+    if len(sys.argv)!=5:
+        print >> sys.stderr, "syntax: run_tosca.py <port> <username> <password> <fn>"
+        sys.exit(-1)
+
+    port = int(sys.argv[1])
+    username = sys.argv[2]
+    password = sys.argv[3]
+    tosca_fn = sys.argv[4]
+
+    xos_auth=(username, password)
+
+    hostname = "127.0.0.1"
+    url = "http://%s:%d/api/utility/tosca/run/" % (hostname, port)
+
+    recipe = open(tosca_fn).read()
+
+    r = requests.post(url, data={"recipe": recipe}, auth=xos_auth)
+    if (r.status_code != 200):
+        print >> sys.stderr, "ERR: received status %d" % r.status_code
+        try:
+            print >> sys.stderr, r.json()["error_text"]
+        except:
+            print >> sys.stderr, "error while printing the error!"
+            traceback.print_exc()
+            print r.text
+        sys.exit(-1)
+
+    result = r.json()
+    if "log_msgs" in result:
+        print "\n".join(result["log_msgs"])+"\n"
+
+if __name__ == "__main__":
+    main()
+
diff --git a/common/wait_for_onboarding_ready.sh b/common/wait_for_onboarding_ready.sh
new file mode 100755
index 0000000..dbfdde8
--- /dev/null
+++ b/common/wait_for_onboarding_ready.sh
@@ -0,0 +1,28 @@
+#! /bin/bash
+
+display_usage() { 
+    echo -e "\nUsage:\n$0 [xos-listen-port] [name] \n" 
+} 
+
+if [  $# -lt 2 ] 
+then 
+    display_usage
+    exit 1
+fi 
+
+echo "Waiting for $2 to be onboarded"
+while [[ 1 ]]; do
+    STATUS=`curl 0.0.0.0:$1/api/utility/onboarding/$2/ready/ 2> /dev/null`
+    if [[ "$STATUS" == "true" ]]; then
+        echo "$2 is onboarded"
+        exit 0
+    fi
+    echo -ne "."
+    sleep 1
+#    RUNNING_CONTAINER=`sudo docker ps|grep "xos"|awk '{print $$NF}'`
+#    if [[ $RUNNING_CONTAINER == "" ]]; then
+#        echo Container may have failed. check with \"make showlogs\'
+#        exit 1
+#    fi
+done
+
diff --git a/common/wait_for_xos.sh b/common/wait_for_xos.sh
new file mode 100644
index 0000000..afffb86
--- /dev/null
+++ b/common/wait_for_xos.sh
@@ -0,0 +1,13 @@
+#! /bin/bash
+echo "Waiting for XOS to come up"
+until http 0.0.0.0:9999 &> /dev/null
+do
+    sleep 1
+    echo -ne "."
+    RUNNING_CONTAINER=`sudo docker ps|grep "xos"|awk '{print $NF}'`
+    if [[ $RUNNING_CONTAINER == "" ]]; then
+        echo "Container may have failed. Check with \"make showlogs\""
+        exit 1
+    fi
+done
+echo "XOS is ready"
diff --git a/common/wait_for_xos_file.sh b/common/wait_for_xos_file.sh
new file mode 100755
index 0000000..e137164
--- /dev/null
+++ b/common/wait_for_xos_file.sh
@@ -0,0 +1,25 @@
+#! /bin/bash
+
+display_usage() { 
+    echo -e "\nUsage:\n$0 [fn] \n" 
+} 
+
+if [  $# -lt 1 ] 
+then 
+    display_usage
+    exit 1
+fi 
+
+echo "Waiting for XOS to create file $1"
+
+until find $1 &> /dev/null
+do
+    sleep 1
+    echo -ne "."
+    RUNNING_CONTAINER=`sudo docker ps|grep "xos"|awk '{print $NF}'`
+    if [[ $RUNNING_CONTAINER == "" ]]; then
+        echo "Container may have failed. Check with \"make showlogs\""
+        exit 1
+    fi
+done
+echo "XOS is ready"
diff --git a/common/wait_for_xos_port.sh b/common/wait_for_xos_port.sh
new file mode 100755
index 0000000..b16639e
--- /dev/null
+++ b/common/wait_for_xos_port.sh
@@ -0,0 +1,24 @@
+#! /bin/bash
+
+display_usage() { 
+    echo -e "\nUsage:\n$0 [xos-listen-port] \n" 
+} 
+
+if [  $# -lt 1 ] 
+then 
+    display_usage
+    exit 1
+fi 
+
+echo "Waiting for XOS to start listening on port $1"
+until curl 0.0.0.0:$1 &> /dev/null
+do
+    sleep 1
+    echo -ne "."
+    RUNNING_CONTAINER=`sudo docker ps|grep "xos"|awk '{print $NF}'`
+    if [[ $RUNNING_CONTAINER == "" ]]; then
+        echo "Container may have failed. Check with 'make showlogs'"
+        exit 1
+    fi
+done
+echo "XOS is ready"
diff --git a/common/xos_common_config b/common/xos_common_config
new file mode 100644
index 0000000..76ba747
--- /dev/null
+++ b/common/xos_common_config
@@ -0,0 +1,47 @@
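+# Common XOS configuration shared by the configurations in this repository.
+# The values below assume a single-node CloudLab/DevStack-style setup; adjust
+# the [db] and [nova] credentials for other deployments.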
+[plc]
+name=plc
+deployment=plc
+
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+[api]
+host=localhost
+port=8000
+ssl_key=None
+ssl_cert=None
+ca_ssl_cert=None
+ratelimit_enabled=0
+omf_enabled=0
+mail_support_address=support@localhost
+nova_enabled=True
+logfile=/var/log/xos.log
+
+[nova]
+admin_user=admin@domain.com
+admin_password=admin
+admin_tenant=admin
+url=http://localhost:5000/v2.0/
+default_image=None
+default_flavor=m1.small
+default_security_group=default
+ca_ssl_cert=/etc/ssl/certs/ca-certificates.crt
+
+[observer]
+pretend=False
+backoff_disabled=True
+images_directory=/opt/xos/images
+dependency_graph=/opt/xos/model-deps
+logfile=/var/log/xos_backend.log
+save_ansible_output=True
+
+[gui]
+disable_minidashboard=True
+branding_name=Open Cloud
+branding_icon=/static/logo.png
+branding_favicon=/static/favicon.png
+branding_bg=/static/bg.jpg
diff --git a/cord-deprecated/Dockerfile.cord b/cord-deprecated/Dockerfile.cord
new file mode 100644
index 0000000..3e63eb2
--- /dev/null
+++ b/cord-deprecated/Dockerfile.cord
@@ -0,0 +1,27 @@
+RUN mkdir -p /root/setup
+ADD xos/configurations/common/admin-openrc.sh /root/setup/
+ADD xos/configurations/common/controller_settings /root/setup/
+ADD xos/configurations/common/flat_net_name /root/setup/
+ADD xos/configurations/common/nodes.yaml /opt/xos/configurations/common/
+ADD xos/configurations/common/id_rsa.pub /root/setup/padmin_public_key
+ADD xos/configurations/common/id_rsa.pub /opt/xos/synchronizers/vcpe/vcpe_public_key
+ADD xos/configurations/common/id_rsa /opt/xos/synchronizers/vcpe/vcpe_private_key
+ADD xos/configurations/common/id_rsa.pub /opt/xos/synchronizers/monitoring_channel/monitoring_channel_public_key
+ADD xos/configurations/common/id_rsa /opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key
+ADD xos/configurations/common/id_rsa.pub /opt/xos/synchronizers/onos/onos_key.pub
+ADD xos/configurations/common/id_rsa /opt/xos/synchronizers/onos/onos_key
+ADD xos/configurations/common/node_key.pub /root/setup/node_key.pub
+ADD xos/configurations/common/node_key /root/setup/node_key
+ADD xos/configurations/common/ceilometer_url /root/setup/ceilometer_url
+ADD xos/synchronizers/vcpe/supervisor/vcpe-observer.conf /etc/supervisor/conf.d/
+ADD xos/synchronizers/vbng/supervisor/vbng-observer.conf /etc/supervisor/conf.d/
+ADD xos/synchronizers/onos/supervisor/onos-observer.conf /etc/supervisor/conf.d/
+ADD xos/synchronizers/monitoring_channel/supervisor/monitoring_channel_observer.conf /etc/supervisor/conf.d/
+RUN sed -i 's/proxy_ssh=True/proxy_ssh=False/' /opt/xos/synchronizers/vcpe/vcpe_synchronizer_config
+RUN sed -i 's/proxy_ssh=True/proxy_ssh=False/' /opt/xos/synchronizers/monitoring_channel/monitoring_channel_synchronizer_config
+ADD xos/configurations/cord/virtualbng.json /root/setup/
+ADD xos/configurations/cord/vtn-network-cfg.json /root/setup/
+
+CMD /usr/bin/make -C /opt/xos/configurations/cord -f Makefile.inside; /bin/bash
+
+#CMD ["/bin/bash"]
diff --git a/cord-deprecated/Makefile b/cord-deprecated/Makefile
new file mode 100644
index 0000000..184f2d5
--- /dev/null
+++ b/cord-deprecated/Makefile
@@ -0,0 +1,100 @@
+SETUPDIR:=../setup
+MYIP:=$(shell hostname -i)
+
+cloudlab: common_cloudlab cord acord
+
+devstack: upgrade_pkgs common_devstack devstack_net_fix cord
+
+cord: virtualbng_json vtn_network_cfg_json
+	sudo MYIP=$(MYIP) docker-compose up -d
+	bash ../common/wait_for_xos.sh
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py none /opt/xos/configurations/common/fixtures.yaml
+	sudo docker-compose run xos python /opt/xos/tosca/run.py none /opt/xos/configurations/common/mydeployment.yaml
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/cloudlab-openstack.yaml
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /root/setup/nodes.yaml
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord/cord.yaml
+
+containers:
+	cd ../../../containers/xos; make devel
+	cd ../../../containers/synchronizer; make
+
+common_cloudlab:
+	make -C ../common -f Makefile.cloudlab
+
+common_devstack:
+	make -C ../common -f Makefile.devstack
+
+acord: cloudlab_ceilometer_custom_images ceilometer_cloudlab_cord_plugins
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord/ceilometer.yaml
+
+ceilometer_cloudlab_cord_plugins:
+	if [ -d ./ceilometer-plugins ]; then rm -fr ./ceilometer-plugins; fi
+	git clone https://github.com/srikanthvavila/ceilometer-plugins.git
+	sudo cp -r ceilometer-plugins/network/ext_services /usr/lib/python2.7/dist-packages/ceilometer/network/
+	sudo cp -r ceilometer-plugins/network/statistics/onos /usr/lib/python2.7/dist-packages/ceilometer/network/statistics/
+	sudo cp ceilometer-plugins/network/statistics/__init__.py /usr/lib/python2.7/dist-packages/ceilometer/network/statistics/ 
+	sudo cp ceilometer-plugins/entry_points.txt /usr/lib/python2.7/dist-packages/ceilometer-*egg-info/
+	sudo cp ceilometer-plugins/pipeline.yaml /etc/ceilometer/
+	echo "Restarting ceilometer-agent-notification"
+	sudo service ceilometer-agent-notification restart
+	echo "Restarting ceilometer-agent-central"
+	sudo service ceilometer-agent-central restart
+
+ceilometer_pub_sub:
+	if [ -d ./pub-sub ]; then rm -fr ./pub-sub; fi
+	git clone https://github.com/srikanthvavila/pub-sub.git
+	echo "Starting Ceilometer PUB/SUB service...Ensure zookeeper and kafka services are launched (if required)"
+	cd pub-sub/ceilometer_pub_sub/ ; python sub_main.py & 
+	cd ../..
+
+virtualbng_json:
+	export SETUPDIR=$(SETUPDIR); bash make-virtualbng-json.sh
+
+vtn_network_cfg_json:
+	export SETUPDIR=$(SETUPDIR); bash make-vtn-networkconfig-json.sh
+
+stop:
+	sudo MYIP=$(MYIP) docker-compose stop
+
+rm:
+	sudo MYIP=$(MYIP) docker-compose rm
+
+showlogs:
+	sudo MYIP=$(MYIP) docker-compose logs
+
+ps:
+	sudo MYIP=$(MYIP) docker-compose ps
+
+dataplane: etc_hosts
+	cd dataplane; ./gen-inventory.sh > hosts
+	cd dataplane; ansible-playbook -i hosts dataplane.yaml
+
+dataplane_bm: dataplane
+	cd dataplane; bash -c "./generate-bm.sh > hosts-bm"
+	cd dataplane; sudo bash -c "ansible-playbook -i hosts-bm dataplane-bm.yaml"
+
+etc_hosts:
+	sudo bash -c "sed -i '/^10.11.10/ d' /etc/hosts"
+	cd dataplane; sudo bash -c "./gen-etc-hosts.sh >> /etc/hosts"
+
+setup_client:
+	# add subscriber to vOLT?  Is there a REST API?
+	echo "Don't forget: add-subscriber-access of:0000000000000001 1 432"
+	cd dataplane; ansible -i hosts client -m shell -s -a "route del default gw 10.11.10.5; dhclient br-sub"
+	# reboot the vBNG ONOS
+	cd dataplane; ansible -i hosts onos_vbng -m shell -s -a "docker restart ONOS"
+
+cleanup: stop rm
+	cd dataplane; ./cleanup.sh
+	bash -c "source ../setup/admin-openrc.sh; nova list --all-tenants; neutron net-list"
+	echo "Don't forget to clean up vCPE containers"
+
+devstack_net_fix:
+	sudo ../common/devstack/net-fix.sh
+	sudo bash -c "source ../setup/admin-openrc.sh; neutron subnet-update private-subnet --dns-nameservers list=true 8.8.8.8 8.8.4.4"
+
+upgrade_pkgs:
+	sudo pip install httpie --upgrade
+
+cloudlab_ceilometer_custom_images:
+	bash -c "source ../setup/admin-openrc.sh; glance image-show ceilometer-trusty-server-multi-nic || if test -f /proj/xos-PG0/images/ceilometer-trusty-server-multi-nic.compressed.qcow2 ; then glance image-create --name ceilometer-trusty-server-multi-nic --disk-format qcow2 --file /proj/xos-PG0/images/ceilometer-trusty-server-multi-nic.compressed.qcow2 --container-format bare ; else mkdir -p /tmp/images && wget http://www.vicci.org/cord/ceilometer-trusty-server-multi-nic.compressed.qcow2 -P /tmp/images && glance image-create --name ceilometer-trusty-server-multi-nic --disk-format qcow2 --file /tmp/images/ceilometer-trusty-server-multi-nic.compressed.qcow2 --container-format bare ; fi "
diff --git a/cord-deprecated/Makefile.inside b/cord-deprecated/Makefile.inside
new file mode 100644
index 0000000..d7bdbaf
--- /dev/null
+++ b/cord-deprecated/Makefile.inside
@@ -0,0 +1,12 @@
+all: setup_xos run_develserver
+
+setup_xos:
+	bash /opt/xos/tools/docker_setup_xos
+	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/fixtures.yaml
+	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/base.yaml
+	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/nodes.yaml
+	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord/cord.yaml
+	python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord/ceilometer.yaml
+
+run_develserver:
+	cd /opt/xos; python manage.py runserver 0.0.0.0:8000 --insecure
diff --git a/cord-deprecated/README-VTN.md b/cord-deprecated/README-VTN.md
new file mode 100644
index 0000000..a3c4e69
--- /dev/null
+++ b/cord-deprecated/README-VTN.md
@@ -0,0 +1,156 @@
+# VTN notes:
+
+see also: https://github.com/hyunsun/documentations/wiki/Neutron-ONOS-Integration-for-CORD-VTN#onos-setup
+
+VTN doesn't seem to like CloudLab's networks (flat-net-1, ext-net, etc.). I've placed a script in xos/scripts/ called destroy-all-networks.sh that will automate tearing down all of CloudLab's Neutron networks.
+
+    cd xos/tools
+    ./destroy-all-networks.sh
+
+Inside the XOS container, update the configuration. Make sure to restart the OpenStack Synchronizer afterward; it might also be a good idea to restart the XOS UI:
+
+    python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/tosca/samples/vtn.yaml
+    emacs /opt/xos/xos_configuration/xos_common_config
+        [networking]
+        use_vtn=True
+    supervisorctl restart observer
+
+### ctl node:
+
+    # set ONOS_VTN_HOSTNAME to the host where the VTN container was installed
+    ONOS_VTN_HOSTNAME="cp-2.smbaker-xos5.xos-pg0.clemson.cloudlab.us"
+    apt-get -y install python-pip
+    pip install -U setuptools pip
+    pip install testrepository
+    git clone https://github.com/openstack/networking-onos.git
+    cd networking-onos
+    python setup.py install
+    # the above fails the first time with an error about pbr.json
+    # I ran it again and it succeeded, but I am skeptical there's
+    # not still an issue lurking...
+    cat > /usr/local/etc/neutron/plugins/ml2/conf_onos.ini <<EOF
+    [onos]
+    url_path = http://$ONOS_VTN_HOSTNAME:8181/onos/cordvtn
+    username = karaf
+    password = karaf
+    EOF
+    emacs /etc/neutron/plugins/ml2/ml2_conf.ini
+        update settings as per vtn docs ([ml2] and [ml2_type_vxlan] sections)
+    systemctl stop neutron-server
+    # I started neutron manually to make sure it's using exactly the right config
+    # files. Maybe it can be restarted using systemctl instead...
+    /usr/bin/neutron-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /usr/local/etc/neutron/plugins/ml2/conf_onos.ini
+
+### Compute nodes and nm nodes:
+
+    cd xos/configurations/cord/dataplane
+    ./generate-bm.sh > hosts-bm
+    ansible-playbook -i hosts-bm dataplane-vtn.yaml
+    # the playbook will:
+    #  1) turn off neutron openvswitch-agent
+    #  2) set openvswitch to listen on port 6641
+    #  3) restart openvswitch
+    #  4) delete any existing br-int bridge
+    #  5) [nm only] turn off neutron-dhcp-agent
+
+Additional compute node stuff:
+
+I've been deleting any existing unused bridges. Not sure if it's necessary.
+
+    ovs-vsctl del-br br-tun
+    ovs-vsctl del-br br-flat-lan-1
+
+To get the management network working, we need to create a management network template, slice, and network. configurations/cord/vtn.yaml will do this for you. Then add a connection to the management network for any slice that needs management connectivity.
+    
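+As a minimal sketch (assuming the usual XOS container layout and the padmin@vicci.org admin account), the recipe can be applied with the TOSCA engine just like the other recipes:
+
+    python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord/vtn.yaml
+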
+### Notes:
+* I've configured the Open vSwitch switches to use port 6641 instead of port 6640. This is because the VTN app listens on 6640
+itself, and since we're running it in docker 'host' networking mode now, it would conflict with an Open vSwitch that was
+also listening on 6640.
+* Adding use_vtn=True to the [networking] section in the XOS config file has two effects: 1) it sets the gateway in sync_controller_networks, and 2) it disables automatic creation of nat-net for new slices. This is because VTN will fail if there is no gateway on a network, and because we don't have nat-net under the VTN configuration.
+* When using ovs-ofctl to look at flow rules, if you get a protocol error, try "ovs-ofctl show -O OpenFlow13 br-int"
+* Note that the VTN Synchronizer isn't started automatically. It's only used for inter-Service connectivity, so no need to mess with it until intra-Slice connectivity is working first.
+* Note that the VTN Synchronizer won't connect non-access networks. Any network templates you want VTN to connect must have Access set to "Direct" or "Indirect". 
+
+In case the management network isn't working, you can use a VNC tunnel, like this:
+
+    # on compute node, run the following and note the IP address and port number
+    virsh vncdisplay <instance-id>
+    
+    # from home
+    ssh -o "GatewayPorts yes"  -L <port+5900>:<IP>:<port+5900> <username>@<compute_node_hostname>
+    
+    # example
+    ssh -o "GatewayPorts yes"  -L 5901:192.168.0.7:5901 smbaker@cp-1.smbaker-xos3.xos-pg0.clemson.cloudlab.us
+
+Then open a VNC session to the local port on your local machine. You'll have a console on the Instance. The username is "Ubuntu" and the password can be obtained from your CloudLab experiment description.
+
+### Things that can be tested:
+
+* Create an Instance, it should have a Private network, and there should be a tap attached from the instance to br-int
+* Two Instances in the same Slice can talk to one another. They can be on the same machine or different machines.
+* Two Slices can talk to one another if the slices are associated with Services and those Services have a Tenancy relationship between them. Note that 1) The VTN Synchronizer must be running, 2) There must be a Private network with Access=[Direct|Indirect], and 3) The connectivity is unidirectional, from subscriber service to provider service.
+
+### Testing service composition
+
+1. Change the private network template's 'Access' field from None to Direct
+2. Create a Service, Service-A
+3. Enter Slice Admin for Slice-1 and assign it to Service-A
+4. Create a Service, Service-B
+5. Enter Slice Admin for Slice-2 and assign it to Service-B
+6. Enter Service Admin for Service-B, Go to Tenancy Tab
+7. In the 'Provided Tenants' section of Service-B, create a Tenant with Subscriber-Service=Service-A.
+8. Start the VTN Observer. It will send a REST request to VTN app.
+9. Launch tcpdump in one of Slice-2's instances (see the example commands after this list)
+10. From Slice-1, start pinging the instance in Slice-2 where you launched tcpdump
+11. You should see the pings arrive and responses sent out. Note that the ping responses will not reach Slice-1, since VTN traffic is unidirectional.
+12. Delete the Tenancy relation you created in Step #7. The ping traffic should no longer appear in the tcpdump.
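+
+For steps 9 and 10, the commands inside the instances look roughly like this (interface names and addresses will differ in your setup):
+
+    # in the Slice-2 instance
+    sudo tcpdump -i eth0 icmp
+
+    # in the Slice-1 instance, using the private IP of the Slice-2 instance
+    ping <slice-2-private-ip>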
+
+### Getting external connectivity working on cloudlab
+
+On head node:
+
+    ovs-vsctl del-br br-flat-lan-1
+    ifconfig eth2 10.123.0.1
+    iptables --table nat --append POSTROUTING --out-interface br-ex -j MASQUERADE
+    #arp -s 10.123.0.3 fa:16:3e:ea:11:0a
+    sysctl net.ipv4.conf.all.send_redirects
+    sysctl net.ipv4.conf.all.send_redirects=0
+    sysctl net.ipv4.conf.default.send_redirects=0
+    sysctl net.ipv4.conf.eth0.send_redirects=0
+    sysctl net.ipv4.conf.br-ex.send_redirects=0
+    
+Substitute for your installation:
+
+    10.123.0.3 = wan_ip of vSG
+    10.123.0.1 = wan gateway
+    fa:16:3e:ea:11:0a = wan_mac of vSG
+    00:8c:fa:5b:09:d8 = wan_mac of gateway
+    
+### Setting up a test-client
+
+Before setting up VTN, create a bridge and attach it to the dataplane device on each compute node:
+
+    brctl addbr br-inject
+    brctl addif br-inject eth3   # substitute dataplane eth device here, may be different on each compute node
+    ip link set br-inject up
+    ip link set dev br-inject promisc on
+    
+Then update the network-config attribute of the VTN ONOS App in XOS to use a dataplaneIntf of br-inject instead of the eth device. Bring up VTN and a vSG. WAN connectivity and everything else should be working fine.
+
+Add a new slice, mysite_client, and make sure to give it both a private and a management network. Bring up an instance on the same node as the vSG you want to test. On the compute node, run the following:
+
+    MAC=<make-up-some-mac>
+    INSTANCE=<instance-id>
+    virsh attach-interface --domain $INSTANCE --type bridge --source br-inject --model virtio --mac $MAC --config --live
+    
+Log into the vSG via the management interface. Inside of the vSG run the following:
+
+    STAG=<your s-tag here>
+    CTAG=<your c-tag here>
+    ip link add link eth2 eth2.$STAG type vlan id $STAG
+    ip link add link eth2.$STAG eth2.$STAG.$CTAG type vlan id $CTAG
+    ip link set eth2.$STAG up
+    ip link set eth2.$STAG.$CTAG up
+    ip addr add 192.168.0.2/24 dev eth2.$STAG.$CTAG
+    ip route del default
+    ip route add default via 192.168.0.1
diff --git a/cord-deprecated/README.md b/cord-deprecated/README.md
new file mode 100644
index 0000000..64075d9
--- /dev/null
+++ b/cord-deprecated/README.md
@@ -0,0 +1,151 @@
+# CORD development environment
+
+This configuration can be used to set up a CORD development environment.
+It does the following:
+
+* Sets up a basic dataplane for testing end-to-end packet flow between a subscriber client and the Internet
+* Brings up ONOS apps for controlling the dataplane: virtualbng, olt
+* Configures XOS with the CORD services: vCPE, vBNG, vOLT
+
+**NOTE: This configuration is stale and likely not working at present.  If you are looking to evaluate 
+and/or contribute to [CORD](http://opencord.org/), 
+you should look instead at the [cord-pod](../cord-pod) configuration. Almost
+all CORD developers have transitioned to [cord-pod](../cord-pod).**
+
+## End-to-end dataplane
+
+The configuration uses XOS to set up an end-to-end dataplane for development of the XOS services and ONOS apps
+used in CORD.  It abstracts away most of the complexity of the CORD hardware using virtual networks
+and Open vSwitch (OvS) switches.  At a high level the dataplane looks like this:
+
+```
+             olt                 virtualbng
+             ----                  ----
+             ONOS                  ONOS
+              |                     |
+client ----> CPqD ----> vCPE ----> OvS ----> Internet
+         1         2          3         4
+```
+
+On the datapath are two OvS switches, controlled by the `olt` and `virtualbng` ONOS applications.  Once all the pieces are in
+place, the client at left should be able to obtain an IP address via DHCP from the vCPE and send packets out to the Internet.
+
+All of the components in the above diagram (i.e., client, OvS switches, ONOS, and vCPE) currently run in distinct VMs
+created by XOS.  The numbers in the diagram correspond to networks set up by XOS:
+
+1. subscriber_network
+2. lan_network
+3. wan_network
+4. public_network
+
+## How to run it
+
+The configuration is intended to be run on [CloudLab](http://cloudlab.us).
+It launches an XOS container on CloudLab that runs the XOS develserver.  The container is left running in the background.
+
+To get started on CloudLab:
+* Create an experiment using the *OpenStack-CORD* profile.  (You can also use the *OpenStack* profile, but choose *Kilo*
+with two compute nodes and disable security groups.)
+* Wait until you get an email from CloudLab with title "OpenStack Instance Finished Setting Up".
+* Login to the *ctl* node of your experiment and run:
+```
+ctl:~$ git clone https://github.com/open-cloud/xos.git
+ctl:~$ cd xos/xos/configurations/cord/
+ctl:~/xos/xos/configurations/cord$ make
+```
+
+Running `make` in this directory creates the XOS Docker container and runs the TOSCA engine with `cord.yaml` to
+configure XOS with the CORD services.  In addition, a number of VMs are created:
+
+1. *Slice mysite_onos*: runs the ONOS Docker container with `virtualbng` app loaded
+1. *Slice mysite_onos*: runs the ONOS Docker container with `olt` app loaded
+1. *Slice mysite_vbng*: for running OvS with the `virtualbng` app as controller
+1. *Slice mysite_volt*: for running the CPqD switch with the `olt` app as controller
+1. *Slice mysite_clients*: a subscriber client for end-to-end testing
+1. *Slice mysite_vcpe*: runs the vCPE Docker container (if not using containers on bare metal)
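+
+To check that these VMs have come up, list them from the *ctl* node after sourcing the OpenStack admin credentials (the exact path to `admin-openrc.sh` depends on your setup; this is only a sanity check):
+```
+ctl:~$ source admin-openrc.sh   # wherever your setup placed the OpenStack credentials
+ctl:~$ nova list --all-tenants | grep mysite
+```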
+
+Once all the VMs are up and the ONOS apps are configured, XOS should be able to get an address mapping from the `virtualbng`
+ONOS app for the vCPE. To verify that it has received an IP address mapping, look at the **Routeable subnet:** field in
+the appropriate *Vbng tenant* object in XOS.  It should contain an IP address in the 10.254.0.0/24 subnet.
+
+After launching the ONOS apps, it is necessary to configure software switches along the dataplane so that ONOS can control
+them.  To do this, from the `cord` configuration directory:
+```
+ctl:~/xos/xos/configurations/cord$ cd dataplane/
+ctl:~/xos/xos/configurations/cord/dataplane$ ./gen-inventory.sh > hosts
+ctl:~/xos/xos/configurations/cord/dataplane$ ansible-playbook -i hosts dataplane.yaml
+```
+
+To set up the dataplane for containers on bare metal, perform these steps in addition to the above (note: make sure to sudo when running the playbook):
+```
+ctl:~/xos/xos/configurations/cord/dataplane$ ./generate-bm.sh > hosts-bm   
+ctl:~/xos/xos/configurations/cord/dataplane$ sudo ansible-playbook -i hosts-bm dataplane-bm.yaml
+```
+
+Check that the vCPE container has started by going into the XOS UI, selecting 'Services', 'service_vcpe', 'Administration', 'Vcpe Tenants', and making sure there's a green icon next to the vCPE.
+
+If the vCPE Tenant is still red, then the Instance could be exponentially backed-off due to errors while trying to sync before dataplane.yaml was run. You can reset the exponential backoff by tracking down the vCPE Instance (Slices->mysite_vcpe->Instances, and find the Instance associated with the vCPE Tenant) and hitting the save button.
+
+Now SSH into ONOS running the OLT app (see below) and activate the subscriber:
+```
+onos> add-subscriber-access of:0000000000000001 1 432
+```
+
+At this point the client should be able to get an IP address from the vCPE via
+DHCP.  To set up the IP address and default route on the client:
+```
+client:$ sudo route del default gw 10.11.10.5
+client:$ sudo dhclient br-sub
+```
+Once `dhclient` returns, the client should now be able to surf the Internet
+through the dataplane.
+
+## Setting up /etc/hosts
+
+To make it easy to log into the various VMs that make up the dataplane, add entries for them into `/etc/hosts` on the
+*ctl* node.  As root, run:
+```
+ctl:~/xos/xos/configurations/cord/dataplane$ ./gen-etc-hosts.sh >> /etc/hosts
+```
+For example, to log into the client:
+```
+ctl:~$ ssh ubuntu@client
+```
+
+## How to log into ONOS
+
+ONOS apps are run inside Docker containers hosted in VMs.  All ports exposed by the ONOS container are forwarded to the
+outside, and can be accessed from the *ctl* node over the `flat-lan-1-net` network.  Assuming that `/etc/hosts`
+has been configured as described above, it is possible to SSH to the ONOS running the `virtualbng` app as follows (password is *karaf*):
+
+```
+$ ssh -p 8101 karaf@onos_vbng
+Password authentication
+Password:
+Welcome to Open Network Operating System (ONOS)!
+     ____  _  ______  ____
+    / __ \/ |/ / __ \/ __/
+   / /_/ /    / /_/ /\ \
+   \____/_/|_/\____/___/
+
+
+Hit '<tab>' for a list of available commands
+and '[cmd] --help' for help on a specific command.
+Hit '<ctrl-d>' or type 'system:shutdown' or 'logout' to shutdown ONOS.
+
+onos>
+```
+
+For instance, to check the IP address mappings managed by the `virtualbng` app:
+
+```
+onos> vbngs
+   Private IP - Public IP
+   10.0.1.3 - 10.254.0.129
+```
+
+## Troubleshooting
+
+#### Problem: No external connectivity from vCPE container
+1. Make sure the hosts listed in `virtualbng.json` are the actual compute nodes used in your experiment.
+2. Try rebooting the ONOS container running the `virtualbng` app: `$ ssh ubuntu@onos-vbng "sudo docker restart ONOS"`
diff --git a/cord-deprecated/ceilometer.yaml b/cord-deprecated/ceilometer.yaml
new file mode 100644
index 0000000..464b07b
--- /dev/null
+++ b/cord-deprecated/ceilometer.yaml
@@ -0,0 +1,270 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup the CORD monitoring services -- Ceilometer (and optionally sFlow).
+
+imports:
+   - custom_types/xos.yaml
+
+node_types:
+    tosca.nodes.SFlowService:
+        derived_from: tosca.nodes.Root
+        description: >
+            XOS SFlow Collection Service
+        capabilities:
+            scalable:
+                type: tosca.capabilities.Scalable
+            service:
+                type: tosca.capabilities.xos.Service
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Type of service.
+            view_url:
+                type: string
+                required: false
+                description: URL to follow when icon is clicked in the Service Directory.
+            icon_url:
+                type: string
+                required: false
+                description: ICON to display in the Service Directory.
+            enabled:
+                type: boolean
+                default: true
+            published:
+                type: boolean
+                default: true
+                description: If True then display this Service in the Service Directory.
+            public_key:
+                type: string
+                required: false
+                description: Public key to install into Instances to allow Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
+            versionNumber:
+                type: string
+                required: false
+                description: Version number of Service.
+            sflow_port:
+              type: integer
+              required: false
+              default: 6343
+              description: sFlow listening port
+            sflow_api_port:
+              type: integer
+              required: false
+              default: 33333
+              description: sFlow publish subscribe api listening port
+
+    tosca.nodes.CeilometerService:
+        derived_from: tosca.nodes.Root
+        description: >
+            XOS Ceilometer Service
+        capabilities:
+            scalable:
+                type: tosca.capabilities.Scalable
+            service:
+                type: tosca.capabilities.xos.Service
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Type of service.
+            view_url:
+                type: string
+                required: false
+                description: URL to follow when icon is clicked in the Service Directory.
+            icon_url:
+                type: string
+                required: false
+                description: ICON to display in the Service Directory.
+            enabled:
+                type: boolean
+                default: true
+            published:
+                type: boolean
+                default: true
+                description: If True then display this Service in the Service Directory.
+            public_key:
+                type: string
+                required: false
+                description: Public key to install into Instances to allow Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
+            versionNumber:
+                type: string
+                required: false
+                description: Version number of Service.
+            ceilometer_pub_sub_url:
+                type: string
+                required: false
+                description: REST URL of ceilometer PUB/SUB component
+
+    tosca.nodes.CeilometerTenant:
+        derived_from: tosca.nodes.Root
+        description: >
+            CORD: A Tenant of the Ceilometer Service.
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of tenant
+
+topology_template:
+  node_templates:
+    service_ceilometer:
+      type: tosca.nodes.CeilometerService
+      requirements:
+      properties:
+          view_url: /admin/ceilometer/ceilometerservice/$id$/
+          kind: ceilometer
+          ceilometer_pub_sub_url: http://10.11.10.1:4455/
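+          # ceilometer_pub_sub_url should point at the Ceilometer PUB/SUB
+          # component (for example, the one launched by the Makefile's
+          # ceilometer_pub_sub target); 10.11.10.1:4455 is only the value used
+          # in this setup, so adjust it for your deployment.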
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+      artifacts:
+          pubkey: /opt/xos/synchronizers/monitoring_channel/monitoring_channel_public_key
+
+#    service_sflow:
+#      type: tosca.nodes.SFlowService
+#      requirements:
+#      properties:
+#          view_url: /admin/ceilometer/sflowservice/$id$/
+#          kind: sflow
+#          sflow_port: 6343
+#          sflow_api_port: 33333
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    ceilometer_network:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: ceilometer_client_access
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_ceilometer
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_ceilometer
+              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    ceilometer-trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    m1.small:
+      type: tosca.nodes.Flavor
+
+    mysite_ceilometer:
+      description: Ceilometer Proxy Slice
+      type: tosca.nodes.Slice
+      requirements:
+          - ceilometer_service:
+              node: service_ceilometer
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: ceilometer-trusty-server-multi-nic
+                relationship: tosca.relationships.DefaultImage
+          - default_flavor:
+                node: m1.small
+                relationship: tosca.relationships.DefaultFlavor
+
+#    mysite_sflow:
+#      description: Slice for sFlow service
+#      type: tosca.nodes.Slice
+#      requirements:
+#          - sflow_service:
+#              node: service_sflow
+#              relationship: tosca.relationships.MemberOfService
+#          - site:
+#              node: mysite
+#              relationship: tosca.relationships.MemberOfSite
+
+    my_ceilometer_tenant:
+      description: Ceilometer Service default Tenant
+      type: tosca.nodes.CeilometerTenant
+      requirements:
+          - provider_service:
+              node: service_ceilometer
+              relationship: tosca.relationships.MemberOfService
+       
+    # Virtual machines
+#    sflow_service_instance:
+#      type: tosca.nodes.Compute
+#      capabilities:
+#        # Host container properties
+#        host:
+#         properties:
+#           num_cpus: 1
+#           disk_size: 10 GB
+#           mem_size: 4 MB
+#        # Guest Operating System properties
+#        os:
+#          properties:
+#            # host Operating System image properties
+#            architecture: x86_64
+#            type: linux
+#            distribution: Ubuntu
+#            version: 14.10
+#      requirements:
+#          - slice:
+#                node: mysite_sflow
+#                relationship: tosca.relationships.MemberOfSlice
+
+    Customer Care:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosDiagnostic
+
+    TruckRoll:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosTruckroll
+
+    Ceilometer:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosCeilometerDashboard
+
+
+    Tenant:
+      type: tosca.nodes.DashboardView
+      properties:
+          no-create: true
+          no-update: true
+          no-delete: true
+
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      properties:
+          firstname: XOS
+          lastname: admin
+          is_admin: true
+      requirements:
+          - customer_care_dashboard:
+              node: Customer Care
+              relationship: tosca.relationships.UsesDashboard
+          - truckroll_dashboard:
+              node: TruckRoll
+              relationship: tosca.relationships.UsesDashboard
+          - ceilometer_dashboard:
+              node: Ceilometer
+              relationship: tosca.relationships.UsesDashboard
+          - tenant_dashboard:
+              node: Tenant
+              relationship: tosca.relationships.UsesDashboard
diff --git a/cord-deprecated/cord.yaml b/cord-deprecated/cord.yaml
new file mode 100644
index 0000000..c708d8e
--- /dev/null
+++ b/cord-deprecated/cord.yaml
@@ -0,0 +1,550 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup CORD-related services -- vOLT, vCPE, vBNG.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    # CORD Services
+    service_vtr:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /admin/vtr/vtrservice/$id$/
+          kind: vTR
+
+    service_volt:
+      type: tosca.nodes.Service
+      requirements:
+          - vsg_tenant:
+              node: service_vsg
+              relationship: tosca.relationships.TenantOfService
+          - lan_network:
+              node: lan_network
+              relationship: tosca.relationships.UsesNetwork
+          - wan_network:
+              node: wan_network
+              relationship: tosca.relationships.UsesNetwork
+      properties:
+          view_url: /admin/cord/voltservice/$id$/
+          kind: vOLT
+
+    # set a pool of addresses that we can hand out for the VSG Wan.
+    public_addresses:
+      type: tosca.nodes.AddressPool
+      properties:
+          addresses: 10.123.0.0/24 10.124.0.0/24
+
+    service_vsg:
+      type: tosca.nodes.VSGService
+      requirements:
+          - vbng_tenant:
+              node: service_vbng
+              relationship: tosca.relationships.TenantOfService
+      properties:
+          view_url: /admin/cord/vsgservice/$id$/
+          backend_network_label: hpc_client
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          private_key_fn: /opt/xos/synchronizers/vcpe/vcpe_private_key
+#          node_label: label_vsg
+      artifacts:
+          pubkey: /opt/xos/synchronizers/vcpe/vcpe_public_key
+
+    service_vbng:
+      type: tosca.nodes.VBNGService
+      properties:
+          view_url: /admin/cord/vbngservice/$id$/
+# if unspecified, vbng observer will look for an ONOSApp Tenant and
+# generate a URL from its IP address
+#          vbng_url: http://10.11.10.24:8181/onos/virtualbng/
+
+    service_ONOS_vBNG:
+      type: tosca.nodes.ONOSService
+      requirements:
+      properties:
+          kind: onos
+          view_url: /admin/onos/onosservice/$id$/
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+      artifacts:
+          pubkey: /opt/xos/synchronizers/onos/onos_key.pub
+
+#
+# To actually bring up the vBNG app
+# - Set up the dataplane using the ansible script
+# - Log into the vBNG ONOS and run 'devices' to get switch dpID
+# - Change the dpID values in vBNG ONOS app in XOS GUI
+# - (Synchronizer should copy the files to ONOS container immediately)
+# - Log into service_ONOS_vBNG VM and restart ONOS Docker container
+#   (Should roll this step into a Synchronizer)
+#
+    vBNG_ONOS_app:
+      type: tosca.nodes.ONOSvBNGApp
+      requirements:
+          - onos_tenant:
+              node: service_ONOS_vBNG
+              relationship: tosca.relationships.TenantOfService
+          - vbng_service:
+              node: service_vbng
+              relationship: tosca.relationships.UsedByService
+      properties:
+          dependencies: org.onosproject.proxyarp, org.onosproject.virtualbng, org.onosproject.openflow, org.onosproject.fwd
+          config_network-cfg.json: >
+            {
+              "ports" : {
+                "of:0000000000000001/1" : {
+                  "interfaces" : [
+                    {
+                      "ips"  : [ "10.0.1.253/24" ],
+                      "mac"  : "00:00:00:00:00:99"
+                    }
+                  ]
+                },
+                "of:0000000000000001/2" : {
+                  "interfaces" : [
+                    {
+                      "ips"  : [ "10.254.0.2/24" ],
+                      "mac"  : "00:00:00:00:00:98"
+                    }
+                  ]
+                }
+              }
+            }
+          config_virtualbng.json: { get_artifact: [ SELF, virtualbng_json, LOCAL_FILE] }
+      artifacts:
+          virtualbng_json: /root/setup/virtualbng.json
+
+    service_ONOS_vOLT:
+      type: tosca.nodes.ONOSService
+      requirements:
+      properties:
+          kind: onos
+          view_url: /admin/onos/onosservice/$id$/
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          rest_onos/v1/network/configuration/: >
+            {
+              "devices" : {
+                "of:0000000000000001" : {
+                  "accessDevice" : {
+                    "uplink" : "2",
+                    "vlan"   : "222",
+                    "defaultVlan" : "1"
+                  },
+                  "basic" : {
+                    "driver" : "pmc-olt"
+                  }
+                }
+              }
+            }
+      artifacts:
+          pubkey: /opt/xos/synchronizers/onos/onos_key.pub
+
+
+    vOLT_ONOS_app:
+      type: tosca.nodes.ONOSvOLTApp
+      requirements:
+          - onos_tenant:
+              node: service_ONOS_vOLT
+              relationship: tosca.relationships.TenantOfService
+          - volt_service:
+              node: service_volt
+              relationship: tosca.relationships.UsedByService
+      properties:
+          install_dependencies: onos-ext-notifier-1.0-SNAPSHOT.oar, onos-ext-volt-event-publisher-1.0-SNAPSHOT.oar
+          dependencies: org.onosproject.openflow-base, org.onosproject.olt, org.ciena.onos.ext_notifier, org.ciena.onos.volt_event_publisher
+          component_config: >
+             {
+                "org.ciena.onos.ext_notifier.KafkaNotificationBridge":{
+                   "rabbit.user": "<rabbit_user>",
+                   "rabbit.password": "<rabbit_password>",
+                   "rabbit.host": "<rabbit_host>",
+                   "publish.kafka": "false",
+                   "publish.rabbit": "true",
+                   "volt.events.rabbit.topic": "notifications.info",
+                   "volt.events.rabbit.exchange": "voltlistener",
+                   "volt.events.opaque.info": "{project_id: <keystone_tenant_id>, user_id: <keystone_user_id>}",
+                   "publish.volt.events": "true"
+                }
+             }
+#          config_network-cfg.json: >
+#            {
+#              "devices" : {
+#                "of:0000000000000001" : {
+#                  "accessDevice" : {
+#                    "uplink" : "2",
+#                    "vlan"   : "222",
+#                    "defaultVlan" : "1"
+#                  },
+#                  "basic" : {
+#                    "driver" : "default"
+#                  }
+#                }
+#              }
+#            }
+
+    # Network templates
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    Public network hack:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          visibility: private
+          translation: NAT
+          shared_network_name: tun0-net
+
+
+    # Networks required by the CORD setup
+    lan_network:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_vsg
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_vsg
+              relationship: tosca.relationships.ConnectsToSlice
+          - connection:
+              node: mysite_vsg
+              relationship: tosca.relationships.ConnectsToSlice
+
+    wan_network:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_vsg
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_vsg
+              relationship: tosca.relationships.ConnectsToSlice
+          - connection:
+              node: mysite_vsg
+              relationship: tosca.relationships.ConnectsToSlice
+
+    Private-Direct:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          access: direct
+
+    Private-Indirect:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          access: indirect
+
+    subscriber_network:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_volt
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_volt
+              relationship: tosca.relationships.ConnectsToSlice
+          - connection:
+              node: mysite_clients
+              relationship: tosca.relationships.ConnectsToSlice
+
+    public_network:
+      type: tosca.nodes.network.Network
+      properties:
+      requirements:
+          - network_template:
+              node: Public network hack
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_vbng
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_vbng
+              relationship: tosca.relationships.ConnectsToSlice
+
+
+    mysite:
+      type: tosca.nodes.Site
+
+    label_vsg:
+      type: tosca.nodes.NodeLabel
+
+    # CORD Slices
+    mysite_vsg:
+      description: vSG Controller Slice
+      type: tosca.nodes.Slice
+      requirements:
+          - vsg_service:
+              node: service_vsg
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - vsg_docker_image:
+              node: docker-vsg
+              relationship: tosca.relationships.UsesImage
+#      properties:
+#          default_isolation: container
+
+    mysite_onos_vbng:
+      description: ONOS Controller Slice for vBNG
+      type: tosca.nodes.Slice
+      requirements:
+          - ONOS:
+              node: service_ONOS_vBNG
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    mysite_onos_volt:
+      description: ONOS Controller Slice for vOLT
+      type: tosca.nodes.Slice
+      requirements:
+          - ONOS:
+              node: service_ONOS_vOLT
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    mysite_vbng:
+      description: slice running OVS controlled by vBNG
+      type: tosca.nodes.Slice
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    mysite_volt:
+      description: OVS controlled by vOLT
+      type: tosca.nodes.Slice
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    mysite_clients:
+      description: slice for clients at the subscriber
+      type: tosca.nodes.Slice
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+
+    # Virtual machines
+    onos_app_1:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: Ubuntu
+            version: 14.10
+      requirements:
+          - slice:
+                node: mysite_onos_vbng
+                relationship: tosca.relationships.MemberOfSlice
+
+    onos_app_2:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: Ubuntu
+            version: 14.10
+      requirements:
+          - slice:
+                node: mysite_onos_volt
+                relationship: tosca.relationships.MemberOfSlice
+
+    # VM for running the OVS controlled by vBNG
+    ovs_vbng:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: mysite_vbng
+                relationship: tosca.relationships.MemberOfSlice
+
+    # VM for running the OVS controlled by vOLT
+    ovs_volt:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: mysite_volt
+                relationship: tosca.relationships.MemberOfSlice
+
+    # A subscriber client VM
+    client1:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: mysite_clients
+                relationship: tosca.relationships.MemberOfSlice
+
+    # docker image for vsg containers
+    docker-vsg:
+      # TODO: need to attach this to mydeployment
+      type: tosca.nodes.Image
+      properties:
+        kind: container
+        container_format: na
+        disk_format: na
+        path: andybavier/docker-vcpe
+        tag: develop
+
+    # Let's add a user who can be administrator of the household
+    johndoe@myhouse.com:
+      type: tosca.nodes.User
+      properties:
+          password: letmein
+          firstname: john
+          lastname: doe
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    # A subscriber
+    My House:
+       type: tosca.nodes.CORDSubscriber
+       properties:
+           service_specific_id: 123
+           firewall_enable: false
+           cdn_enable: false
+           url_filter_enable: false
+           url_filter_level: R
+       requirements:
+          - house_admin:
+              node: johndoe@myhouse.com
+              relationship: tosca.relationships.AdminPrivilege
+
+    Mom's PC:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 01:02:03:04:05:06
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    Dad's PC:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 90:E2:BA:82:F9:75
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    Jack's Laptop:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 68:5B:35:9D:91:D5
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    Jill's Laptop:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 34:36:3B:C9:B6:A6
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    My Volt:
+        type: tosca.nodes.VOLTTenant
+        properties:
+            service_specific_id: 123
+            s_tag: 222
+            c_tag: 432
+        requirements:
+            - provider_service:
+                node: service_volt
+                relationship: tosca.relationships.MemberOfService
+            - subscriber:
+                node: My House
+                relationship: tosca.relationships.BelongsToSubscriber
diff --git a/cord-deprecated/dataplane/ansible.cfg b/cord-deprecated/dataplane/ansible.cfg
new file mode 100644
index 0000000..9100590
--- /dev/null
+++ b/cord-deprecated/dataplane/ansible.cfg
@@ -0,0 +1,4 @@
+[defaults]
+remote_user = ubuntu
+private_key_file = ~/.ssh/id_rsa
+host_key_checking = false
\ No newline at end of file
diff --git a/cord-deprecated/dataplane/change_controller.sh b/cord-deprecated/dataplane/change_controller.sh
new file mode 100755
index 0000000..2b961ee
--- /dev/null
+++ b/cord-deprecated/dataplane/change_controller.sh
@@ -0,0 +1,13 @@
+#! /bin/bash
+
+# put IP address of node running ONOS VTN App here
+DESIRED_CONTROLLER="tcp:130.127.133.24:6653"
+
+while [[ 1 ]]; do
+    CONTROLLER=`ovs-vsctl get-controller br-int`
+    if [[ "$CONTROLLER" == "tcp:172.17.0.2:6653" ]]; then
+       ovs-vsctl set-controller br-int $DESIRED_CONTROLLER
+       echo "changed controller from $CONTROLLER to $DESIRED_CONTROLLER"
+    fi
+    sleep 10s
+done
diff --git a/cord-deprecated/dataplane/cleanup.sh b/cord-deprecated/dataplane/cleanup.sh
new file mode 100755
index 0000000..91d821c
--- /dev/null
+++ b/cord-deprecated/dataplane/cleanup.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+function cleanup_network {
+  NETWORK=$1
+  SUBNETS=`neutron net-show $NETWORK | grep -i subnets | awk '{print $4}'`
+  if [[ $SUBNETS != "" ]]; then
+      PORTS=`neutron port-list | grep -i $SUBNETS | awk '{print $2}'`
+      for PORT in $PORTS; do
+          echo "Deleting port $PORT"
+          neutron port-delete $PORT
+      done
+  fi
+  neutron net-delete $NETWORK
+}
+
+source ../../setup/admin-openrc.sh
+
+echo "Deleting VMs"
+# Delete all VMs
+VMS=$( nova list --all-tenants|grep mysite|awk '{print $2}' )
+for VM in $VMS
+do
+    nova delete $VM
+done
+
+echo "Waiting 5 seconds..."
+sleep 5
+
+cleanup_network lan_network
+cleanup_network wan_network
+cleanup_network mysite_vsg-private
+cleanup_network mysite_vsg-access
+cleanup_network management
+
+echo "Deleting networks"
+# Delete all networks beginning with mysite_
+NETS=$( neutron net-list --all-tenants|grep mysite|awk '{print $2}' )
+for NET in $NETS
+do
+    neutron net-delete $NET
+done
+
+neutron net-delete lan_network || true
+neutron net-delete subscriber_network || true
+neutron net-delete public_network || true
+neutron net-delete hpc_client_network || true
+neutron net-delete ceilometer_network || true
+neutron net-delete management || true
+neutron net-delete mysite_vsg-access || true
diff --git a/cord-deprecated/dataplane/dataplane-bm.yaml b/cord-deprecated/dataplane/dataplane-bm.yaml
new file mode 100644
index 0000000..e1e78ee
--- /dev/null
+++ b/cord-deprecated/dataplane/dataplane-bm.yaml
@@ -0,0 +1,36 @@
+---
+- hosts: switch_volt
+  sudo: yes
+  tasks:
+  - name: Create tunnel port on br-lan
+    openvswitch_port:
+      bridge=br-lan
+      port={{ item }}
+      state=present
+    with_items: "grenames.split(' ')"
+
+  - name: Set up GRE tunnel to vCPE
+    shell: ovs-vsctl set Interface {{ item.0 }} type=gre options:remote_ip={{ item.1 }}
+    with_together:
+      - "grenames.split(' ')"
+      - "bm_ips.split(' ')"
+
+- hosts: baremetal
+
+  user: root
+  sudo: no
+  tasks:
+  - name: Create br-lan
+    openvswitch_bridge:
+      bridge=br-lan
+      state=present
+
+  - name: Create tunnel port
+    openvswitch_port:
+      bridge=br-lan
+      port={{ grename }}
+      state=present
+
+  - name: Configure GRE tunnel to vOLT switch
+    shell: ovs-vsctl set Interface {{ grename }} type=gre options:remote_ip={{ volt_addr }}
+
diff --git a/cord-deprecated/dataplane/dataplane-vtn.yaml b/cord-deprecated/dataplane/dataplane-vtn.yaml
new file mode 100644
index 0000000..f6561b5
--- /dev/null
+++ b/cord-deprecated/dataplane/dataplane-vtn.yaml
@@ -0,0 +1,31 @@
+---
+- hosts: baremetal:nm
+
+  user: root
+  sudo: no
+  tasks:
+  - name: stop neutron openvswitch agent
+    service: name=neutron-plugin-openvswitch-agent state=stopped
+
+  - name: Update openvswitch config
+    lineinfile:
+      dest=/usr/share/openvswitch/scripts/ovs-ctl
+      insertafter="set ovsdb-server.*DB_FILE.*"
+      regexp=".*set.*--remote=ptcp.*"
+      line="        set \"$@\" --remote=ptcp:6641"
+      state=present
+
+  - name: Restart openvswitch
+    service: name=openvswitch-switch state=restarted
+
+  - name: Delete br-int
+    shell: ovs-vsctl show | grep -i br-int > /dev/null && ovs-vsctl del-br br-int
+    ignore_errors: yes
+
+- hosts: nm
+
+  user: root
+  sudo: no
+  tasks:
+  - name: stop neutron dhcp agent
+    service: name=neutron-dhcp-agent state=stopped
diff --git a/cord-deprecated/dataplane/dataplane.yaml b/cord-deprecated/dataplane/dataplane.yaml
new file mode 100644
index 0000000..3ca3bbe
--- /dev/null
+++ b/cord-deprecated/dataplane/dataplane.yaml
@@ -0,0 +1,256 @@
+---
+- hosts: switch_vbng
+  sudo: yes
+  vars:
+    controller_ip: "{{ hostvars['onos_vbng']['ansible_ssh_host'] }}"
+    controller_port: 6653
+    ovs_dpid: "0000000000000001"
+  tags:
+  - vbng
+  tasks:
+  - name: Fix /etc/hosts
+    lineinfile:
+      dest=/etc/hosts
+      regexp="127.0.0.1 localhost"
+      line="127.0.0.1 localhost {{ ansible_hostname }}"
+
+  - name: Install packages
+    apt: name={{ item }}
+      state=latest
+      update_cache=yes
+    with_items:
+    - openvswitch-switch
+    - python-netifaces
+
+  - name: Create br-vbng
+    openvswitch_bridge:
+      bridge=br-vbng
+      state=present
+
+  - name: Find wan_network interface
+    script: scripts/if_from_ip.py {{ wan_ip }}
+    register: wan_net
+
+  - name: Find public_network interface
+    script: scripts/if_from_ip.py {{ public_ip }}
+    register: public_net
+
+  - name: Hook up wan-network to br-vbng
+    openvswitch_port:
+      bridge=br-vbng
+      port={{ wan_net.stdout }}
+      state=present
+
+  - name: Hook up public-network to OvS
+    openvswitch_port:
+      bridge=br-vbng
+      port={{ public_net.stdout }}
+      state=present
+
+  - name: Remove IP address on public_network
+    command: /sbin/ifconfig {{ public_net.stdout }} 0.0.0.0
+    when: public_net.stdout
+
+  - name: Change datapath ID of bridge to match config file
+    command: /usr/bin/ovs-vsctl set bridge br-vbng other-config:datapath-id={{ ovs_dpid }}
+
+  - name: Add controller to switch
+    command: /usr/bin/ovs-vsctl set-controller br-vbng tcp:{{ controller_ip }}:{{ controller_port }}
+
+- hosts: switch_volt
+  sudo: yes
+  vars:
+    controller_ip: "{{ hostvars['onos_volt']['ansible_ssh_host'] }}"
+    controller_port: 6653
+    vcpe_lan_ip: "{{ hostvars['vcpe']['lan_ip'] }}"
+  tags:
+  - volt
+  tasks:
+
+  - name: Fix /etc/hosts
+    lineinfile:
+      dest=/etc/hosts
+      regexp="127.0.0.1 localhost"
+      line="127.0.0.1 localhost {{ ansible_hostname }}"
+
+  - name: Install packages
+    apt: name={{ item }} state=present update_cache=yes
+    with_items:
+    - git
+    - python-netifaces
+    - openvswitch-switch
+
+  - name: Checkout the Mininet repo
+    git: repo=https://github.com/mininet/mininet.git
+      dest=/tmp/mininet
+
+  - name: Install the CPqD switch using Mininet install script
+    shell: /tmp/mininet/util/install.sh -3f
+      creates=/usr/local/bin/ofdatapath
+    ignore_errors: true
+
+  - name: Find subscriber_network interface
+    script: scripts/if_from_ip.py {{ subscriber_ip }}
+    register: subscriber_net
+
+  - name: Create bridge br-sub
+    openvswitch_bridge:
+      bridge=br-sub
+      state=present
+
+  - name: Add subscriber_net to br-sub
+    openvswitch_port:
+      bridge=br-sub
+      port={{ subscriber_net.stdout }}
+      state=present
+
+  # The CPqD switch is expecting that packets coming from the client have
+  # VLAN tag 1.  However Neutron's OvS configuration eats VLAN-tagged packets.
+  # So tag them with VLAN 1 here before sending to CPqD.
+  #
+  # Note that the VLAN tag is 0 in the real-world setup, but the CPqD switch
+  # seems to have a problem with these packets.
+
+  # Using OvS to tag packets with VLAN ID 1 is not quite working for some reason.
+  # The packets from the client get tagged OK, but only the first packet from the
+  # VCPE gets its tag stripped off.  Very weird.  That's why we are using veth
+  # devices instead.
+  #- name: Add tag 1 to br-sub port
+  #  shell: ovs-vsctl set port {{ subscriber_net.stdout }} tag=1
+
+  - name: Create a pair of veth devices
+    shell: ifconfig veth0 >> /dev/null || ip link add veth0 type veth peer name veth1
+
+  - name: Create veth0.1
+    shell: ifconfig veth0.1 >> /dev/null || ip link add link veth0 name veth0.1 type vlan id 1
+
+  - name: Bring the interfaces up
+    shell: ip link set {{ item }} up
+    with_items:
+    - veth0
+    - veth1
+    - veth0.1
+
+  - name: Add veth0.1 to br-sub
+    openvswitch_port:
+      bridge=br-sub
+      port=veth0.1
+      state=present
+
+  - name: Create bridge br-lan
+    openvswitch_bridge:
+      bridge=br-lan
+      state=present
+
+  - name: Create tunnel port on br-lan
+    openvswitch_port:
+      bridge=br-lan
+      port=gre0
+      state=present
+
+  - name: Set up GRE tunnel to vCPE
+    shell: ovs-vsctl set Interface gre0 type=gre options:remote_ip={{ vcpe_lan_ip }}
+
+  - name: Check if br-lan has an IPv6 address
+    shell: ip addr show br-lan|grep inet6|awk '{print $2}'
+    register: ipv6
+
+  - name: Remove br-lan IPv6 address if present
+    shell: ifconfig br-lan inet6 del {{ ipv6.stdout }}
+    when: ipv6.stdout != ""
+
+  - name: Check if veth1 has an IPv6 address
+    shell: ip addr show veth1|grep inet6|awk '{print $2}'
+    register: ipv6
+
+  - name: Remove veth1 IPv6 address if present
+    shell: ifconfig veth1 inet6 del {{ ipv6.stdout }}
+    when: ipv6.stdout != ""
+
+  - name: Run the datapath
+    command: /usr/local/bin/ofdatapath -i veth1,br-lan punix:/tmp/s1 -d 000000000001 --no-slicing -D -P
+      creates=/usr/local/var/run/ofdatapath.pid
+
+  - name: Run the control program
+    command: /usr/local/bin/ofprotocol unix:/tmp/s1 tcp:{{ controller_ip }}:{{ controller_port }} --fail=closed --listen=punix:/tmp/s1.listen -D -P
+      creates=/usr/local/var/run/ofprotocol.pid
+
+- hosts: client
+  sudo: yes
+  tags:
+  - client
+  tasks:
+
+  - name: Fix /etc/hosts
+    lineinfile:
+      dest=/etc/hosts
+      regexp="127.0.0.1 localhost"
+      line="127.0.0.1 localhost {{ ansible_hostname }}"
+
+  - name: Install packages
+    apt: name={{ item }}
+      state=latest
+      update_cache=yes
+    with_items:
+    - openvswitch-switch
+    - python-netifaces
+
+  - name: Create br-sub
+    openvswitch_bridge:
+      bridge=br-sub
+      state=present
+
+  - name: Find subscriber_network interface
+    script: scripts/if_from_ip.py {{ subscriber_ip }}
+    register: client_net
+
+  - name: Hook up subscriber-network to OvS
+    openvswitch_port:
+      bridge=br-sub
+      port={{ client_net.stdout }}
+      state=present
+
+  - name: Run some commands on br-sub
+    shell: "{{ item }}"
+    with_items:
+    - ifconfig br-sub 0.0.0.0 mtu 1400 up
+    - ethtool -K br-sub tso off
+    - ethtool -K br-sub tx off
+
+  # Run dhclient on br-sub internal interface to issue DHCP request to vCPE
+
+#
+# Need to set up a tunnel between vCPE and vOLT to keep VLAN-tagged
+# packets from being swallowed by the network.
+#
+- hosts: vcpe
+  sudo: yes
+  vars:
+    volt_lan_ip: "{{ hostvars['switch_volt']['lan_ip'] }}"
+  tags:
+  - vcpe
+  tasks:
+
+  - name: Install packages
+    apt: name={{ item }}
+      state=latest
+      update_cache=yes
+    with_items:
+    - openvswitch-switch
+
+  - name: Create br-lan
+    openvswitch_bridge:
+      bridge=br-lan
+      state=present
+
+  - name: Create tunnel port
+    openvswitch_port:
+      bridge=br-lan
+      port=gre0
+      state=present
+
+  - name: Configure GRE tunnel to vOLT switch
+    shell: ovs-vsctl set Interface gre0 type=gre options:remote_ip={{ volt_lan_ip }}
+
+  - name: Restart vCPEs
+    script: scripts/restart-vcpes.sh
diff --git a/cord-deprecated/dataplane/gen-etc-hosts.sh b/cord-deprecated/dataplane/gen-etc-hosts.sh
new file mode 100755
index 0000000..0d49706
--- /dev/null
+++ b/cord-deprecated/dataplane/gen-etc-hosts.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# set -x
+
+source ../../setup/admin-openrc.sh
+
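+# Print the IP address assigned to the instance matching $1 (LABEL) on network $2 (NETWORK)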
+get_ip () {
+    LABEL=$1
+    NETWORK=$2
+    nova list --all-tenants|grep $LABEL|sed "s/^.*$NETWORK=//g"|sed 's/; .*$//g'|awk '{print $1}'
+}
+
+cat <<EOF
+$( get_ip mysite_onos_vbng flat-lan-1-net) onos_vbng
+$( get_ip mysite_vbng flat-lan-1-net) switch_vbng
+$( get_ip mysite_onos_volt flat-lan-1-net) onos_volt
+$( get_ip mysite_volt flat-lan-1-net) switch_volt
+$( get_ip mysite_clients flat-lan-1-net) client
+$( get_ip mysite_vsg flat-lan-1-net) vcpe
+EOF
diff --git a/cord-deprecated/dataplane/gen-inventory.sh b/cord-deprecated/dataplane/gen-inventory.sh
new file mode 100755
index 0000000..bacd2dd
--- /dev/null
+++ b/cord-deprecated/dataplane/gen-inventory.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# set -x
+
+source ../../setup/admin-openrc.sh
+
+get_ip () {
+    LABEL=$1
+    NETWORK=$2
+    nova list --all-tenants|grep $LABEL|sed "s/^.*$NETWORK=//g"|sed 's/; .*$//g'|awk '{print $1}'
+}
+
+cat <<EOF
+onos_vbng    ansible_ssh_host=$( get_ip mysite_onos_vbng flat-lan-1-net)
+switch_vbng  ansible_ssh_host=$( get_ip mysite_vbng flat-lan-1-net) wan_ip=$( get_ip mysite_vbng wan_network) public_ip=$( get_ip mysite_vbng tun0-net )
+
+onos_volt    ansible_ssh_host=$( get_ip mysite_onos_volt flat-lan-1-net)
+switch_volt  ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) subscriber_ip=$( get_ip mysite_volt subscriber_network) lan_ip=$( get_ip mysite_volt lan_network)
+
+client       ansible_ssh_host=$( get_ip mysite_clients flat-lan-1-net) subscriber_ip=$( get_ip mysite_clients subscriber_network)
+vcpe         ansible_ssh_host=$( get_ip mysite_vsg flat-lan-1-net) lan_ip=$( get_ip mysite_vsg lan_network)
+EOF
diff --git a/cord-deprecated/dataplane/generate-bm.sh b/cord-deprecated/dataplane/generate-bm.sh
new file mode 100755
index 0000000..f9b8787
--- /dev/null
+++ b/cord-deprecated/dataplane/generate-bm.sh
@@ -0,0 +1,44 @@
+source ../../setup/admin-openrc.sh
+
+get_ip () {
+    LABEL=$1
+    NETWORK=$2
+    nova list --all-tenants|grep $LABEL|sed "s/^.*$NETWORK=//g"|sed 's/; .*$//g'|awk '{print $1}'
+    }
+
+GRE_NAMES=()
+BM_IPS=()
+
+NODES=`sudo bash -c "source ../../setup/admin-openrc.sh ; nova hypervisor-list" |grep enabled|awk '{print $4}'`
+I=1
+for NODE in $NODES; do
+    BM_SSH_IP=`getent hosts $NODE | awk '{ print $1 }'`
+    IFS=. read BM_NAME BM_REMAINDER <<< $NODE
+    BM_IP=`sudo grep -i $BM_NAME /root/setup/data-hosts.flat-lan-1 | awk '{print $1}'`
+
+    GRE_NAMES+=("gre-bm-$I")
+    BM_IPS+=("$BM_IP")
+
+    #echo switch_volt$I    ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) grename=gre-bm-$I bm_addr=$BM_IP
+    echo bm$I           ansible_ssh_host=$BM_SSH_IP grename=gre-bm-$I volt_addr=$( get_ip mysite_volt flat-lan-1-net)  ansible_ssh_private_key_file=/root/.ssh/id_rsa
+    I=$(( I+1 ))
+done
+
+GRE_NAMES=${GRE_NAMES[@]}
+BM_IPS=${BM_IPS[@]}
+
+echo switch_volt ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) grenames=\"$GRE_NAMES\" bm_ips=\"$BM_IPS\"
+
+NM=`grep "^nm" /root/setup/fqdn.map | awk '{ print $2 }'`
+echo "nm1 ansible_ssh_host=$NM ansible_ssh_private_key_file=/root/.ssh/id_rsa"
+
+echo "[baremetal]"
+I=1
+for NODE in $NODES; do
+    echo bm$I
+    I=$((I+1))
+done
+
+# now for the network management node
+echo "[nm]"
+echo "nm1"
diff --git a/cord-deprecated/dataplane/scripts/if_from_ip.py b/cord-deprecated/dataplane/scripts/if_from_ip.py
new file mode 100644
index 0000000..28524fe
--- /dev/null
+++ b/cord-deprecated/dataplane/scripts/if_from_ip.py
@@ -0,0 +1,14 @@
+#!/usr/bin/python
+
+import sys
+import netifaces
+
+def main (argv):
+    addr = argv[0]
+    # Print the name of the interface that owns the given IPv4 address
+    for iface in netifaces.interfaces():
+        addrs = netifaces.ifaddresses(iface)
+        if netifaces.AF_INET in addrs and addrs[netifaces.AF_INET][0]['addr'] == addr:
+            sys.stdout.write(iface)
+
+if __name__ == "__main__":
+    main(sys.argv[1:])
diff --git a/cord-deprecated/dataplane/scripts/restart-vcpes.sh b/cord-deprecated/dataplane/scripts/restart-vcpes.sh
new file mode 100644
index 0000000..d1c9fce
--- /dev/null
+++ b/cord-deprecated/dataplane/scripts/restart-vcpes.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+for VCPE in $( docker ps|grep vcpe|awk '{print $NF}' )
+do
+  service $VCPE stop
+  sleep 1
+  service $VCPE start
+done
diff --git a/cord-deprecated/docker-compose.yml b/cord-deprecated/docker-compose.yml
new file mode 100644
index 0000000..28eeeb4
--- /dev/null
+++ b/cord-deprecated/docker-compose.yml
@@ -0,0 +1,106 @@
+xos_db:
+    image: xosproject/xos-postgres
+    expose:
+        - "5432"
+
+xos_synchronizer_openstack:
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/openstack/xos-synchronizer.py"
+    image: xosproject/xos-synchronizer-openstack
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: openstack
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+    volumes:
+        - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+        - ./xos_cord_config:/opt/xos/xos_configuration/xos_cord_config:ro
+        - ../setup:/root/setup:ro
+
+xos_synchronizer_onos:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/onos/onos-synchronizer.py -C /opt/xos/synchronizers/onos/onos_synchronizer_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: onos
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+    volumes:
+        - ../setup/id_rsa:/opt/xos/synchronizers/onos/onos_key:ro  # private key
+
+xos_synchronizer_vcpe:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/vcpe/vcpe-synchronizer.py -C /opt/xos/synchronizers/vcpe/vcpe_synchronizer_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: vcpe
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+    volumes:
+        - ../setup/id_rsa:/opt/xos/synchronizers/vcpe/vcpe_private_key:ro  # private key
+        - ../setup:/root/setup:ro
+
+xos_synchronizer_vbng:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/vbng/vbng-synchronizer.py -C /opt/xos/synchronizers/vbng/vbng_synchronizer_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: vbng
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+
+xos_synchronizer_monitoring_channel:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/monitoring_channel/monitoring_channel_synchronizer.py -C /opt/xos/synchronizers/monitoring_channel/monitoring_channel_synchronizer_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: monitoring_channel
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+    volumes:
+        - ../setup/id_rsa:/opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key:ro  # private key
+
+xos_synchronizer_vtr:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/vtr/vtr-synchronizer.py -C /opt/xos/synchronizers/vtr/vtr_synchronizer_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: vtr
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+    volumes:
+        - ../setup/id_rsa:/opt/xos/synchronizers/vtr/vcpe_private_key:ro  # private key
+        - ../setup:/root/setup:ro
+
+# FUTURE
+#xos_swarm_synchronizer:
+#    image: xosproject/xos-swarm-synchronizer
+#    labels:
+#        org.xosproject.kind: synchronizer
+#        org.xosproject.target: swarm
+
+xos:
+    image: xosproject/xos
+    command: python /opt/xos/manage.py runserver 0.0.0.0:8000 --insecure --makemigrations
+    ports:
+        - "9999:8000"
+    links:
+        - xos_db
+    volumes:
+      - ../setup:/root/setup:ro
+      - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+      - ./xos_cord_config:/opt/xos/xos_configuration/xos_cord_config:ro
+      - ../setup/id_rsa.pub:/opt/xos/synchronizers/onos/onos_key.pub:ro
+      - ../setup/id_rsa.pub:/opt/xos/synchronizers/vcpe/vcpe_public_key:ro
+      - ../setup/id_rsa.pub:/opt/xos/synchronizers/monitoring_channel/monitoring_channel_public_key:ro
diff --git a/cord-deprecated/make-virtualbng-json.sh b/cord-deprecated/make-virtualbng-json.sh
new file mode 100644
index 0000000..993643c
--- /dev/null
+++ b/cord-deprecated/make-virtualbng-json.sh
@@ -0,0 +1,38 @@
+FN=$SETUPDIR/virtualbng.json
+
+rm -f $FN
+
+cat >> $FN <<EOF
+{
+    "localPublicIpPrefixes" : [
+        "10.254.0.128/25"
+    ],
+    "nextHopIpAddress" : "10.254.0.1",
+    "publicFacingMac" : "00:00:00:00:00:66",
+    "xosIpAddress" : "10.11.10.1",
+    "xosRestPort" : "9999",
+    "hosts" : {
+EOF
+
+NODES=$( sudo bash -c "source $SETUPDIR/admin-openrc.sh ; nova hypervisor-list" |grep -v ID|grep -v +|awk '{print $4}' )
+
+NODECOUNT=0
+for NODE in $NODES; do
+    ((NODECOUNT++))
+done
+
+I=0
+for NODE in $NODES; do
+    echo $NODE
+    ((I++))
+    if [[ "$I" -lt "$NODECOUNT" ]]; then
+        echo "      \"$NODE\" : \"of:0000000000000001/1\"," >> $FN
+    else
+        echo "      \"$NODE\" : \"of:0000000000000001/1\"" >> $FN
+    fi
+done
+
+cat >> $FN <<EOF
+    }
+}
+EOF
diff --git a/cord-deprecated/make-vtn-networkconfig-json.sh b/cord-deprecated/make-vtn-networkconfig-json.sh
new file mode 100644
index 0000000..77b855d
--- /dev/null
+++ b/cord-deprecated/make-vtn-networkconfig-json.sh
@@ -0,0 +1,88 @@
+FN=$SETUPDIR/vtn-network-cfg.json
+
+echo "Writing to $FN"
+
+rm -f $FN
+
+cat >> $FN <<EOF
+{
+    "apps" : {
+        "org.onosproject.cordvtn" : {
+            "cordvtn" : {
+                "privateGatewayMac" : "00:00:00:00:00:01",
+                "localManagementIp": "172.27.0.1/24",
+                "ovsdbPort": "6641",
+                "sshPort": "22",
+                "sshUser": "root",
+                "sshKeyFile": "/root/node_key",
+                "publicGateways": [
+                    {
+                        "gatewayIp": "10.123.0.1",
+                        "gatewayMac": "00:8c:fa:5b:09:d8"
+                    }
+                ],
+                "nodes" : [
+EOF
+
+NODES=$( sudo bash -c "source $SETUPDIR/admin-openrc.sh ; nova hypervisor-list" |grep -v ID|grep -v +|awk '{print $4}' )
+
+# XXX disabled - we don't need or want the nm node at this time
+# also configure ONOS to manage the nm node
+# NM=`grep "^nm" /root/setup/fqdn.map | awk '{ print $2 }'`
+# NODES="$NODES $NM"
+
+NODECOUNT=0
+for NODE in $NODES; do
+    ((NODECOUNT++))
+done
+
+I=0
+for NODE in $NODES; do
+    echo $NODE
+    NODEIP=`getent hosts $NODE | awk '{ print $1 }'`
+
+    # This part is cloudlab-specific. It examines the flat-lan-1 network and extracts
+    # the eth device and IP address that were assigned to flat-lan-1.
+    sudo scp root@$NODE:/root/setup/info.flat-lan-1 $SETUPDIR/flat-lan-$NODE
+    PHYPORT=`bash -c "source $SETUPDIR/flat-lan-$NODE; echo \\\$DATADEV"`
+    LOCALIP=`bash -c "source $SETUPDIR/flat-lan-$NODE; echo \\\$DATAIP"`
+
+    ((I++))
+    cat >> $FN <<EOF
+                    {
+                      "hostname": "$NODE",
+                      "hostManagementIp": "$NODEIP/24",
+                      "bridgeId": "of:000000000000000$I",
+                      "dataPlaneIntf": "$PHYPORT",
+                      "dataPlaneIp": "$LOCALIP/24"
+EOF
+    if [[ "$I" -lt "$NODECOUNT" ]]; then
+        echo "                    }," >> $FN
+    else
+        echo "                    }" >> $FN
+    fi
+done
+
+# get the openstack admin password and username
+source $SETUPDIR/admin-openrc.sh
+
+HOSTNAME=`hostname`
+NEUTRONIP=`getent hosts $HOSTNAME | awk '{ print $1 }'`
+KEYSTONEIP=`getent hosts $HOSTNAME | awk '{ print $1 }'`
+
+cat >> $FN <<EOF
+                ]
+            }
+        },
+        "org.onosproject.openstackinterface" : {
+            "openstackinterface" : {
+                 "do_not_push_flows" : "true",
+                 "neutron_server" : "http://$NEUTRONIP:9696/v2.0/",
+                 "keystone_server" : "http://$KEYSTONEIP:5000/v2.0/",
+                 "user_name" : "$OS_USERNAME",
+                 "password" : "$OS_PASSWORD"
+             }
+        }
+    }
+}
+EOF
diff --git a/cord-deprecated/xos_cord_config b/cord-deprecated/xos_cord_config
new file mode 100644
index 0000000..a5448f7
--- /dev/null
+++ b/cord-deprecated/xos_cord_config
@@ -0,0 +1,6 @@
+[gui]
+branding_name=CORD
+#branding_css=/static/cord.css
+branding_icon=/static/cord-logo.png
+branding_favicon=/static/cord-favicon.png
+branding_bg=/static/cord-bg.jpg
diff --git a/cord-pod/Makefile b/cord-pod/Makefile
new file mode 100644
index 0000000..f006c51
--- /dev/null
+++ b/cord-pod/Makefile
@@ -0,0 +1,173 @@
+include ../common/Makedefs
+
+CONFIG_DIR:=$(shell pwd)
+DOCKER_COMPOSE_YML=./onboarding-docker-compose/docker-compose.yml
+BOOTSTRAP_YML=./docker-compose-bootstrap.yml
+DOCKER_PROJECT=cordpod
+BOOTSTRAP_PROJECT=cordpodbs
+XOS_BOOTSTRAP_PORT=81
+XOS_UI_PORT=80
+ADMIN_USERNAME=padmin@vicci.org
+ADMIN_PASSWORD=letmein
+RUN_TOSCA_BOOTSTRAP=python ../common/run_tosca.py $(XOS_BOOTSTRAP_PORT) $(ADMIN_USERNAME) $(ADMIN_PASSWORD)
+RUN_TOSCA=python ../common/run_tosca.py $(XOS_UI_PORT) $(ADMIN_USERNAME) $(ADMIN_PASSWORD)
+
+.PHONY: xos
+xos: prereqs dirs download_services bootstrap onboarding podconfig
+
+prereqs:
+	sudo make -f ../common/Makefile.prereqs
+
+dirs:
+	# if this directory doesn't exist, then docker-compose will create it with root permission
+	mkdir -p key_import
+	mkdir -p onboarding-docker-compose
+
+bootstrap:
+	echo "[BOOTSTRAP]"
+	sudo rm -f onboarding-docker-compose/docker-compose.yml
+	sudo CONFIG_DIR=$(CONFIG_DIR) docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) up -d
+	bash ../common/wait_for_xos_port.sh 81
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run xos_bootstrap_ui python /opt/xos/tosca/run.py none - < ../common/fixtures.yaml
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run xos_bootstrap_ui python /opt/xos/tosca/run.py none - < ../common/mydeployment.yaml
+	$(RUN_TOSCA_BOOTSTRAP) xos.yaml
+
+download_services:
+	make -f ../common/Makefile.services
+
+update_services:
+	make -f ../common/Makefile.services update
+
+onboarding:
+	echo "[ONBOARDING]"
+	# on-board any services here
+	bash ../common/wait_for_onboarding_ready.sh 81 xos
+	$(RUN_TOSCA_BOOTSTRAP) ../common/disable-onboarding.yaml
+	sudo cp id_rsa key_import/vsg_rsa
+	sudo cp id_rsa.pub key_import/vsg_rsa.pub
+	sudo cp id_rsa key_import/volt_rsa
+	sudo cp id_rsa.pub key_import/volt_rsa.pub
+	sudo cp id_rsa key_import/onos_rsa
+	sudo cp id_rsa.pub key_import/onos_rsa.pub
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/vrouter/xos/vrouter-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/olt/xos/volt-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/vsg/xos/vsg-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/vtn/xos/vtn-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/onos-service/xos/onos-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/fabric/xos/fabric-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/vtr/xos/vtr-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) synchronizers.yaml
+	$(RUN_TOSCA_BOOTSTRAP) ../common/enable-onboarding.yaml
+	bash ../common/wait_for_onboarding_ready.sh 81 services/vrouter
+	bash ../common/wait_for_onboarding_ready.sh 81 services/volt
+	bash ../common/wait_for_onboarding_ready.sh 81 services/vsg
+	bash ../common/wait_for_onboarding_ready.sh 81 services/vtn
+	bash ../common/wait_for_onboarding_ready.sh 81 services/onos
+	bash ../common/wait_for_onboarding_ready.sh 81 services/fabric
+	bash ../common/wait_for_onboarding_ready.sh 81 services/vtr
+	bash ../common/wait_for_onboarding_ready.sh 81 xos
+	bash ../common/wait_for_xos_port.sh 80
+
+podconfig: nodes.yaml images.yaml
+	echo "[PODCONFIG]"
+	$(RUN_TOSCA) setup.yaml
+	$(RUN_TOSCA) nodes.yaml
+	$(RUN_TOSCA) images.yaml
+
+vtn: vtn-external.yaml
+	$(RUN_TOSCA) vtn-external.yaml
+
+fabric: fabric.yaml
+	$(RUN_TOSCA) fabric.yaml
+
+cord: vsg_custom_images
+	$(RUN_TOSCA) mgmt-net.yaml
+	$(RUN_TOSCA) cord-vtn-vsg.yaml
+	$(RUN_TOSCA) cord-volt-devices.yaml
+
+clean-nodes:
+	rm -f nodes.yaml
+
+update-nodes: nodes.yaml
+	$(RUN_TOSCA) nodes.yaml
+
+new-nodes: clean-nodes update-nodes vtn
+
+exampleservice: onboard-exampleservice
+	$(RUN_TOSCA) pod-exampleservice.yaml
+
+onboard-exampleservice:
+	sudo cp id_rsa key_import/exampleservice_rsa
+	sudo cp id_rsa.pub key_import/exampleservice_rsa.pub
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/exampleservice/exampleservice-onboard.yaml
+	bash ../common/wait_for_onboarding_ready.sh 81 services/exampleservice
+	bash ../common/wait_for_onboarding_ready.sh 81 xos
+	bash ../common/wait_for_xos_port.sh 80
+
+cord-ceilometer: ceilometer_custom_images cord onboard-ceilometer
+	$(RUN_TOSCA) ceilometer.yaml
+
+onboard-ceilometer: download-ceilometer
+	sudo cp id_rsa key_import/monitoring_channel_rsa
+	sudo cp id_rsa.pub key_import/monitoring_channel_rsa.pub
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/monitoring/xos/ceilometer-onboard.yaml
+	bash ../common/wait_for_onboarding_ready.sh 81 services/ceilometer
+	bash ../common/wait_for_onboarding_ready.sh 81 xos
+	bash ../common/wait_for_xos_port.sh 80
+
+download-ceilometer:
+	make -f ../common/Makefile.services monitoring_services
+
+nodes.yaml:
+	export SETUPDIR=.; bash ../common/make-nodes-yaml.sh
+
+images.yaml:
+	export SETUPDIR=.; bash ../common/make-images-yaml.sh
+
+vtn-external.yaml:
+	export SETUPDIR=.; bash ./make-vtn-external-yaml.sh
+
+fabric.yaml:
+	export SETUPDIR=.; bash ./make-fabric-yaml.sh
+
+virtualbng_json:
+	export SETUPDIR=.; bash ./make-virtualbng-json.sh
+
+vtn_network_cfg_json:
+	export SETUPDIR=.; bash ./make-vtn-networkconfig-json.sh
+
+stop:
+	test ! -s $(DOCKER_COMPOSE_YML) || sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) stop
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) stop
+
+rm:
+	test ! -s $(DOCKER_COMPOSE_YML) || sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) rm
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) rm
+
+showlogs:
+	sudo MYIP=$(MYIP) docker-compose logs
+
+cleanup: stop rm
+	./cleanup.sh
+	bash -c "source ./admin-openrc.sh; nova list --all-tenants; neutron net-list"
+
+ceilometer_custom_images: images/ceilometer-trusty-server-multi-nic.img
+	bash -c "source ./admin-openrc.sh; glance image-show ceilometer-trusty-server-multi-nic || glance image-create --name ceilometer-trusty-server-multi-nic --disk-format qcow2 --file ./images/ceilometer-trusty-server-multi-nic.img --container-format bare"
+
+vsg_custom_images: images/vsg-1.0.img
+	bash -c "source ./admin-openrc.sh; glance image-show vsg-1.0 || glance image-create --name vsg-1.0 --disk-format qcow2 --file ./images/vsg-1.0.img --container-format bare"
+
+images/ceilometer-trusty-server-multi-nic.img: images
+	wget http://www.vicci.org/cord/ceilometer-trusty-server-multi-nic.compressed.qcow2 -P ./images
+	mv ./images/ceilometer-trusty-server-multi-nic.compressed.qcow2 ./images/ceilometer-trusty-server-multi-nic.img
+
+images/vsg-1.0.img: images
+	wget http://www.vicci.org/cord/vsg-1.0.img -P ./images
+
+images:
+	mkdir -p ./images
+
+.PHONY: local_containers
+local_containers:
+	make -f ../common/Makefile.containers update_certs xos_devel synchronizer onboarding_synchronizer
+
diff --git a/cord-pod/NOTES.txt b/cord-pod/NOTES.txt
new file mode 100644
index 0000000..d832f2b
--- /dev/null
+++ b/cord-pod/NOTES.txt
@@ -0,0 +1,37 @@
+Notes on setup
+
+Requirements:
+* admin-openrc.sh: Admin credentials for your OpenStack cloud
+* id_rsa[.pub]: Keypair for use by the various services
+* node_key: Private key that allows root login to the compute nodes
+
+Steps for bringing up the POD:
+
+OpenStack
+* Configure management net
+  - mgmtbr on head nodes
+  - dnsmasq on head1 using cord config file
+* Install OpenStack using the openstack-cluster-install repo
+
+VTN
+* onos-cord VM is created by openstack-cluster-install
+* Bring up ONOS
+  # cd cord; docker-compose up -d
+* On each compute node it's necessary to perform a few manual steps (FIX ME)
+  - Disable neutron-plugin-openvswitch-agent. As root:
+    # service neutron-plugin-openvswitch-agent stop
+    # echo manual > /etc/init/neutron-plugin-openvswitch-agent.override
+  - Clean up OVS: delete br-int and any other bridges, for example:
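+    # ovs-vsctl del-br br-int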
+  - Listen for connections from VTN:
+    # ovs-appctl -t ovsdb-server ovsdb-server/add-remote ptcp:6641
+
+XOS
+* xos VM is created by openstack-cluster-install
+  - requirements listed above should already be satisfied by install
+* cd xos/xos/configurations/cord-pod
+* Bring up XOS cord-pod configuration
+  # make
+  # make vtn
+  # make cord
+* Login to XOS at http://xos
+  - padmin@vicci.org / letmein
diff --git a/cord-pod/README-Tutorial.md b/cord-pod/README-Tutorial.md
new file mode 100644
index 0000000..9f8c9e9
--- /dev/null
+++ b/cord-pod/README-Tutorial.md
@@ -0,0 +1,182 @@
+# Setting up the XOS Tutorial
+
+The XOS Tutorial demonstrates how to add a new subscriber-facing
+service to CORD.  
+
+## Prepare the development POD
+
+This tutorial runs on a single-node CORD POD development environment.
+For best results, prepare a clean Ubuntu 14.04
+LTS installation on a server with at least 48GB RAM and 12 CPU cores.
+Update the packages to the latest versions.
+
+To set up the POD, run
+[this script](https://github.com/open-cloud/openstack-cluster-setup/blob/master/scripts/single-node-pod.sh)
+with the `-e` option:
+
+```
+ubuntu@pod:~$ wget https://raw.githubusercontent.com/open-cloud/openstack-cluster-setup/master/scripts/single-node-pod.sh
+ubuntu@pod:~$ bash single-node-pod.sh -e
+```
+
+> NOTE: The above script can also automatically perform all tutorial steps if run as `bash single-node-pod.sh -e -t`.
+
+Be patient... it will take **at least one hour** to fully set up the single-node POD.
+
+## Include ExampleService in XOS
+
+On the POD, SSH into the XOS VM: `$ ssh ubuntu@xos`.  You will see the XOS repository
+checked out under `~/xos/`.
+
+Change the XOS code as described in the
+[ExampleService Tutorial](http://guide.xosproject.org/devguide/exampleservice/)
+under the **Install the Service in Django** heading, and rebuild the XOS containers as
+follows:
+
+```
+ubuntu@xos:~$ cd xos/xos/configurations/cord-pod
+ubuntu@xos:~/xos/xos/configurations/cord-pod$ make local_containers
+```
+
+Modify the `docker-compose.yml` file in the `cord-pod` directory to include the synchronizer
+for ExampleService:
+
+```yaml
+xos_synchronizer_exampleservice:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/exampleservice/exampleservice-synchronizer.py -C /root/setup/files/exampleservice_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: exampleservice
+    links:
+        - xos_db
+    volumes:
+        - .:/root/setup:ro
+        - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+        - ./id_rsa:/opt/xos/synchronizers/exampleservice/exampleservice_private_key:ro
+```
+
+Also, add ExampleService's public key to the `volumes` section of the `xos` docker container:
+
+```yaml
+xos:
+    ...
+    volumes:
+        ...
+        - ./id_rsa.pub:/opt/xos/synchronizers/exampleservice/exampleservice_public_key:ro 
+```
+
+## Bring up XOS
+
+Run the `make` commands described in the [Bringing up XOS](https://github.com/open-cloud/xos/blob/master/xos/configurations/cord-pod/README.md#bringing-up-xos)
+section of the README.md file.
+
+## Configure ExampleService in XOS
+
+The TOSCA file `pod-exampleservice.yaml` contains the service declaration.
+Tell XOS to process it by running:
+
+```
+ubuntu@xos:~/xos/xos/configurations/cord-pod$ make exampleservice
+```
+
+This will add the ExampleService to XOS.  It will also create an ExampleTenant,
+which causes a VM to be created with Apache running inside.
+
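+One way to confirm that the instance has come up is to list the Nova instances
+from the head node.  This is only a sketch: it assumes the OpenStack admin
+credentials (`admin-openrc.sh`) are sourced in your shell, and the exact
+instance name may differ.
+
+```
+ubuntu@pod:~$ source admin-openrc.sh
+ubuntu@pod:~$ nova list --all-tenants | grep exampleservice
+```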
+
+## Set up a Subscriber Device
+
+The single-node POD does not include a virtual OLT, but a device at the
+subscriber’s premises can be simulated by an LXC container running on the
+nova-compute node.
+
+In the nova-compute VM:
+
+```
+ubuntu@nova-compute:~$ sudo apt-get install lxc
+```
+
+Next edit `/etc/lxc/default.conf` and change the default bridge name to `databr`:
+
+```
+  lxc.network.link = databr
+```
+
+Create the client container and attach to it:
+
+```
+ubuntu@nova-compute:~$ sudo lxc-create -t ubuntu -n testclient
+ubuntu@nova-compute:~$ sudo lxc-start -n testclient
+ubuntu@nova-compute:~$ sudo lxc-attach -n testclient
+```
+
+(The `lxc-start` command may report an error, but it can safely be ignored.)
+
+Finally, inside the container set up an interface so that outgoing traffic
+is tagged with the s-tag (222) and c-tag (111) configured for the
+sample subscriber:
+
+```
+root@testclient:~# ip link add link eth0 name eth0.222 type vlan id 222
+root@testclient:~# ip link add link eth0.222 name eth0.222.111 type vlan id 111
+root@testclient:~# ifconfig eth0.222 up
+root@testclient:~# ifconfig eth0.222.111 up
+root@testclient:~# dhclient eth0.222.111
+```
+
+If the vSG is up and everything is working correctly, the eth0.222.111
+interface should acquire an IP address via DHCP and have external connectivity.
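+
+For example, a quick way to check this from inside the container:
+
+```
+root@testclient:~# ip addr show eth0.222.111
+root@testclient:~# ping -c 3 8.8.8.8
+```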
+
+## Access ExampleService from the Subscriber Device
+
+To test that the subscriber device can access the ExampleService, find the IP
+address of the ExampleService Instance in the XOS GUI, and then curl this
+address from inside the testclient container:
+
+```
+root@testclient:~# sudo apt-get install curl
+root@testclient:~# curl 10.168.1.3
+ExampleService
+ Service Message: "service_message"
+ Tenant Message: "tenant_message"
+```
+
+Hooray!  This shows that the subscriber (1) has external connectivity, and
+(2) can access the new service via the vSG.
+
+## Troubleshooting
+
+Sometimes the ExampleService instance comes up with the wrong default route.  If the 
+ExampleService instance is active but the `curl` command does not work, SSH to the
+instance and check its default gateway.  Assuming the management address of the `mysite_exampleservice`
+VM is 172.27.0.2:
+
+```
+ubuntu@pod:~$ ssh-agent bash
+ubuntu@pod:~$ ssh-add
+ubuntu@pod:~$ ssh -A ubuntu@nova-compute
+ubuntu@nova-compute:~$ ssh ubuntu@172.27.0.2
+ubuntu@mysite-exampleservice-2:~$ route -n
+Kernel IP routing table
+Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
+0.0.0.0         172.27.0.1      0.0.0.0         UG    0      0        0 eth1
+10.168.1.0      0.0.0.0         255.255.255.0   U     0      0        0 eth0
+172.27.0.0      0.0.0.0         255.255.255.0   U     0      0        0 eth1
+```
+
+If the default gateway is not `10.168.1.1`, manually set it to this value.
+
+```
+ubuntu@mysite-exampleservice-2:~$ sudo bash
+root@mysite-exampleservice-2:~# route del default gw 172.27.0.1
+root@mysite-exampleservice-2:~# route add default gw 10.168.1.1
+root@mysite-exampleservice-2:~# route -n
+Kernel IP routing table
+Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
+0.0.0.0         10.168.1.1      0.0.0.0         UG    0      0        0 eth0
+10.168.1.0      0.0.0.0         255.255.255.0   U     0      0        0 eth0
+172.27.0.0      0.0.0.0         255.255.255.0   U     0      0        0 eth1
+```
+
+Now the VM should have Internet connectivity and XOS will start downloading Apache. 
+A short while later the `curl` test should complete.
diff --git a/cord-pod/README.md b/cord-pod/README.md
new file mode 100644
index 0000000..8813d3e
--- /dev/null
+++ b/cord-pod/README.md
@@ -0,0 +1,200 @@
+# XOS Configuration for CORD development POD
+
+## Introduction
+
+This directory holds files that are used to configure a development POD for
+CORD.  For more information on the CORD project, check out
+[the CORD website](http://cord.onosproject.org/).
+
+XOS is composed of several core services:
+
+  * A database backend (postgres)
+  * A webserver front end (django)
+  * A synchronizer daemon that interacts with the openstack backend
+  * A synchronizer for each configured XOS service
+
+Each service runs in a separate Docker container.  The containers are built
+automatically by [Docker Hub](https://hub.docker.com/u/xosproject/) using
+the HEAD of the XOS repository.
+
+## How to bring up CORD
+
+Installing a CORD POD involves these steps:
+ 1. Install OpenStack on a cluster
+ 2. Set up the ONOS VTN app and configure OVS on the nova-compute nodes to be
+    controlled by VTN
+ 3. Set up external connectivity for VMs (if not using the CORD fabric)
+ 4. Bring up XOS with the CORD services
+
+### Install OpenStack
+
+To set up OpenStack, follow the instructions in the
+[README.md](https://github.com/open-cloud/openstack-cluster-setup/blob/master/README.md)
+file of the [open-cloud/openstack-cluster-setup](https://github.com/open-cloud/openstack-cluster-setup/)
+repository.  If you're just getting started with CORD, it's probably best to begin with the
+single-node CORD test environment to familiarize yourself with the overall setup.
+
+**NOTE: In order to use the cord-pod configuration, you must set up OpenStack using the above recipe.**
+
+### Set up ONOS VTN
+
+The OpenStack installer above creates a VM called *onos-cord* on the head node.
+To bring up ONOS in this VM, log into the head node and run:
+```
+$ ssh ubuntu@onos-cord
+ubuntu@onos-cord:~$ cd cord; sudo docker-compose up -d
+```
+
+### Set up external connectivity for VMs
+
+The CORD fabric is responsible for providing external (Internet) connectivity
+for VMs created on CORD.  If you are running on CloudLab (or another development
+environment) and want external connectivity without the fabric, download [this script](https://raw.githubusercontent.com/open-cloud/openstack-cluster-setup/master/scripts/compute-ext-net.sh)
+and run it on the Nova compute node(s) as root:
+```
+$ sudo bash compute-ext-net.sh
+```
+
+The script creates a bridge (*databr*) on the node as well as a veth pair
+(*veth0/veth1*).  The *veth0* interface is added as a port on *databr* and
+VTN is configured to use *veth1* as its data plane interface.  Traffic coming
+from *databr* is NAT'ed to the external network via `iptables`.  The configuration
+assumes that *databr* takes the MAC address of *veth0* when it is added as a port;
+this appears to always be the case, though the reason is not clear.
+
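+For reference, the NAT behavior amounts to a masquerade rule roughly like the
+one below (a sketch only; the actual rule and subnet are installed by
+`compute-ext-net.sh` and may differ):
+
+```
+# Masquerade traffic from the VM data-plane subnet out to the external network
+iptables -t nat -A POSTROUTING -s 10.168.1.0/24 -j MASQUERADE
+```
+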
+Note that setting up the full fabric is beyond the scope of this README.
+
+### Bringing up XOS
+
+The OpenStack installer above creates a VM called *xos* on the head node.
+To bring up XOS in this VM, first log into the head node and run:
+```
+$ ssh ubuntu@xos
+ubuntu@xos:~$ cd xos/xos/configurations/cord-pod
+```
+
+Next, check that the following files exist in this directory
+(they will have been put there for you by the cluster installation scripts):
+
+ * *admin-openrc.sh*: Admin credentials for your OpenStack cloud
+ * *id_rsa[.pub]*: A keypair that will be used by the various services
+ * *node_key*: A private key that allows root login to the compute nodes
+
+XOS can then be brought up for CORD by running a few `make` commands.
+First, run:
+
+```
+ubuntu@xos:~/xos/xos/configurations/cord-pod$ make
+```
+
+Before proceeding, you should verify that objects in XOS are
+being sync'ed with OpenStack. [Login to the XOS GUI](#logging-into-xos-on-cloudlab-or-any-remote-host) 
+and select *Users* at left.  Make sure there is a green check next to `padmin@vicci.org`.
+
+> If you are **not** building the single-node development POD, the next
+> step is to create and edit the VTN configuration.  Run `make vtn-external.yaml`
+> then edit the `vtn-external.yaml` TOSCA file.  The `rest_hostname:`
+> field points to the host where ONOS should run the VTN app.  The
+> fields in the `service_vtn` and the objects of type `tosca.nodes.Tag`
+> correspond to the VTN fields listed
+> on [the CORD VTN page on the ONOS Wiki](https://wiki.onosproject.org/display/ONOS/CORD+VTN),
+> under the **ONOS Settings** heading; refer there for the fields'
+> meanings.  
+
+Then run:
+
+```
+ubuntu@xos:~/xos/xos/configurations/cord-pod$ make vtn
+```
+The above step configures the ONOS VTN app by generating a configuration
+and pushing it to ONOS.  You are able to see and modify the configuration
+via the GUI as follows:
+
+* To see the generated configuration, go to *http://xos/admin/onos/onosapp/* 
+([caveat](#logging-into-xos-on-cloudlab-or-any-remote-host)), select
+*VTN_ONOS_app*, then the *Attributes* tab, and look for the
+`rest_onos/v1/network/configuration/` attribute.  
+
+* To change the VTN configuration, modify the fields of the VTN Service object
+and the Tag objects associated with Nodes.  Don't forget to select *Save*.
+
+* After modifying the above fields, delete the `rest_onos/v1/network/configuration/` attribute
+in the *VTN_ONOS_app* and select *Save*.  The attribute will be regenerated using the new information.
+
+* Alternatively, if you want to load your own VTN configuration manually, you can delete the
+`autogenerate` attribute from the *VTN_ONOS_app*, edit the configuration in the
+`rest_onos/v1/network/configuration/` attribute, and select *Save*.
+
+Before proceeding, check that the VTN app is controlling Open vSwitch on the compute nodes.  Log
+into ONOS and run the `cordvtn-nodes` command:
+
+```
+$ ssh -p 8101 karaf@onos-cord   # password is karaf
+onos> cordvtn-nodes
+hostname=nova-compute, hostMgmtIp=192.168.122.177/24, dpIp=192.168.199.1/24, br-int=of:0000000000000001, dpIntf=veth1, init=COMPLETE
+Total 1 nodes
+```
+The important part is the `init=COMPLETE` at the end.  If you do not see this, refer to
+[the CORD VTN page on the ONOS Wiki](https://wiki.onosproject.org/display/ONOS/CORD+VTN) for
+help fixing the problem.  This must be working to bring up VMs on the POD.
+
+> If you are **not** building the single-node development POD, modify `cord-vtn-vsg.yml` 
+> and change `addresses_vsg` so that it contains the IP address block,
+> gateway IP, and gateway MAC of the CORD fabric.  
+
+Then run:
+
+```
+ubuntu@xos:~/xos/xos/configurations/cord-pod$ make cord
+```
+
+
+### Inspecting the vSG
+
+The above series of `make` commands will spin up a vSG for a sample subscriber.  The
+vSG is implemented as a Docker container (using the
+[andybavier/docker-vcpe](https://hub.docker.com/r/andybavier/docker-vcpe/) image
+hosted on Docker Hub) running inside an Ubuntu VM.  Once the VM is created, you
+can log in as the `ubuntu` user at the management network IP (172.27.0.x) on the compute node
+hosting the VM, using the private key generated on the head node by the install process.
+For example, in the single-node development POD configuration, you can log in to the VM
+with management IP 172.27.0.2 using a ProxyCommand as follows:
+
+```
+ubuntu@pod:~$ ssh -o ProxyCommand="ssh -W %h:%p ubuntu@nova-compute" ubuntu@172.27.0.2
+```
+
+Alternatively, you could copy the generated private key to the compute node
+and login from there:
+
+```
+ubuntu@pod:~$ scp ~/.ssh/id_rsa ubuntu@nova-compute:~/.ssh
+ubuntu@pod:~$ ssh ubuntu@nova-compute
+ubuntu@nova-compute:~$ ssh ubuntu@172.27.0.2
+```
+
+Once logged in to the VM, you can run `sudo docker ps` to see the running
+vSG containers:
+
+```
+ubuntu@mysite-vsg-1:~$ sudo docker ps
+CONTAINER ID        IMAGE                    COMMAND             CREATED             STATUS              PORTS               NAMES
+2b0bfb3662c7        andybavier/docker-vcpe   "/sbin/my_init"     5 days ago          Up 5 days                               vcpe-222-111
+```
+
+### Logging into XOS on CloudLab (or any remote host)
+
+The XOS service is accessible on the POD at `http://xos/`, but `xos` maps to a private IP address
+on the management network.  If you install CORD on CloudLab 
+you will not be able to directly access the XOS GUI.
+In order to log into the XOS GUI in the browser on your local machine (desktop or laptop), 
+you can set up an SSH tunnel to your CloudLab node.  Assuming that 
+`<your-cloudlab-node>` is the DNS name of the CloudLab node hosting your experiment,
+run the following on your local machine to create the tunnel:
+
+```
+$ ssh -L 8888:xos:80 <your-cloudlab-node>
+```
+
+Then you should be able to access the XOS GUI by pointing your browser to
+`http://localhost:8888`.  Default username/password is `padmin@vicci.org/letmein`.
diff --git a/cord-pod/admin-openrc.sh b/cord-pod/admin-openrc.sh
new file mode 100644
index 0000000..bfc9eab
--- /dev/null
+++ b/cord-pod/admin-openrc.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+# Replace with the OpenStack admin credentials for your cluster
+export OS_TENANT_NAME=admin
+export OS_USERNAME=admin
+export OS_PASSWORD=admin
+export OS_AUTH_URL=http://localhost:35357/v2.0
+
diff --git a/cord-pod/cdn/README.md b/cord-pod/cdn/README.md
new file mode 100644
index 0000000..be8c184
--- /dev/null
+++ b/cord-pod/cdn/README.md
@@ -0,0 +1,77 @@
+## Set up a new CDN
+
+### CDN on VTN - headnode
+
+1. nova flavor-create --is-public true m1.cdnnode auto 8192 110 4
+2. in XOS create flavor m1.cdnnode and add to deployment
+
+### CDN on VTN - CMI
+
+1. Make sure plenty of glance space on ctl node
+2. Make sure plenty of instance space on compute nodes
+3. Install cmi-0.3.img into XOS images/ directory
+4. Install CentOS-6-cdnnode-0.3.img into XOS images/ directory
+5. Wait for these two images to be loaded into glance (check glance image-list for status)
+6. XOS UI: Add cmi and CentOS images to MyDeployment
+7. Run recipe xos/configurations/cord-pod/pod-cdn.yaml
+       * this will create mysite_cdn slice, cdn-public network, and add management and cdn-public networks to slice
+8. Instantiate CMI instance in mysite_cdn
+       * flavor: m1.large
+       * image: cmi-0.3.img
+9. edit configurations/cord-pod/cdn/cmi-settings.sh
+       * update COMPUTE_NODE and MGMT_IP to match CMI instance
+       * update NODE_KEY to match ssh key for root @ the compute node
+       * do not change VM_KEY; the pubkey is baked into the instance
+10. edit configurations/cord-pod/cdn/cmi.yaml
+       * update gateway_ip and gateway_mac to reflect public internet gateway CMI will use
+11. copy the keygen and allkeys.template to the private/ directory
+12. copy cmi_id_rsa
+13. run setup-cmi.sh
+       * this will SSH into the CMI and run setup, then modify some settings.
+       * it may take a long time, 10-20 minutes or more
+       * takeover script will be saved to takeovers/. Takeover script will be used in the next phase.
+
+### CDN on VTN - cdnnode
+
+1. Instantiate cdnnode instance in mysite_cdn
+       * flavor: m1.cdnnode
+       * image: CentOS-6-cdnnode-0.3.img
+2. Log into compute node and Attach disk
+       * virsh attach-disk <instance_name> /dev/sdc vdc --cache none
+       * (make sure this disk wasn't used anywhere else!)
+3. log into cdnnode VM
+       * make sure default gateway is good (check public connectivity)
+       * make sure arp table is good
+       * make sure CMI is reachable from cdnnode
+       * run takeover script that was created by the CMI 
+       * (consider commenting out the final reboot -f, and verify that the rest of the script worked before rebooting)
+       * Node will take a long time to install
+4. log into cdnnode
+       * to SSH into cdnnode, go into CMI, vserver coplc, cd /etc/planetlab, and use debug_ssh_key.rsa w/ root user
+       * check default gateway
+       * fix arp entry for default gateway
+
+### CDN on VTN - cmi part 2
+
+1. run setup-logicalinterfaces.sh
+
+### CDN on VTN - important notes
+
+We manually edited synchronizers/vcpe/templates/dnsmasq_safe_servers.j2 inside the vcpe synchronizer VM:
+
+    # temporary for ONS demo
+    address=/z.cdn.turner.com/207.141.192.134
+    address=/cnn-vh.akamaihd.net/207.141.192.134
+
+### Test Commands
+
+* First, make sure the vSG is the only DNS server available in the test client. 
+* Second, make sure cdn_enable bit is set in CordSubscriber object for your vSG.
+* curl -L -vvvv http://downloads.onosproject.org/vm/onos-tutorial-1.1.0r220-ovf.zip > /dev/null
+* curl -L -vvvv http://onlab.vicci.org/onos-videos/Nov-planning-day1/Day1+00+Bill+-+Community+Growth.mp4 > /dev/null
+* curl -L -vvvv http://downloads.onosproject.org/release/onos-1.2.0.zip > /dev/null
+
+## Restart CDN after power-down
+
+To do...
diff --git a/cord-pod/cdn/cmi-logicalinterfaces.yaml b/cord-pod/cdn/cmi-logicalinterfaces.yaml
new file mode 100644
index 0000000..d45b63a
--- /dev/null
+++ b/cord-pod/cdn/cmi-logicalinterfaces.yaml
@@ -0,0 +1,11 @@
+---
+- hosts: cmi
+  connection: ssh
+  user: root
+  tasks:
+  - name: copy over cmi logical interface template
+    template: src=templates/setup_cmi_logicalinterfaces.sh dest=/vservers/coplc/root/setup_cmi_logicalinterfaces.sh
+
+  - name: run logical interface script
+    command: vserver coplc exec onevsh /root/setup_cmi_logicalinterfaces.sh
+
diff --git a/cord-pod/cdn/cmi-settings.sh b/cord-pod/cdn/cmi-settings.sh
new file mode 100644
index 0000000..db6c5f3
--- /dev/null
+++ b/cord-pod/cdn/cmi-settings.sh
@@ -0,0 +1,12 @@
+# This holds the connection information necessary to talk to your CMI.
+# It will be used by setup-cmi.sh and ssh-cmi.sh
+
+#COMPUTE_NODE=cp-2.smbaker-xos-vtn.xos-pg0.clemson.cloudlab.us
+#MGMT_IP=172.27.0.22
+#NODE_KEY=/root/setup/id_rsa
+#VM_KEY=cmi_id_rsa
+
+COMPUTE_NODE=10.90.0.65
+MGMT_IP=172.27.0.17
+NODE_KEY=cord_pod_node_key
+VM_KEY=cmi_id_rsa
diff --git a/cord-pod/cdn/cmi.yaml b/cord-pod/cdn/cmi.yaml
new file mode 100644
index 0000000..62abe01
--- /dev/null
+++ b/cord-pod/cdn/cmi.yaml
@@ -0,0 +1,69 @@
+---
+- hosts: cmi
+  connection: ssh
+  user: root
+  vars:
+    eth_device: eth0
+    eth_mac: 02:42:CF:8D:C0:82
+    cmi_password: XOScdn123$
+    cmi_hostname: xos-cloudlab-cmi-vtn.opencloud.us
+    cmi_dns: 8.8.8.8
+    cdn_site: CoBlitz Test
+    cdn_short_name: cobtest
+    cdn_name: CoBlitz
+#    gateway_ip: 10.124.0.1
+#    gateway_mac: 00:8c:fa:5b:09:d8
+    gateway_ip: 207.141.192.129
+    gateway_mac: a4:23:05:45:56:79
+    node_hostname: xos-cloudlab-node1-vtn.opencloud.us
+  tasks:
+  - name: fix the networking
+    shell: "{{ item }}"
+    with_items:
+      - ifconfig {{ eth_device }} hw ether {{ eth_mac }}
+      - ip route del default || true
+      - ip route add default via {{ gateway_ip }}
+      - arp -s {{ gateway_ip }} {{ gateway_mac }}
+
+  - name: copy over setup answers
+    template: src=templates/setup_answers.txt dest=/root/setup_answers.txt
+
+  - name: run the setup script
+    shell: /a/sbin/setup.sh < /root/setup_answers.txt
+    args:
+        creates: /a/var/log/setup.log
+
+  - name: fix onevapi CDNPrefix bug
+    shell: sed -i 's/hostname/str/g' /vservers/coplc/usr/share/cob_api/COB/PublicObjects/CDNPrefix.py
+
+  - name: fix onevapi OriginServer bug
+    shell: sed -i 's/attrToCheck = "edge_hosttype"/attrToCheck = "edge_hosttype_broken"/g' /vservers/coplc/usr/share/cob_api/COB/PublicObjects/OriginServer.py
+
+  - name: copy over cmi setup template
+    template: src=templates/setup_cmi_onevsh.sh dest=/vservers/coplc/root/setup_cmi_onevsh.sh
+
+  - name: run cmi setup script
+    command: vserver coplc exec onevsh /root/setup_cmi_onevsh.sh
+
+  - name: copy over cmi node setup template
+    template: src=templates/setup_cmi_node.sh dest=/vservers/coplc/root/setup_cmi_node.sh
+
+  - name: run node setup script
+    command: vserver coplc exec plcsh /root/setup_cmi_node.sh
+    args:
+        creates: /vservers/coplc/root/takeover-{{ node_hostname }}
+
+  - name: retrieve node takeover script
+    fetch: src=/vservers/coplc/root/takeover-{{ node_hostname }} dest=takeovers/takeover-{{ node_hostname }}
+
+  - name: update all keys script
+    copy: src=private/allkeys.template dest=/vservers/coplc/etc/onevantage/services/HPC/templates/usr/local/CoBlitz/var/allkeys.template
+
+  - name: install keygen
+    copy: src=private/keygen dest=/vservers/coplc/etc/onevantage/services/HPC/templates/usr/local/CoBlitz/var/keygen mode=0755
+
+  - name: download socat
+    get_url: url=http://pkgs.repoforge.org/socat/socat-1.7.2.1-1.el6.rf.x86_64.rpm dest=/root/socat-1.7.2.1-1.el6.rf.x86_64.rpm
+ 
+  - name: install socat
+    yum: name=/root/socat-1.7.2.1-1.el6.rf.x86_64.rpm state=present
diff --git a/cord-pod/cdn/cmi_id_rsa.pub b/cord-pod/cdn/cmi_id_rsa.pub
new file mode 100644
index 0000000..4acc08f
--- /dev/null
+++ b/cord-pod/cdn/cmi_id_rsa.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+4THhqOmPNnFa/C/WbR7+BOvjJAZPRmB5d2c81CNHzkyk7OLaibEQ84Hkcaaj+KLQPKcyWhpyCLlXiaOufHQUqI4rKpFceEPpuaGRGvFrs6JRK1m3X+fj2Xw9Odg+SnJ+zHyJqwyh+8QTubFZfPXx0Gti5P6utkgzWqUmh0XuqC0JLVXBGs5M3ViIq7NemlUPcWPCLfsCzDMHMMvSeJfnT7+LB76YXqMNpmwyq9Dwv/MRd/8lV6C4q3ZmlwBBuXl4JxdUha6LtecLO+2Wdjoin+g3otCmgCnVZKAh2H1BLcZkHSy9ILs+vj22m8oB2ufyefP+R6Xsyne+G3kpJqWV smbaker@fc16-64.lan
diff --git a/cord-pod/cdn/private/README b/cord-pod/cdn/private/README
new file mode 100644
index 0000000..e5cfbc1
--- /dev/null
+++ b/cord-pod/cdn/private/README
@@ -0,0 +1 @@
+Stuff in here is private and will not be uploaded to github. 
diff --git a/cord-pod/cdn/setup-cmi-logicalinterfaces.sh b/cord-pod/cdn/setup-cmi-logicalinterfaces.sh
new file mode 100644
index 0000000..b1acd65
--- /dev/null
+++ b/cord-pod/cdn/setup-cmi-logicalinterfaces.sh
@@ -0,0 +1,18 @@
+#! /bin/bash
+
+source cmi-settings.sh
+
+echo "[ssh_connection]" > cmi.conf
+echo "ssh_args = -o \"ProxyCommand ssh -q -i $NODE_KEY -o StrictHostKeyChecking=no root@$COMPUTE_NODE nc $MGMT_IP 22\"" >> cmi.conf
+echo "scp_if_ssh = True" >> cmi.conf
+echo "pipelining = True" >> cmi.conf
+echo >> cmi.conf
+echo "[defaults]" >> cmi.conf
+echo "host_key_checking = False" >> cmi.conf
+
+echo "cmi ansible_ssh_private_key_file=$VM_KEY" > cmi.hosts
+
+export ANSIBLE_CONFIG=cmi.conf
+export ANSIBLE_HOSTS=cmi.hosts
+
+ansible-playbook -v --step cmi-logicalinterfaces.yaml
diff --git a/cord-pod/cdn/setup-cmi.sh b/cord-pod/cdn/setup-cmi.sh
new file mode 100644
index 0000000..8cfe11c
--- /dev/null
+++ b/cord-pod/cdn/setup-cmi.sh
@@ -0,0 +1,20 @@
+#! /bin/bash
+
+source cmi-settings.sh
+
+#apt-get -y install sshpass
+
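+# Build an ansible config that reaches the CMI by proxying SSH through the
+# compute node (using $NODE_KEY) to the CMI's management IP ($MGMT_IP).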
+echo "[ssh_connection]" > cmi.conf
+echo "ssh_args = -o \"ProxyCommand ssh -q -i $NODE_KEY -o StrictHostKeyChecking=no root@$COMPUTE_NODE nc $MGMT_IP 22\"" >> cmi.conf
+echo "scp_if_ssh = True" >> cmi.conf
+echo "pipelining = True" >> cmi.conf
+echo >> cmi.conf
+echo "[defaults]" >> cmi.conf
+echo "host_key_checking = False" >> cmi.conf
+
+echo "cmi ansible_ssh_private_key_file=$VM_KEY" > cmi.hosts
+
+export ANSIBLE_CONFIG=cmi.conf
+export ANSIBLE_HOSTS=cmi.hosts
+
+ansible-playbook -v cmi.yaml
diff --git a/cord-pod/cdn/ssh-cmi.sh b/cord-pod/cdn/ssh-cmi.sh
new file mode 100644
index 0000000..15a0408
--- /dev/null
+++ b/cord-pod/cdn/ssh-cmi.sh
@@ -0,0 +1,5 @@
+#! /bin/bash
+
+source ./cmi-settings.sh
+
+ssh -i $VM_KEY -o "ProxyCommand ssh -q -i $NODE_KEY -o StrictHostKeyChecking=no root@$COMPUTE_NODE nc $MGMT_IP 22" root@cmi
diff --git a/cord-pod/cdn/templates/setup_answers.txt b/cord-pod/cdn/templates/setup_answers.txt
new file mode 100644
index 0000000..1c20be9
--- /dev/null
+++ b/cord-pod/cdn/templates/setup_answers.txt
@@ -0,0 +1,18 @@
+y
+{{ cmi_password }}
+{{ cmi_password }}
+n
+{{ eth_device }}
+y
+{{ cmi_hostname }}
+{{ eth_device }}
+
+
+{{ cdn_site }}
+{{ cdn_short_name }}
+{{ cmi_dns }}
+
+{{ cdn_name }}
+{{ cmi_password }}
+{{ cmi_password }}
+y
diff --git a/cord-pod/cdn/templates/setup_cmi_logicalinterfaces.sh b/cord-pod/cdn/templates/setup_cmi_logicalinterfaces.sh
new file mode 100644
index 0000000..2ac8422
--- /dev/null
+++ b/cord-pod/cdn/templates/setup_cmi_logicalinterfaces.sh
@@ -0,0 +1,14 @@
+lab="External"
+for service in ["HyperCache", "RequestRouter"]:
+    for node in ListAll("Node"):
+        node_id = node["node_id"]
+        for interface_id in node["interface_ids"]:
+            iface=Read("Interface", interface_id)
+            if iface["is_primary"] and len(iface["ip_address_ids"])==1:
+                ip_id = iface["ip_address_ids"][0]
+                if ListAll("LogicalInterface", {"node_id": node_id, "ip_address_ids": [ip_id], "label": lab, "service": service}):
+                    print "External label exists for node", node_id, "ip", ip_id, "service", service
+                else:
+                    print "Adding external label for node", node_id, "ip", ip_id, "service", service
+                    li = Create("LogicalInterface", {"node_id": node_id, "label": lab, "service": service})
+                    Bind("LogicalInterface", li, "IpAddress", ip_id)
diff --git a/cord-pod/cdn/templates/setup_cmi_node.sh b/cord-pod/cdn/templates/setup_cmi_node.sh
new file mode 100644
index 0000000..93435a3
--- /dev/null
+++ b/cord-pod/cdn/templates/setup_cmi_node.sh
@@ -0,0 +1,20 @@
+site_id=GetSites()[0]["site_id"]
+nodeinfo = {'hostname': "{{ node_hostname }}", 'dns': "8.8.8.8"}
+n_id = AddNode(site_id, nodeinfo)
+mac = "DE:AD:BE:EF:00:01"
+interfacetemplate = {'mac': mac, 'kind': 'physical', 'method': 'static', 'is_primary': True, 'if_name': 'eth0'}
+i_id = AddInterface(n_id, interfacetemplate)
+ip_addr = "169.254.169.1" # TO DO: get this from Neutron in the future
+netmask = "255.255.255.254" # TO DO: get this from Neutron in the future
+ipinfo = {'ip_addr': ip_addr, 'netmask': netmask, 'type': 'ipv4'}
+ip_id = AddIpAddress(i_id, ipinfo)
+routeinfo = {'interface_id': i_id, 'next_hop': "127.0.0.127", 'subnet': '0.0.0.0', 'metric': 1}
+r_id = AddRoute(n_id, routeinfo)
+hpc_slice_id = GetSlices({"name": "co_coblitz"})[0]["slice_id"]
+AddSliceToNodes(hpc_slice_id, [n_id])
+dnsdemux_slice_id = GetSlices({"name": "co_dnsdemux"})[0]["slice_id"]
+dnsredir_slice_id = GetSlices({"name": "co_dnsredir_coblitz"})[0]["slice_id"]
+AddSliceToNodes(dnsdemux_slice_id, [n_id])
+AddSliceToNodes(dnsredir_slice_id, [n_id])
+takeoverscript=GetBootMedium(n_id, "node-cloudinit", '')
+file("/root/takeover-{{ node_hostname }}","w").write(takeoverscript)
diff --git a/cord-pod/cdn/templates/setup_cmi_onevsh.sh b/cord-pod/cdn/templates/setup_cmi_onevsh.sh
new file mode 100644
index 0000000..c517780
--- /dev/null
+++ b/cord-pod/cdn/templates/setup_cmi_onevsh.sh
@@ -0,0 +1,19 @@
+def CreateOrFind(kind, args):
+    objs=ListAll(kind, args.copy())
+    if objs:
+        id_name = {"ServiceProvider": "service_provider_id",
+                   "ContentProvider": "content_provider_id",
+                   "OriginServer": "origin_server_id",
+                   "CDNPrefix": "cdn_prefix_id"}
+        print kind, "exists with args", args
+        return objs[0].get(id_name[kind])
+    else:
+        print "create", kind, "with args", args
+        return Create(kind, args)
+sp=CreateOrFind("ServiceProvider", {"account": "cord", "name": "cord", "enabled": True})
+cp=CreateOrFind("ContentProvider", {"account": "test", "name": "test", "enabled": True, "service_provider_id": sp})
+ors=CreateOrFind("OriginServer", {"url": "http://www.cs.arizona.edu", "content_provider_id": cp, "service_type": "HyperCache"})
+pre=CreateOrFind("CDNPrefix", {"service": "HyperCache", "enabled": True, "content_provider_id": cp, "cdn_prefix": "test.vicci.org", "default_origin_server": "http://www.cs.arizona.edu"})
+cp=CreateOrFind("ContentProvider", {"account": "onlab", "name": "onlab", "enabled": True, "service_provider_id": sp})
+ors=CreateOrFind("OriginServer", {"url": "http://onlab.vicci.org", "content_provider_id": cp, "service_type": "HyperCache"})
+pre=CreateOrFind("CDNPrefix", {"service": "HyperCache", "enabled": True, "content_provider_id": cp, "cdn_prefix": "onlab.vicci.org", "default_origin_server": "http://onlab.vicci.org"})
diff --git a/cord-pod/ceilometer.yaml b/cord-pod/ceilometer.yaml
new file mode 100644
index 0000000..07b163e
--- /dev/null
+++ b/cord-pod/ceilometer.yaml
@@ -0,0 +1,263 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Set up the CORD monitoring (Ceilometer) service.
+
+imports:
+   - custom_types/xos.yaml
+
+node_types:
+    tosca.nodes.SFlowService:
+        derived_from: tosca.nodes.Root
+        description: >
+            XOS SFlow Collection Service
+        capabilities:
+            scalable:
+                type: tosca.capabilities.Scalable
+            service:
+                type: tosca.capabilities.xos.Service
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Type of service.
+            view_url:
+                type: string
+                required: false
+                description: URL to follow when icon is clicked in the Service Directory.
+            icon_url:
+                type: string
+                required: false
+                description: ICON to display in the Service Directory.
+            enabled:
+                type: boolean
+                default: true
+            published:
+                type: boolean
+                default: true
+                description: If True then display this Service in the Service Directory.
+            public_key:
+                type: string
+                required: false
+                description: Public key to install into Instances to allow Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
+            versionNumber:
+                type: string
+                required: false
+                description: Version number of Service.
+            sflow_port:
+              type: integer
+              required: false
+              default: 6343
+              description: sFlow listening port
+            sflow_api_port:
+              type: integer
+              required: false
+              default: 33333
+              description: sFlow publish/subscribe API listening port
+
+    tosca.nodes.CeilometerService:
+        derived_from: tosca.nodes.Root
+        description: >
+            XOS Ceilometer Service
+        capabilities:
+            scalable:
+                type: tosca.capabilities.Scalable
+            service:
+                type: tosca.capabilities.xos.Service
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Type of service.
+            view_url:
+                type: string
+                required: false
+                description: URL to follow when icon is clicked in the Service Directory.
+            icon_url:
+                type: string
+                required: false
+                description: ICON to display in the Service Directory.
+            enabled:
+                type: boolean
+                default: true
+            published:
+                type: boolean
+                default: true
+                description: If True then display this Service in the Service Directory.
+            public_key:
+                type: string
+                required: false
+                description: Public key to install into Instances to allow Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
+            versionNumber:
+                type: string
+                required: false
+                description: Version number of Service.
+            ceilometer_pub_sub_url:
+                type: string
+                required: false
+                description: REST URL of ceilometer PUB/SUB component
+
+    tosca.nodes.CeilometerTenant:
+        derived_from: tosca.nodes.Root
+        description: >
+            CORD: A Tenant of the Ceilometer Service.
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of tenant
+
+topology_template:
+  node_templates:
+    service_ceilometer:
+      type: tosca.nodes.CeilometerService
+      requirements:
+      properties:
+          view_url: /admin/ceilometer/ceilometerservice/$id$/
+          kind: ceilometer
+          ceilometer_pub_sub_url: http://10.11.10.1:4455/
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          private_key_fn: /opt/xos/synchronizers/monitoring_channel/monitoring_channel_private_key
+      artifacts:
+          pubkey: /opt/xos/synchronizers/monitoring_channel/monitoring_channel_public_key
+
+#    service_sflow:
+#      type: tosca.nodes.SFlowService
+#      requirements:
+#      properties:
+#          view_url: /admin/ceilometer/sflowservice/$id$/
+#          kind: sflow
+#          sflow_port: 6343
+#          sflow_api_port: 33333
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    management:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          no-create: true
+          no-delete: true
+          no-update: true
+
+#    ceilometer_network:
+#      type: tosca.nodes.network.Network.XOS
+#      properties:
+#          ip_version: 4
+#          labels: ceilometer_client_access
+#      requirements:
+#          - network_template:
+#              node: Private
+#              relationship: tosca.relationships.UsesNetworkTemplate
+#          - owner:
+#              node: mysite_ceilometer
+#              relationship: tosca.relationships.MemberOfSlice
+#          - connection:
+#              node: mysite_ceilometer
+#              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    ceilometer-trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    m1.small:
+      type: tosca.nodes.Flavor
+
+    mysite_ceilometer:
+      description: Ceilometer Proxy Slice
+      type: tosca.nodes.Slice
+      requirements:
+          - ceilometer_service:
+              node: service_ceilometer
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: ceilometer-trusty-server-multi-nic
+                relationship: tosca.relationships.DefaultImage
+          - management:
+              node: management
+              relationship: tosca.relationships.ConnectsToNetwork
+          - m1.small:
+              node: m1.small
+              relationship: tosca.relationships.DefaultFlavor
+
+#    mysite_sflow:
+#      description: Slice for sFlow service
+#      type: tosca.nodes.Slice
+#      requirements:
+#          - sflow_service:
+#              node: service_sflow
+#              relationship: tosca.relationships.MemberOfService
+#          - site:
+#              node: mysite
+#              relationship: tosca.relationships.MemberOfSite
+
+    my_ceilometer_tenant:
+      description: Ceilometer Service default Tenant
+      type: tosca.nodes.CeilometerTenant
+      requirements:
+          - provider_service:
+              node: service_ceilometer
+              relationship: tosca.relationships.MemberOfService
+       
+    # Virtual machines
+#    sflow_service_instance:
+#      type: tosca.nodes.Compute
+#      capabilities:
+#        # Host container properties
+#        host:
+#         properties:
+#           num_cpus: 1
+#           disk_size: 10 GB
+#           mem_size: 4 MB
+#        # Guest Operating System properties
+#        os:
+#          properties:
+#            # host Operating System image properties
+#            architecture: x86_64
+#            type: linux
+#            distribution: Ubuntu
+#            version: 14.10
+#      requirements:
+#          - slice:
+#                node: mysite_sflow
+#                relationship: tosca.relationships.MemberOfSlice
+
+    Ceilometer:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosCeilometerDashboard
+    Tenant:
+      type: tosca.nodes.DashboardView
+      properties:
+          no-create: true
+          no-update: true
+          no-delete: true
+
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      properties:
+          firstname: XOS
+          lastname: admin
+          is_admin: true
+      requirements:
+          - tenant_dashboard:
+              node: Tenant
+              relationship: tosca.relationships.UsesDashboard
+          - ceilometer_dashboard:
+              node: Ceilometer
+              relationship: tosca.relationships.UsesDashboard
diff --git a/cord-pod/cleanup.sh b/cord-pod/cleanup.sh
new file mode 100755
index 0000000..704cacb
--- /dev/null
+++ b/cord-pod/cleanup.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
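+# Delete all ports attached to the network's subnet, then delete the network itself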
+function cleanup_network {
+  NETWORK=$1
+  SUBNETS=`neutron net-show $NETWORK | grep -i subnets | awk '{print $4}'`
+  if [[ $SUBNETS != "" ]]; then
+      PORTS=`neutron port-list | grep -i $SUBNETS | awk '{print $2}'`
+      for PORT in $PORTS; do
+          echo "Deleting port $PORT"
+          neutron port-delete $PORT
+      done
+  fi
+  neutron net-delete $NETWORK
+}
+
+source ./admin-openrc.sh
+
+echo "Deleting VMs"
+# Delete all VMs
+VMS=$( nova list --all-tenants|grep mysite|awk '{print $2}' )
+for VM in $VMS
+do
+    nova delete $VM
+done
+
+echo "Waiting 5 seconds..."
+sleep 5
+
+cleanup_network lan_network
+cleanup_network wan_network
+cleanup_network mysite_vcpe-private
+cleanup_network mysite_vsg-access
+cleanup_network management
+
+echo "Deleting networks"
+# Delete all networks beginning with mysite_
+NETS=$( neutron net-list --all-tenants|grep mysite|awk '{print $2}' )
+for NET in $NETS
+do
+    neutron net-delete $NET
+done
+
+neutron net-delete lan_network || true
+neutron net-delete subscriber_network || true
+neutron net-delete public_network || true
+neutron net-delete hpc_client_network || true
+neutron net-delete ceilometer_network || true
+neutron net-delete management || true
+neutron net-delete mysite_vsg-access || true
+neutron net-delete exampleservice-public || true
diff --git a/cord-pod/cord-volt-devices.yaml b/cord-pod/cord-volt-devices.yaml
new file mode 100644
index 0000000..8b41623
--- /dev/null
+++ b/cord-pod/cord-volt-devices.yaml
@@ -0,0 +1,47 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Just enough TOSCA to configure vOLT devices and access agents on the CORD POD
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    service#volt:
+      type: tosca.nodes.VOLTService
+      properties:
+          no-create: True
+          no-delete: True
+          no-update: True
+
+    voltdev-1:
+      type: tosca.nodes.VOLTDevice
+      properties:
+            driver: pmc-olt
+            openflow_id: of:1000000000000001
+            access_devices: >
+              2 222,
+              3 223,
+              4 224
+      requirements:
+          - volt_service:
+              node: service#volt
+              relationship: tosca.relationships.MemberOfService
+          - access_agent:
+              node: agent-1
+              relationship: tosca.relationships.UsesAgent
+
+    agent-1:
+      type: tosca.nodes.AccessAgent
+      properties:
+          mac: AA:BB:CC:DD:EE:FF
+          port_mappings: >
+            of:0000000000000002/2 DE:AD:BE:EF:BA:11,
+            of:0000000000000002/3 BE:EF:DE:AD:BE:EF
+      requirements:
+          - volt_service:
+              node: service#volt
+              relationship: tosca.relationships.MemberOfService
+
+
+
diff --git a/cord-pod/cord-vtn-vsg.yaml b/cord-pod/cord-vtn-vsg.yaml
new file mode 100644
index 0000000..4f8b9ec
--- /dev/null
+++ b/cord-pod/cord-vtn-vsg.yaml
@@ -0,0 +1,257 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Just enough Tosca to get the vSG slice running on the CORD POD
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    # CORD Services
+    service#vtr:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /admin/vtr/vtrservice/$id$/
+          kind: vTR
+          replaces: service_vtr
+
+    service#volt:
+      type: tosca.nodes.VOLTService
+      requirements:
+          - vsg_tenant:
+              node: service#vsg
+              relationship: tosca.relationships.TenantOfService
+      properties:
+          view_url: /admin/volt/voltservice/$id$/
+          kind: vOLT
+          replaces: service_volt
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          private_key_fn: /opt/xos/services/volt/keys/volt_rsa
+      artifacts:
+          pubkey: /opt/xos/services/volt/keys/volt_rsa.pub
+
+    addresses_vsg:
+      type: tosca.nodes.AddressPool
+      properties:
+          addresses: 10.168.0.0/24
+          gateway_ip: 10.168.0.1
+          gateway_mac: 02:42:0a:a8:00:01
+
+    addresses_exampleservice-public:
+      type: tosca.nodes.AddressPool
+      properties:
+          addresses: 10.168.1.0/24
+          gateway_ip: 10.168.1.1
+          gateway_mac: 02:42:0a:a8:00:01
+
+    service#vsg:
+      type: tosca.nodes.VSGService
+      requirements:
+          - vrouter_tenant:
+              node: service#vrouter
+              relationship: tosca.relationships.TenantOfService
+      properties:
+          view_url: /admin/vsg/vsgservice/$id$/
+          backend_network_label: hpc_client
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          private_key_fn: /opt/xos/services/vsg/keys/vsg_rsa
+#          node_label: label_vsg
+          replaces: service_vsg
+      artifacts:
+          pubkey: /opt/xos/services/vsg/keys/vsg_rsa.pub
+
+    service#vrouter:
+      type: tosca.nodes.VRouterService
+      properties:
+          view_url: /admin/vrouter/vrouterservice/$id$/
+          replaces: service_vrouter
+      requirements:
+          - addresses_vsg:
+              node: addresses_vsg
+              relationship: tosca.relationships.ProvidesAddresses
+          - addresses_service1:
+              node: addresses_exampleservice-public
+              relationship: tosca.relationships.ProvidesAddresses
+
+
+    service#ONOS_CORD:
+      type: tosca.nodes.ONOSService
+      properties:
+          no-delete: true
+          no-create: true
+          no-update: true
+
+    service#ONOS_Fabric:
+      type: tosca.nodes.ONOSService
+      properties:
+          no-delete: true
+          no-create: true
+          no-update: true
+
+    vOLT_ONOS_app:
+      type: tosca.nodes.ONOSvOLTApp
+      requirements:
+          - onos_tenant:
+              node: service#ONOS_CORD
+              relationship: tosca.relationships.TenantOfService
+          - volt_service:
+              node: service#volt
+              relationship: tosca.relationships.UsedByService
+      properties:
+          install_dependencies: onos-ext-notifier-1.0-SNAPSHOT.oar, onos-ext-volt-event-publisher-1.0-SNAPSHOT.oar
+          dependencies: org.onosproject.openflow-base, org.onosproject.olt, org.ciena.onos.ext_notifier, org.ciena.onos.volt_event_publisher
+          autogenerate: volt-network-cfg
+
+    vRouter_ONOS_app:
+      type: tosca.nodes.ONOSvRouterApp
+      requirements:
+          - onos_tenant:
+              node: service#ONOS_Fabric
+              relationship: tosca.relationships.TenantOfService
+          - vrouter_service:
+              node: service#vrouter
+              relationship: tosca.relationships.UsedByService
+      properties:
+          dependencies: org.onosproject.vrouter
+          autogenerate: vrouter-network-cfg
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    management:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          no-create: true
+          no-delete: true
+          no-update: true
+
+    image#vsg-1.0:
+      type: tosca.nodes.Image
+
+    mysite:
+      type: tosca.nodes.Site
+
+    label_vsg:
+      type: tosca.nodes.NodeLabel
+
+    # Networks required by the CORD setup
+    mysite_vsg-access:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_vsg
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_vsg
+              relationship: tosca.relationships.ConnectsToSlice
+
+    # CORD Slices
+    mysite_vsg:
+      description: vSG Controller Slice
+      type: tosca.nodes.Slice
+      properties:
+          network: noauto
+      requirements:
+          - vsg_service:
+              node: service#vsg
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - management:
+              node: management
+              relationship: tosca.relationships.ConnectsToNetwork
+          - image:
+              node: image#vsg-1.0
+              relationship: tosca.relationships.DefaultImage
+
+    # Let's add a user who can be administrator of the household
+    johndoe@myhouse.com:
+      type: tosca.nodes.User
+      properties:
+          password: letmein
+          firstname: john
+          lastname: doe
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - dependency:
+                node: mysite_vsg
+                relationship: tosca.relationships.DependsOn
+
+    # A subscriber
+    My House:
+       type: tosca.nodes.CORDSubscriber
+       properties:
+           service_specific_id: 123
+           firewall_enable: false
+           cdn_enable: false
+           url_filter_enable: false
+           url_filter_level: R
+       requirements:
+          - house_admin:
+              node: johndoe@myhouse.com
+              relationship: tosca.relationships.AdminPrivilege
+
+    Mom's PC:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 01:02:03:04:05:06
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    Dad's PC:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 90:E2:BA:82:F9:75
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    Jack's Laptop:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 68:5B:35:9D:91:D5
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    Jill's Laptop:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 34:36:3B:C9:B6:A6
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    My Volt:
+        type: tosca.nodes.VOLTTenant
+        properties:
+            service_specific_id: 123
+            s_tag: 222
+            c_tag: 111
+        requirements:
+            - provider_service:
+                node: service#volt
+                relationship: tosca.relationships.MemberOfService
+            - subscriber:
+                node: My House
+                relationship: tosca.relationships.BelongsToSubscriber
+            - dependency:
+                node: mysite_vsg
+                relationship: tosca.relationships.DependsOn
diff --git a/cord-pod/docker-compose-bootstrap.yml b/cord-pod/docker-compose-bootstrap.yml
new file mode 100644
index 0000000..78a014e
--- /dev/null
+++ b/cord-pod/docker-compose-bootstrap.yml
@@ -0,0 +1,61 @@
+xos_db:
+    image: xosproject/xos-postgres
+    expose:
+        - "5432"
+
+xos_synchronizer_onboarding:
+    image: xosproject/xos-synchronizer-onboarding
+    command: bash -c "cd /opt/xos/synchronizers/onboarding; ./run.sh"
+    #command: sleep 86400
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: onboarding
+    links:
+        - xos_db
+    volumes:
+        - /var/run/docker.sock:/var/run/docker.sock
+        - ./key_import:/opt/xos/key_import:ro
+        - ./onboarding-docker-compose:/opt/xos/synchronizers/onboarding/docker-compose
+        - ../../xos_services:/opt/xos_services
+    log_driver: "json-file"
+    log_opt:
+            max-size: "100k"
+            max-file: "5"
+
+xos_synchronizer_openstack:
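+    # the initial sleep gives the database and UI containers time to come up before the synchronizer starts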
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/openstack/xos-synchronizer.py"
+    image: xosproject/xos-synchronizer-openstack
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: openstack
+    links:
+        - xos_db
+    volumes:
+        - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+        - ./xos_cord_config:/opt/xos/xos_configuration/xos_cord_config:ro
+        - .:/root/setup:ro
+        - ../vtn/files/xos_vtn_config:/opt/xos/xos_configuration/xos_vtn_config:ro
+        - ./images:/opt/xos/images:ro
+    log_driver: "json-file"
+    log_opt:
+            max-size: "100k"
+            max-file: "5"
+
+xos_bootstrap_ui:
+    command: python /opt/xos/manage.py runserver 0.0.0.0:81 --insecure --makemigrations
+    environment:
+        - CONFIG_DIR
+    image: xosproject/xos
+    links:
+        - xos_db
+    ports:
+        - "81:81"
+    volumes:
+        - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+        - ./xos_cord_config:/opt/xos/xos_configuration/xos_cord_config:ro
+        - ../vtn/files/xos_vtn_config:/opt/xos/xos_configuration/xos_vtn_config:ro
+        - ../../xos_services:/opt/xos_services
+    log_driver: "json-file"
+    log_opt:
+            max-size: "100k"
+            max-file: "5"
diff --git a/cord-pod/files/exampleservice_config b/cord-pod/files/exampleservice_config
new file mode 100644
index 0000000..823e31d
--- /dev/null
+++ b/cord-pod/files/exampleservice_config
@@ -0,0 +1,29 @@
+# Required by XOS
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+# Required by XOS
+[api]
+nova_enabled=True
+
+# Sets options for the synchronizer
+[observer]
+name=exampleservice
+dependency_graph=/opt/xos/synchronizers/exampleservice/model-deps
+steps_dir=/opt/xos/synchronizers/exampleservice/steps
+sys_dir=/opt/xos/synchronizers/exampleservice/sys
+logfile=/var/log/xos_backend.log
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+proxy_ssh=True
+proxy_ssh_key=/root/setup/node_key
+proxy_ssh_user=root
+
+[networking]
+use_vtn=True
+
diff --git a/cord-pod/files/monitoring_channel_synchronizer_config b/cord-pod/files/monitoring_channel_synchronizer_config
new file mode 100644
index 0000000..fb3f22a
--- /dev/null
+++ b/cord-pod/files/monitoring_channel_synchronizer_config
@@ -0,0 +1,43 @@
+
+[plc]
+name=plc
+deployment=VICCI
+
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+[api]
+host=128.112.171.237
+port=8000
+ssl_key=None
+ssl_cert=None
+ca_ssl_cert=None
+ratelimit_enabled=0
+omf_enabled=0
+mail_support_address=support@localhost
+nova_enabled=True
+
+[observer]
+name=monitoring_channel
+dependency_graph=/opt/xos/synchronizers/monitoring_channel/model-deps
+steps_dir=/opt/xos/synchronizers/monitoring_channel/steps
+sys_dir=/opt/xos/synchronizers/monitoring_channel/sys
+deleters_dir=/opt/xos/synchronizers/monitoring_channel/deleters
+log_file=console
+driver=None
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+full_setup=True
+# For CORD_POD config, set proxy_ssh to True even on cloudlab
+proxy_ssh=True
+proxy_ssh_key=/root/setup/node_key
+proxy_ssh_user=root
+
+[feefie]
+client_id='vicci_dev_central'
+user_id='pl'
diff --git a/cord-pod/files/vcpe_synchronizer_config b/cord-pod/files/vcpe_synchronizer_config
new file mode 100644
index 0000000..9da6ede
--- /dev/null
+++ b/cord-pod/files/vcpe_synchronizer_config
@@ -0,0 +1,47 @@
+
+[plc]
+name=plc
+deployment=VICCI
+
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+[api]
+host=128.112.171.237
+port=8000
+ssl_key=None
+ssl_cert=None
+ca_ssl_cert=None
+ratelimit_enabled=0
+omf_enabled=0
+mail_support_address=support@localhost
+nova_enabled=True
+
+[observer]
+name=vcpe
+dependency_graph=/opt/xos/synchronizers/vsg/model-deps
+steps_dir=/opt/xos/synchronizers/vsg/steps
+sys_dir=/opt/xos/synchronizers/vsg/sys
+deleters_dir=/opt/xos/synchronizers/vsg/deleters
+log_file=console
+#/var/log/hpc.log
+driver=None
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+# set proxy_ssh to false on cloudlab
+full_setup=True
+proxy_ssh=True
+proxy_ssh_key=/root/setup/node_key
+proxy_ssh_user=root
+
+[networking]
+use_vtn=True
+
+[feefie]
+client_id='vicci_dev_central'
+user_id='pl'
diff --git a/cord-pod/files/vtr_synchronizer_config b/cord-pod/files/vtr_synchronizer_config
new file mode 100644
index 0000000..223ab00
--- /dev/null
+++ b/cord-pod/files/vtr_synchronizer_config
@@ -0,0 +1,47 @@
+
+[plc]
+name=plc
+deployment=VICCI
+
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+[api]
+host=128.112.171.237
+port=8000
+ssl_key=None
+ssl_cert=None
+ca_ssl_cert=None
+ratelimit_enabled=0
+omf_enabled=0
+mail_support_address=support@localhost
+nova_enabled=True
+
+[observer]
+name=vtr
+dependency_graph=/opt/xos/synchronizers/vtr/model-deps
+steps_dir=/opt/xos/synchronizers/vtr/steps
+sys_dir=/opt/xos/synchronizers/vtr/sys
+deleters_dir=/opt/xos/synchronizers/vtr/deleters
+log_file=console
+#/var/log/hpc.log
+driver=None
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+# set proxy_ssh to false on cloudlab
+full_setup=True
+proxy_ssh=True
+proxy_ssh_key=/root/setup/node_key
+proxy_ssh_user=root
+
+[networking]
+use_vtn=True
+
+[feefie]
+client_id='vicci_dev_central'
+user_id='pl'
diff --git a/cord-pod/files/xos_vtn_config b/cord-pod/files/xos_vtn_config
new file mode 100644
index 0000000..5dfd459
--- /dev/null
+++ b/cord-pod/files/xos_vtn_config
@@ -0,0 +1,2 @@
+[networking]
+use_vtn=True
diff --git a/cord-pod/images/.gitignore b/cord-pod/images/.gitignore
new file mode 100644
index 0000000..6949d1f
--- /dev/null
+++ b/cord-pod/images/.gitignore
@@ -0,0 +1,3 @@
+*.img
+*.qcow2
+*.qcow
diff --git a/cord-pod/images/README.md b/cord-pod/images/README.md
new file mode 100644
index 0000000..aca55a9
--- /dev/null
+++ b/cord-pod/images/README.md
@@ -0,0 +1,5 @@
+# VM images for XOS
+
+Any cloud image file placed in this directory (with the suffix `.img`) will be automatically
+imported by XOS and added to Glance (OpenStack's image repository).  For instance, the image
+`trusty-server-multi-nic.img` will be imported with the name `trusty-server-multi-nic`.
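+
+For example, to make a hypothetical image `my-image` available to XOS, drop the image file
+into this directory with the `.img` suffix:
+```
+cp /path/to/my-image.img .
+```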
diff --git a/cord-pod/make-fabric-yaml.sh b/cord-pod/make-fabric-yaml.sh
new file mode 100644
index 0000000..a829690
--- /dev/null
+++ b/cord-pod/make-fabric-yaml.sh
@@ -0,0 +1,71 @@
+FN=$SETUPDIR/fabric.yaml
+
+rm -f $FN
+
+cat >> $FN <<EOF
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+imports:
+   - custom_types/xos.yaml
+
+description: generate fabric configuration
+
+topology_template:
+  node_templates:
+
+    service#ONOS_Fabric:
+      type: tosca.nodes.ONOSService
+      requirements:
+      properties:
+          kind: onos
+          view_url: /admin/onos/onosservice/\$id\$/
+          no_container: true
+          rest_hostname: onos-fabric
+          replaces: service_ONOS_Fabric
+
+    service#fabric:
+      type: tosca.nodes.FabricService
+      properties:
+          view_url: /admin/fabric/fabricservice/\$id\$/
+          replaces: service_fabric
+
+
+EOF
+
+NODES=$( bash -c "source $SETUPDIR/admin-openrc.sh ; nova host-list" |grep compute|awk '{print $2}' )
+I=0
+for NODE in $NODES; do
+    echo $NODE
+    cat >> $FN <<EOF
+    $NODE:
+      type: tosca.nodes.Node
+
+    # Fabric location field for node $NODE
+    ${NODE}_location_tag:
+      type: tosca.nodes.Tag
+      properties:
+          name: location
+          value: of:0000000000000001/1
+      requirements:
+          - target:
+              node: $NODE
+              relationship: tosca.relationships.TagsObject
+          - service:
+              node: service#ONOS_Fabric
+              relationship: tosca.relationships.MemberOfService
+EOF
+done
+
+cat >> $FN <<EOF
+    Fabric_ONOS_app:
+      type: tosca.nodes.ONOSApp
+      requirements:
+          - onos_tenant:
+              node: service#ONOS_Fabric
+              relationship: tosca.relationships.TenantOfService
+          - fabric_service:
+              node: service#fabric
+              relationship: tosca.relationships.UsedByService
+      properties:
+          dependencies: org.onosproject.lldpprovider, org.onosproject.hostprovider, org.onosproject.openflow-base, org.onosproject.openflow, org.onosproject.drivers, org.onosproject.segmentrouting
+EOF
diff --git a/cord-pod/make-virtualbng-json.sh b/cord-pod/make-virtualbng-json.sh
new file mode 100644
index 0000000..993643c
--- /dev/null
+++ b/cord-pod/make-virtualbng-json.sh
@@ -0,0 +1,38 @@
+FN=$SETUPDIR/virtualbng.json
+
+rm -f $FN
+
+cat >> $FN <<EOF
+{
+    "localPublicIpPrefixes" : [
+        "10.254.0.128/25"
+    ],
+    "nextHopIpAddress" : "10.254.0.1",
+    "publicFacingMac" : "00:00:00:00:00:66",
+    "xosIpAddress" : "10.11.10.1",
+    "xosRestPort" : "9999",
+    "hosts" : {
+EOF
+
+NODES=$( sudo bash -c "source $SETUPDIR/admin-openrc.sh ; nova hypervisor-list" |grep -v ID|grep -v +|awk '{print $4}' )
+
+NODECOUNT=0
+for NODE in $NODES; do
+    ((NODECOUNT++))
+done
+
+I=0
+for NODE in $NODES; do
+    echo $NODE
+    ((I++))
+    if [[ "$I" -lt "$NODECOUNT" ]]; then
+        echo "      \"$NODE\" : \"of:0000000000000001/1\"," >> $FN
+    else
+        echo "      \"$NODE\" : \"of:0000000000000001/1\"" >> $FN
+    fi
+done
+
+cat >> $FN <<EOF
+    }
+}
+EOF
diff --git a/cord-pod/make-vtn-external-yaml.sh b/cord-pod/make-vtn-external-yaml.sh
new file mode 100644
index 0000000..71437d5
--- /dev/null
+++ b/cord-pod/make-vtn-external-yaml.sh
@@ -0,0 +1,110 @@
+FN=$SETUPDIR/vtn-external.yaml
+
+rm -f $FN
+
+cat >> $FN <<EOF
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+imports:
+   - custom_types/xos.yaml
+
+description: autogenerated node tags file for VTN configuration
+
+topology_template:
+  node_templates:
+
+    service#ONOS_CORD:
+      type: tosca.nodes.ONOSService
+      requirements:
+      properties:
+          kind: onos
+          view_url: /admin/onos/onosservice/\$id\$/
+          no_container: true
+          rest_hostname: onos-cord
+          replaces: service_ONOS_CORD
+
+    service#vtn:
+      type: tosca.nodes.VTNService
+      properties:
+          view_url: /admin/vtn/vtnservice/\$id\$/
+          privateGatewayMac: 00:00:00:00:00:01
+          localManagementIp: 172.27.0.1/24
+          ovsdbPort: 6641
+          sshUser: root
+          sshKeyFile: /root/node_key
+          sshPort: 22
+          xosEndpoint: http://xos/
+          xosUser: padmin@vicci.org
+          xosPassword: letmein
+          replaces: service_vtn
+
+EOF
+
+NODES=$( bash -c "source $SETUPDIR/admin-openrc.sh ; nova host-list" |grep compute|awk '{print $2}' )
+I=0
+for NODE in $NODES; do
+    echo $NODE
+    cat >> $FN <<EOF
+    $NODE:
+      type: tosca.nodes.Node
+
+    # VTN bridgeId field for node $NODE
+    ${NODE}_bridgeId_tag:
+      type: tosca.nodes.Tag
+      properties:
+          name: bridgeId
+          value: of:0000000000000001
+      requirements:
+          - target:
+              node: $NODE
+              relationship: tosca.relationships.TagsObject
+          - service:
+              node: service#ONOS_CORD
+              relationship: tosca.relationships.MemberOfService
+
+    # VTN dataPlaneIntf field for node $NODE
+    ${NODE}_dataPlaneIntf_tag:
+      type: tosca.nodes.Tag
+      properties:
+          name: dataPlaneIntf
+          value: fabric
+      requirements:
+          - target:
+              node: $NODE
+              relationship: tosca.relationships.TagsObject
+          - service:
+              node: service#ONOS_CORD
+              relationship: tosca.relationships.MemberOfService
+
+    # VTN dataPlaneIp field for node $NODE
+    ${NODE}_dataPlaneIp_tag:
+      type: tosca.nodes.Tag
+      properties:
+          name: dataPlaneIp
+          value: 10.168.0.253/24
+      requirements:
+          - target:
+              node: $NODE
+              relationship: tosca.relationships.TagsObject
+          - service:
+              node: service#ONOS_CORD
+              relationship: tosca.relationships.MemberOfService
+
+EOF
+done
+
+cat >> $FN <<EOF
+    VTN_ONOS_app:
+      type: tosca.nodes.ONOSVTNApp
+      requirements:
+          - onos_tenant:
+              node: service#ONOS_CORD
+              relationship: tosca.relationships.TenantOfService
+          - vtn_service:
+              node: service#vtn
+              relationship: tosca.relationships.UsedByService
+      properties:
+          install_dependencies: http://new-host:8080/repository/org/opencord/cord-config/1.0-SNAPSHOT/cord-config-1.0-SNAPSHOT.oar,http://new-host:8080/repository/org/opencord/vtn/1.0-SNAPSHOT/vtn-1.0-SNAPSHOT.oar
+          dependencies: org.onosproject.drivers, org.onosproject.drivers.ovsdb, org.onosproject.openflow-base, org.onosproject.ovsdb-base, org.onosproject.dhcp
+          autogenerate: vtn-network-cfg
+EOF
diff --git a/cord-pod/mgmt-net.yaml b/cord-pod/mgmt-net.yaml
new file mode 100644
index 0000000..2bd0173
--- /dev/null
+++ b/cord-pod/mgmt-net.yaml
@@ -0,0 +1,40 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Set up management network for CORD POD
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+
+    management_template:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          visibility: private
+          translation: none
+
+    management:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+          cidr: 172.27.0.0/24
+      requirements:
+          - network_template:
+              node: management_template
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_management
+              relationship: tosca.relationships.MemberOfSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+    mysite_management:
+      description: This slice exists solely to own the management network
+      type: tosca.nodes.Slice
+      properties:
+          network: noauto
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
diff --git a/cord-pod/pod-cdn.yaml b/cord-pod/pod-cdn.yaml
new file mode 100644
index 0000000..2229686
--- /dev/null
+++ b/cord-pod/pod-cdn.yaml
@@ -0,0 +1,52 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Set up the CDN on the pod
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    management:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          no-create: true
+          no-delete: true
+          no-update: true
+
+    cdn-public:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+          cidr: 207.141.192.128/28
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_cdn
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_cdn
+              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+    mysite_cdn:
+      description: This slice holds the CDN
+      type: tosca.nodes.Slice
+      properties:
+          network: noauto
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - management:
+              node: management
+              relationship: tosca.relationships.ConnectsToNetwork
+
diff --git a/cord-pod/pod-exampleservice.yaml b/cord-pod/pod-exampleservice.yaml
new file mode 100644
index 0000000..0182a59
--- /dev/null
+++ b/cord-pod/pod-exampleservice.yaml
@@ -0,0 +1,94 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Set up the ExampleService on the pod
+
+imports:
+   - custom_types/xos.yaml
+   - custom_types/exampleservice.yaml
+
+topology_template:
+  node_templates:
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    management:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          no-create: true
+          no-delete: true
+          no-update: true
+
+    service#vrouter:
+      type: tosca.nodes.Service
+      properties:
+          no-create: true
+          no-delete: true
+          no-update: true
+
+    exampleservice-public:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_exampleservice
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_exampleservice
+              relationship: tosca.relationships.ConnectsToSlice
+          - vrouter_tenant:
+              node: service#vrouter
+              relationship: tosca.relationships.TenantOfService
+
+    mysite:
+      type: tosca.nodes.Site
+
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    mysite_exampleservice:
+      description: This slice holds the ExampleService
+      type: tosca.nodes.Slice
+      properties:
+          network: noauto
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - management:
+              node: management
+              relationship: tosca.relationships.ConnectsToNetwork
+          - exampleservice:
+              node: service#exampleservice
+              relationship: tosca.relationships.MemberOfService
+          - image:
+              node: trusty-server-multi-nic
+              relationship: tosca.relationships.DefaultImage
+
+    service#exampleservice:
+      type: tosca.nodes.ExampleService
+      requirements:
+          - management:
+              node: management
+              relationship: tosca.relationships.UsesNetwork
+      properties:
+          view_url: /admin/exampleservice/exampleservice/$id$/
+          kind: exampleservice
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          private_key_fn: /opt/xos/services/exampleservice/keys/exampleservice_rsa
+          service_message: hello
+      artifacts:
+          pubkey: /opt/xos/services/exampleservice/keys/exampleservice_rsa.pub
+
+    tenant#exampletenant1:
+        type: tosca.nodes.ExampleTenant
+        properties:
+            tenant_message: world
+        requirements:
+          - tenant:
+              node: service#exampleservice
+              relationship: tosca.relationships.TenantOfService
diff --git a/cord-pod/setup.yaml b/cord-pod/setup.yaml
new file mode 100644
index 0000000..c13f0eb
--- /dev/null
+++ b/cord-pod/setup.yaml
@@ -0,0 +1,61 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >
+    * Adds OpenCloud Sites, Deployments, and Controllers.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+
+    MyDeployment:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+
+    MyOpenStack:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: MyDeployment
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Kilo
+          auth_url: { get_script_env: [ SELF, adminrc, OS_AUTH_URL, LOCAL_FILE] }
+          admin_user: { get_script_env: [ SELF, adminrc, OS_USERNAME, LOCAL_FILE] }
+          admin_password: { get_script_env: [ SELF, adminrc, OS_PASSWORD, LOCAL_FILE] }
+          admin_tenant: { get_script_env: [ SELF, adminrc, OS_TENANT_NAME, LOCAL_FILE] }
+          domain: Default
+      artifacts:
+          adminrc: /root/setup/admin-openrc.sh
+
+    mysite:
+      type: tosca.nodes.Site
+      properties:
+          display_name: MySite
+          site_url: http://xosproject.org/
+      requirements:
+          - deployment:
+               node: MyDeployment
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: MyOpenStack
+                       relationship: tosca.relationships.UsesController
+
+    # This user already exists in XOS with this password
+    # It's an example of how to create new users
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: XOS
+          lastname: admin
+          password: letmein
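+
+# Note: the adminrc artifact above is a standard OpenStack credentials file; the
+# get_script_env calls read OS_AUTH_URL, OS_USERNAME, OS_PASSWORD and OS_TENANT_NAME
+# from it.  Illustrative contents (values are deployment-specific):
+#   export OS_AUTH_URL=http://<keystone-host>:5000/v2.0
+#   export OS_USERNAME=admin
+#   export OS_PASSWORD=<admin password>
+#   export OS_TENANT_NAME=admin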
diff --git a/cord-pod/synchronizers.yaml b/cord-pod/synchronizers.yaml
new file mode 100644
index 0000000..02035e3
--- /dev/null
+++ b/cord-pod/synchronizers.yaml
@@ -0,0 +1,19 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: This recipe provides additional configuration for the onboarded services.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    servicecontroller#vsg:
+      type: tosca.nodes.ServiceController
+      properties:
+        no-create: true
+        synchronizer_config: /root/setup/files/vcpe_synchronizer_config
+    servicecontroller#vtr:
+      type: tosca.nodes.ServiceController
+      properties:
+        no-create: true
+        synchronizer_config: /root/setup/files/vtr_synchronizer_config
diff --git a/cord-pod/xos.yaml b/cord-pod/xos.yaml
new file mode 100644
index 0000000..33374c0
--- /dev/null
+++ b/cord-pod/xos.yaml
@@ -0,0 +1,86 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Configure the XOS container and its mounted volumes for the CORD POD
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    xos:
+      type: tosca.nodes.XOS
+      properties:
+        ui_port: 80
+        bootstrap_ui_port: 81
+        docker_project_name: cordpod
+        db_container_name: cordpodbs_xos_db_1
+
+    /opt/xos/xos_configuration/xos_common_config:
+      type: tosca.nodes.XOSVolume
+      properties:
+          host_path: { path_join: [ SELF, CONFIG_DIR, ../common/xos_common_config, ENV_VAR ] }
+          read_only: true
+      requirements:
+          - xos:
+             node: xos
+             relationship: tosca.relationships.UsedByXOS
+
+    /opt/xos/xos_configuration/xos_cord_config:
+      type: tosca.nodes.XOSVolume
+      properties:
+          host_path: { path_join: [ SELF, CONFIG_DIR, xos_cord_config, ENV_VAR ] }
+          read_only: true
+      requirements:
+          - xos:
+             node: xos
+             relationship: tosca.relationships.UsedByXOS
+
+    /opt/xos/xos_configuration/xos_vtn_config:
+      type: tosca.nodes.XOSVolume
+      properties:
+          host_path: { path_join: [ SELF, CONFIG_DIR, files/xos_vtn_config, ENV_VAR ] }
+          read_only: true
+      requirements:
+          - xos:
+              node: xos
+              relationship: tosca.relationships.UsedByXOS
+
+    /root/setup:
+      type: tosca.nodes.XOSVolume
+      properties:
+          host_path: { path_join: [ SELF, CONFIG_DIR, ., ENV_VAR ] }
+          read_only: true
+      requirements:
+          - xos:
+             node: xos
+             relationship: tosca.relationships.UsedByXOS
+
+#    /opt/xos/synchronizers/onos/onos_key.pub:
+#      type: tosca.nodes.XOSVolume
+#      properties:
+#          host_path: { path_join: [ SELF, CONFIG_DIR, id_rsa.pub, ENV_VAR ] }
+#          read_only: true
+#      requirements:
+#          - xos:
+#             node: xos
+#             relationship: tosca.relationships.UsedByXOS
+
+#    /opt/xos/synchronizers/vcpe/vcpe_public_key:
+#      type: tosca.nodes.XOSVolume
+#      properties:
+#          host_path: { path_join: [ SELF, CONFIG_DIR, id_rsa.pub, ENV_VAR ] }
+#          read_only: true
+#      requirements:
+#          - xos:
+#             node: xos
+#             relationship: tosca.relationships.UsedByXOS
+
+    /opt/xos/synchronizers/monitoring_channel/monitoring_channel_public_key:
+      type: tosca.nodes.XOSVolume
+      properties:
+          host_path: { path_join: [ SELF, CONFIG_DIR, id_rsa.pub, ENV_VAR ] }
+          read_only: true
+      requirements:
+          - xos:
+             node: xos
+             relationship: tosca.relationships.UsedByXOS
diff --git a/cord-pod/xos_cord_config b/cord-pod/xos_cord_config
new file mode 100644
index 0000000..a5448f7
--- /dev/null
+++ b/cord-pod/xos_cord_config
@@ -0,0 +1,6 @@
+[gui]
+branding_name=CORD
+#branding_css=/static/cord.css
+branding_icon=/static/cord-logo.png
+branding_favicon=/static/cord-favicon.png
+branding_bg=/static/cord-bg.jpg
diff --git a/devel/Makefile b/devel/Makefile
new file mode 100644
index 0000000..524e4cd
--- /dev/null
+++ b/devel/Makefile
@@ -0,0 +1,54 @@
+MYIP:=$(shell hostname -i)
+
+cloudlab: common_cloudlab local_containers xos
+
+devstack: upgrade_pkgs common_devstack local_containers xos
+
+xos:
+	sudo MYIP=$(MYIP) docker-compose up -d
+	bash ../common/wait_for_xos.sh
+	sudo docker-compose run xos python /opt/xos/tosca/run.py none /opt/xos/configurations/common/fixtures.yaml
+	sudo docker-compose run xos python /opt/xos/tosca/run.py none /opt/xos/configurations/common/mydeployment.yaml
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/cloudlab-openstack.yaml
+	sudo MYIP=$(MYIP) docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /root/setup/nodes.yaml
+
+common_cloudlab:
+	make -C ../common -f Makefile.cloudlab
+
+common_devstack:
+	make -C ../common -f Makefile.devstack
+
+base:
+	make -C ../../../containers/xos base
+
+local_containers:
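+	# collect any locally installed CA certificates into local_certs.crt before building the containers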
+	echo "" > ../../../containers/xos/local_certs.crt
+	for CRT in $$(ls /usr/local/share/ca-certificates/*) ; do \
+		echo Adding Certificate: $$CRT ;\
+		cat $$CRT >> ../../../containers/xos/local_certs.crt ;\
+		echo "" >> ../../../containers/xos/local_certs.crt ;\
+	done
+	make -C ../../../containers/xos devel
+	make -C ../../../containers/synchronizer
+
+stop:
+	sudo MYIP=$(MYIP) docker-compose stop
+
+showlogs:
+	sudo MYIP=$(MYIP) docker-compose logs
+
+rm: stop
+	sudo MYIP=$(MYIP) docker-compose rm
+
+ps:
+	sudo MYIP=$(MYIP) docker-compose ps
+
+enter-xos:
+	sudo docker exec -it devel_xos_1 bash
+
+enter-synchronizer:
+	sudo docker exec -it devel_xos_synchronizer_openstack_1 bash
+
+upgrade_pkgs:
+	sudo pip install httpie --upgrade
+
diff --git a/devel/README.md b/devel/README.md
new file mode 100644
index 0000000..5dbad10
--- /dev/null
+++ b/devel/README.md
@@ -0,0 +1,73 @@
+# XOS development environment
+
+This configuration can be used to do basic end-to-end development of XOS.  It launches
+XOS in three Docker containers (development GUI, Synchronizer, database) and configures XOS
+to talk to an OpenStack backend.  *docker-compose* is used to manage the containers.
+
+**NOTE: If your goal is to create a development environment for [CORD](http://opencord.org/), 
+this configuration is not what you want.  Look at the [cord-pod](../cord-pod) configuration instead!**
+
+## How to run it
+
+The configuration can be either run on [CloudLab](http://cloudlab.us) (controlling
+an OpenStack backend set up by a CloudLab profile) or used with a basic
+[DevStack](http://docs.openstack.org/developer/devstack/) configuration.
+
+### CloudLab
+
+To get started on CloudLab:
+* Create an experiment using the *OpenStack* profile.  Choose *Kilo* and
+disable security groups.
+* Wait until you get an email from CloudLab with title "OpenStack Instance Finished Setting Up".
+* Login to the *ctl* node of your experiment and run:
+```
+ctl:~$ git clone https://github.com/open-cloud/xos.git
+ctl:~$ cd xos/xos/configurations/devel/
+ctl:~/xos/xos/configurations/devel$ make cloudlab
+```
+
+### DevStack
+
+On a server with a fresh Ubuntu 14.04 install, 
+[this script](https://raw.githubusercontent.com/open-cloud/xos/master/xos/configurations/common/devstack/setup-devstack.sh)
+can be used to bootstrap a single-node DevStack environment that can be used
+for basic XOS development.
+The script installs DevStack and checks out the XOS repository.  Run the script
+and then invoke the XOS configuration for DevStack as follows:
+```
+~$ wget https://raw.githubusercontent.com/open-cloud/xos/master/xos/configurations/common/devstack/setup-devstack.sh
+~$ bash ./setup-devstack.sh
+~$ cd ../xos/xos/configurations/devel/
+~/xos/xos/configurations/devel$ make devstack
+```
+
+This setup has been run successfully in a VirtualBox VM with 2 CPUs and 4096 MB RAM.
+However, it is recommended to use a dedicated server with more resources.
+
+
+## What you get
+
+XOS will be set up with a single Deployment and Site.  It should be in a state where
+you can create slices and associate instances with them.
+
+Note that there are some issues with the networking setup in this configuration:
+VMs do not have a working DNS configuration in `/etc/resolv.conf`.  If you fix this
+manually then everything should work.
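+
+A minimal manual fix (run inside the affected VM; `8.8.8.8` is just an example resolver) is:
+```
+echo "nameserver 8.8.8.8" | sudo tee /etc/resolv.conf
+```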
+
+## Docker Helpers
+
+Stop the containers: `make stop`
+
+Restart the containers: `make stop; make [cloudlab|devstack]`
+
+Delete the containers and relaunch them: `make rm; make [cloudlab|devstack]`
+
+Build the containers from scratch using the local XOS source tree: `make local_containers`
+
+View logs: `make showlogs`
+
+See what containers are running: `make ps`
+
+Open a shell on the XOS container: `make enter-xos`
+
+Open a shell on the Synchronizer container: `make enter-synchronizer`
diff --git a/devel/docker-compose.yml b/devel/docker-compose.yml
new file mode 100644
index 0000000..9ef6fc7
--- /dev/null
+++ b/devel/docker-compose.yml
@@ -0,0 +1,36 @@
+xos_db:
+    image: xosproject/xos-postgres
+    expose:
+        - "5432"
+
+xos_synchronizer_openstack:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/openstack/xos-synchronizer.py"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: openstack
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+    volumes:
+        - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+        - ./images:/opt/xos/images:ro
+
+# FUTURE
+#xos_swarm_synchronizer:
+#    image: xosproject/xos-swarm-synchronizer
+#    labels:
+#        org.xosproject.kind: synchronizer
+#        org.xosproject.target: swarm
+
+xos:
+    image: xosproject/xos
+    command: python /opt/xos/manage.py runserver 0.0.0.0:8000 --insecure --makemigrations
+    ports:
+        - "9999:8000"
+    links:
+        - xos_db
+    volumes:
+      - ../setup:/root/setup:ro
+      - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
diff --git a/frontend/Makefile b/frontend/Makefile
new file mode 100644
index 0000000..a6cd9b8
--- /dev/null
+++ b/frontend/Makefile
@@ -0,0 +1,113 @@
+include ../common/Makedefs
+
+MYIP:=$(shell hostname -i)
+CONFIG_DIR:=$(shell pwd)
+DOCKER_COMPOSE_YML=./onboarding-docker-compose/docker-compose.yml
+BOOTSTRAP_YML=./docker-compose-bootstrap.yml
+DOCKER_PROJECT=frontend
+BOOTSTRAP_PROJECT=frontendbs
+XOS_BOOTSTRAP_PORT=9998
+XOS_UI_PORT=9999
+ADMIN_USERNAME=padmin@vicci.org
+ADMIN_PASSWORD=letmein
+RUN_TOSCA_BOOTSTRAP=python ../common/run_tosca.py $(XOS_BOOTSTRAP_PORT) $(ADMIN_USERNAME) $(ADMIN_PASSWORD)
+RUN_TOSCA=python ../common/run_tosca.py $(XOS_UI_PORT) $(ADMIN_USERNAME) $(ADMIN_PASSWORD)
+
+frontend: prereqs dirs download_services bootstrap onboarding frontendconfig
+
+prereqs:
+	sudo make -f ../common/Makefile.prereqs
+
+dirs:
+	# if this directory doesn't exist, then docker-compose will create it with root permission
+	mkdir -p key_import
+	mkdir -p onboarding-docker-compose
+
+download_services:
+	make -f ../common/Makefile.services
+
+update_services:
+	make -f ../common/Makefile.services update
+
+bootstrap:
+	echo "[BOOTSTRAP]"
+	sudo rm -f onboarding-docker-compose/docker-compose.yml
+	sudo CONFIG_DIR=$(CONFIG_DIR) docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) up -d
+	bash ../common/wait_for_xos_port.sh $(XOS_BOOTSTRAP_PORT)
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run xos_bootstrap_ui python /opt/xos/tosca/run.py none - < ../common/fixtures.yaml
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run xos_bootstrap_ui python /opt/xos/tosca/run.py none - < ../common/mydeployment.yaml
+	$(RUN_TOSCA_BOOTSTRAP) xos.yaml
+
+onboarding:
+	echo "[ONBOARDING]"
+	# on-board any services here
+	bash ../common/wait_for_onboarding_ready.sh $(XOS_BOOTSTRAP_PORT) xos
+	bash ../common/wait_for_xos_port.sh $(XOS_UI_PORT)
+
+frontendconfig:
+	echo "[FRONTENDCONFIG]"
+	$(RUN_TOSCA) sample.yaml
+
+containers:
+	make -f ../common/Makefile.containers xos_devel synchronizer onboarding_synchronizer
+
+stop:
+	test ! -s $(DOCKER_COMPOSE_YML) || sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) stop
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) stop
+
+showlogs:
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) logs
+
+rm: stop
+	test ! -s $(DOCKER_COMPOSE_YML) || sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) rm
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) rm
+
+ps:
+	sudo docker-compose ps
+
+enter-xos:
+	sudo docker exec -ti frontend_xos_ui_1 bash
+
+django-restart:
+	sudo docker exec frontend_xos_ui_1 touch /opt/xos/xos/settings.py
+
+clean-config-folder:
+	sudo docker exec frontend_xos_ui_1 rm -f /opt/xos/xos_configuration/xos_mcord_config
+	sudo docker exec frontend_xos_ui_1 rm -f /opt/xos/xos_configuration/xos_cord_config
+
+mock-cord-pod: onboard-cord-pod
+	$(RUN_TOSCA) ../cord-pod/mgmt-net.yaml
+	$(RUN_TOSCA) ../cord-pod/cord-vtn-vsg.yaml
+	$(RUN_TOSCA) ../cord-pod/cord-volt-devices.yaml
+	sudo docker exec frontend_xos_ui_1 cp /opt/xos/configurations/cord-pod/xos_cord_config /opt/xos/xos_configuration/
+	sudo docker exec frontend_xos_ui_1 touch /opt/xos/xos/settings.py
+
+onboard-cord-pod:
+	sudo bash -c "echo somekey > key_import/vsg_rsa"
+	sudo bash -c "echo somekey > key_import/vsg_rsa.pub"
+	sudo bash -c "echo somekey > key_import/volt_rsa"
+	sudo bash -c "echo somekey > key_import/volt_rsa.pub"
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/vrouter/xos/vrouter-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/olt/xos/volt-onboard.yaml
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/vsg/xos/vsg-onboard.yaml
+	bash ../common/wait_for_onboarding_ready.sh 9998 services/vrouter
+	bash ../common/wait_for_onboarding_ready.sh 9998 services/volt
+	bash ../common/wait_for_onboarding_ready.sh 9998 services/vsg
+	bash ../common/wait_for_onboarding_ready.sh $(XOS_BOOTSTRAP_PORT) xos
+	bash ../common/wait_for_xos_port.sh $(XOS_UI_PORT)
+
+mock-mcord:
+	# check this
+	$(RUN_TOSCA) ../cord-pod/mgmt-net.yaml
+	$(RUN_TOSCA) mocks/mcord.yaml
+	sudo docker exec frontend_xos_ui_1 cp /opt/xos/configurations/mcord/xos_mcord_config /opt/xos/xos_configuration/
+	sudo docker exec frontend_xos_ui_1 touch /opt/xos/xos/settings.py
+
+exampleservice:
+	mkdir -p key_import
+	# fake keys are fine
+	sudo bash -c "echo somekey > key_import/exampleservice_rsa"
+	sudo bash -c "echo somekey > key_import/exampleservice_rsa.pub"
+	$(RUN_TOSCA_BOOTSTRAP) $(SERVICE_DIR)/exampleservice/xos/exampleservice-onboard.yaml
+	bash ../common/wait_for_onboarding_ready.sh 9998 services/exampleservice
+	bash ../common/wait_for_onboarding_ready.sh 9998 xos
diff --git a/frontend/README.md b/frontend/README.md
new file mode 100644
index 0000000..7e2ef02
--- /dev/null
+++ b/frontend/README.md
@@ -0,0 +1,63 @@
+# XOS UI Development
+
+This configuration launches the XOS GUI and database in separate containers
+using docker-compose. No synchronizer is started and no OpenStack backend is connected to XOS; this configuration is intended for developing the XOS GUI.
+
+## Getting Started
+
+- Navigate to the `/xos/configurations/frontend` folder
+- Run `make`
+
+You'll be able to visit XOS at `0.0.0.0:9000`. The `xos/core/xoslib` folder is shared with the container, so any update to that folder is automatically reflected inside the container.
+
+If you need to work on the Django application itself, comment out the "sleep" command
+for the `xos` container in docker-compose.yml and run `make`. Once the containers are
+up, type `make enter-xos` to open a shell in the running container.
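+
+A typical development loop then looks roughly like this (a sketch; the targets and the `frontend_xos_ui_1` container name come from the Makefile in this configuration):
+
+```sh
+# bring up the containers and wait for the UI port
+make
+
+# open a shell inside the running UI container
+make enter-xos
+
+# after editing Django code, touch settings.py to trigger a reload
+make django-restart
+```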
+
+> _NOTE_:
+> Please be careful and do not commit migrations, private keys, or other autogenerated files.
+
+
+### Using Vagrant
+
+A Vagrant environment is provided, based on an Ubuntu 14.04 box (ubuntu/trusty64). It runs XOS at `192.168.46.100:9999`.
+It also provides a shared folder between the host machine (the root of the XOS repository) and the Ubuntu guest (`/opt/xos`).
+
+- Start the Vagrant box: `vagrant up --provision`
+- SSH into it: `ssh vagrant@127.0.0.1 -p 2222` (password: `vagrant`)
+- Check its status: `vagrant global-status`
+
+### Docker Helpers
+
+Stop the containers: `make stop`
+
+Restart the containers: `make stop; make`
+
+Delete the containers and relaunch them: `make rm; make`
+
+Build the containers from scratch using the local XOS source tree: `make containers`
+
+View logs: `make showlogs`
+
+See what containers are running: `make ps`
+
+Open a shell on the XOS container: `make enter-xos`
+
+## Docs
+
+Swagger documentation for the REST endpoints is available at: `http://0.0.0.0:9000/docs/`
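+
+For example, once the containers are up you can check that the docs are being served from the host (assuming the same address and port as above):
+
+```sh
+# fetch the first lines of the Swagger docs page
+curl -s http://0.0.0.0:9000/docs/ | head
+```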
+
+## Populate the Data Model with custom data
+
+Sometimes while developing the GUI it is useful to have control over the data model. Sample `tosca` recipes for different configurations are defined in the `mocks` folder, and corresponding `make` commands are provided (a rough manual equivalent is sketched after the list below).
+
+- Bring up the **CORD** data model: `make mock-cord`
+- Bring up the **M-CORD** data model: `make mock-mcord`
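+
+Each of these targets is a thin wrapper around `../common/run_tosca.py`. Per the Makefile in this configuration, `make mock-mcord` roughly expands to the following (it also copies the M-CORD UI config into the container):
+
+```sh
+# push the management network and the M-CORD mock recipe through the UI port
+python ../common/run_tosca.py 9999 padmin@vicci.org letmein ../cord-pod/mgmt-net.yaml
+python ../common/run_tosca.py 9999 padmin@vicci.org letmein mocks/mcord.yaml
+```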
+
+## JS Styleguide
+
+This project follows the [Google JavaScript Style Guide](https://google.github.io/styleguide/javascriptguide.xml). To contribute, please install [ESLint](http://eslint.org/) in your editor and run `npm run eslint` before committing.
+
+> _NOTE_:
+> Many of the existing files were not style compliant. Linting for them has been disabled, as it was too time consuming to fix them all. If **you are going to work** on those files, please **start fixing style issues**, and then **remove the `/* eslint-disable */`** comment.
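+
+A minimal pre-commit check looks like this:
+
+```sh
+# 1. remove the /* eslint-disable */ line from the file you are editing
+# 2. re-run the linter and fix anything it reports
+npm run eslint
+```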
diff --git a/frontend/Vagrantfile b/frontend/Vagrantfile
new file mode 100644
index 0000000..83d6d84
--- /dev/null
+++ b/frontend/Vagrantfile
@@ -0,0 +1,8 @@
+Vagrant.configure("2") do |config|
+  config.vm.box = "ubuntu/trusty64"
+
+  config.vm.network "private_network", ip: "192.168.46.100"
+  config.vm.synced_folder "../../../", "/opt/xos"
+  config.vm.provision "shell",
+    inline: "cd /opt/xos/xos/configurations/frontend && make && echo Vagrant running"
+end
diff --git a/frontend/docker-compose-bootstrap.yml b/frontend/docker-compose-bootstrap.yml
new file mode 100644
index 0000000..49a22e8
--- /dev/null
+++ b/frontend/docker-compose-bootstrap.yml
@@ -0,0 +1,37 @@
+xos_db:
+    image: xosproject/xos-postgres
+    expose:
+        - "5432"
+
+xos_bootstrap_ui:
+    image: xosproject/xos
+    command: python /opt/xos/manage.py runserver 0.0.0.0:9998 --insecure --makemigrations
+    environment:
+        - CONFIG_DIR
+    ports:
+        - "9998:9998"
+    links:
+        - xos_db
+    volumes:
+      - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config
+      - ../vtn/files/xos_vtn_config:/opt/xos/xos_configuration/xos_vtn_config:ro
+      - ../../xos_services:/opt/xos_services
+
+xos_synchronizer_onboarding:
+    image: xosproject/xos-synchronizer-onboarding
+    command: bash -c "cd /opt/xos/synchronizers/onboarding; ./run.sh"
+#    command: sleep 86400
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: onboarding
+    links:
+        - xos_db
+    volumes:
+        - /var/run/docker.sock:/var/run/docker.sock
+        - ./key_import:/opt/xos/key_import:ro
+        - ./onboarding-docker-compose:/opt/xos/synchronizers/onboarding/docker-compose
+        - ../../xos_services:/opt/xos_services
+    log_driver: "json-file"
+    log_opt:
+            max-size: "100k"
+            max-file: "5"
diff --git a/frontend/mocks/MCORDServiceN.yaml b/frontend/mocks/MCORDServiceN.yaml
new file mode 100644
index 0000000..bef7bb3
--- /dev/null
+++ b/frontend/mocks/MCORDServiceN.yaml
@@ -0,0 +1,89 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup MCORD-related services.
+
+imports:
+   - custom_types/xos.yaml
+
+node_types:
+    tosca.nodes.MCORDComponent:
+        derived_from: tosca.nodes.Root
+        description: >
+            CORD: A Service Component of MCORD Service.
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of component
+
+topology_template:
+  node_templates:
+    service_mcord:
+      type: tosca.nodes.Service
+      requirements:
+      properties:
+          kind: mcordservice
+
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    mcord_network:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: mcord_service_internal_net
+          cidr: 172.16.16.0/24
+          start_ip: 172.16.16.1
+          end_ip: 172.16.16.5
+          gateway_ip: 172.16.16.1
+
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+
+    ubuntu-14.04-server-cloudimg-amd64-disk1:
+      type: tosca.nodes.Image
+
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    mysite_mcord_slice1:
+      description: MCORD Service Slice 1
+      type: tosca.nodes.Slice
+      requirements:
+          - mcord_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: ubuntu-14.04-server-cloudimg-amd64-disk1
+#                node: mcord-server-image-s1
+                relationship: tosca.relationships.DefaultImage
+      properties:
+          default_flavor: m1.medium
+          default_node: compute9
+
+    my_service_mcord_component1:
+      description: MCORD Service default Component
+      type: tosca.nodes.MCORDComponent
+      requirements:
+          - provider_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - mcord_slice:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.MemberOfSlice
diff --git a/frontend/mocks/MCORDServiceN.yml b/frontend/mocks/MCORDServiceN.yml
new file mode 100644
index 0000000..8f764d0
--- /dev/null
+++ b/frontend/mocks/MCORDServiceN.yml
@@ -0,0 +1,106 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup MCORD-related services.
+
+imports:
+   - custom_types/xos.yaml
+
+node_types:
+    tosca.nodes.MCORDComponent:
+        derived_from: tosca.nodes.Root
+        description: >
+            CORD: A Service Component of MCORD Service.
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of component
+
+topology_template:
+  node_templates:
+    service_mcord:
+      type: tosca.nodes.Service
+      requirements:
+      properties:
+          kind: mcordservice
+
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    mcord_network:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: mcord_service_internal_net
+          cidr: 172.16.16.0/24
+          start_ip: 172.16.16.1
+          end_ip: 172.16.16.5
+          gateway_ip: 172.16.16.1
+
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+
+    mcord-server-image-s1:
+      type: tosca.nodes.Image
+
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    mysite_mcord_slice1:
+      description: MCORD Service Slice 1
+      type: tosca.nodes.Slice
+      requirements:
+          - mcord_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: trusty-server-multi-nic
+#                node: mcord-server-image-s1
+                relationship: tosca.relationships.DefaultImage
+      properties:
+          default_flavor: m1.medium
+          default_node: ip-10-0-10-125
+
+    my_service_mcord_component1:
+      description: MCORD Service default Component
+      type: tosca.nodes.MCORDComponent
+      # properties:
+      #     view_url: /mcord/?service=vBBU
+      #     kind: RAN
+      requirements:
+          - provider_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - mcord_slice:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.MemberOfSlice
+
+    # Nodes
+    compute9:
+      type: tosca.nodes.Node
+      properties:
+          view_url: /mcord/?service=vBBU
+          kind: RAN
+      requirements:
+        - site:
+            node: mysite
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: MyDeployment
+            relationship: tosca.relationships.MemberOfDeployment
diff --git a/frontend/mocks/cord.yaml b/frontend/mocks/cord.yaml
new file mode 100644
index 0000000..9d5aeaa
--- /dev/null
+++ b/frontend/mocks/cord.yaml
@@ -0,0 +1,561 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup CORD-related services -- vOLT, vCPE, vBNG.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+
+    addresses_vsg:
+      type: tosca.nodes.AddressPool
+      properties:
+          addresses: 10.168.0.0/24
+          gateway_ip: 10.168.0.1
+          gateway_mac: 02:42:0a:a8:00:01
+
+    addresses_exampleservice-public:
+      type: tosca.nodes.AddressPool
+      properties:
+          addresses: 10.168.1.0/24
+          gateway_ip: 10.168.1.1
+          gateway_mac: 02:42:0a:a8:00:01
+    
+    # CORD Services
+    service_volt:
+      type: tosca.nodes.Service
+      requirements:
+          - vcpe_tenant:
+              node: service_vsg
+              relationship: tosca.relationships.TenantOfService
+          - lan_network:
+              node: lan_network
+              relationship: tosca.relationships.UsesNetwork
+          - wan_network:
+              node: wan_network
+              relationship: tosca.relationships.UsesNetwork
+      properties:
+          view_url: /admin/cord/voltservice/$id$/
+          kind: vOLT
+
+    service_vrouter:
+      type: tosca.nodes.VRouterService
+      properties:
+          view_url: /admin/vrouter/vrouterservice/$id$/
+      requirements:
+          - addresses_vsg:
+              node: addresses_vsg
+              relationship: tosca.relationships.ProvidesAddresses
+          - addresses_service1:
+              node: addresses_exampleservice-public
+              relationship: tosca.relationships.ProvidesAddresses
+
+    service_vsg:
+      type: tosca.nodes.VSGService
+      requirements:
+          - vrouter_tenant:
+              node: service_vrouter
+              relationship: tosca.relationships.TenantOfService
+      properties:
+          view_url: /admin/cord/vsgservice/$id$/
+          backend_network_label: hpc_client
+          #public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          #private_key_fn: /opt/xos/synchronizers/vcpe/vcpe_private_key
+      #artifacts:
+          #pubkey: /opt/xos/synchronizers/vcpe/vcpe_public_key
+
+    service_vbng:
+      type: tosca.nodes.VBNGService
+      properties:
+          view_url: /admin/cord/vbngservice/$id$/
+# if unspecified, vbng observer will look for an ONOSApp Tenant and
+# generate a URL from its IP address
+#          vbng_url: http://10.11.10.24:8181/onos/virtualbng/
+
+    service_ONOS_vBNG:
+      type: tosca.nodes.ONOSService
+      requirements:
+      properties:
+          kind: onos
+          view_url: /admin/onos/onosservice/$id$/
+          #public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+      #artifacts:
+          #pubkey: /opt/xos/synchronizers/onos/onos_key.pub
+
+#
+# To actually bring up the vBNG app
+# - Set up the dataplane using the ansible script
+# - Log into the vBNG ONOS and run 'devices' to get switch dpID
+# - Change the dpID values in vBNG ONOS app in XOS GUI
+# - (Synchronizer should copy the files to ONOS container immediately)
+# - Log into service_ONOS_vBNG VM and restart ONOS Docker container
+#   (Should roll this step into a Synchronizer)
+#
+    vBNG_ONOS_app:
+      type: tosca.nodes.ONOSvBNGApp
+      requirements:
+          - onos_tenant:
+              node: service_ONOS_vBNG
+              relationship: tosca.relationships.TenantOfService
+          - vbng_service:
+              node: service_vbng
+              relationship: tosca.relationships.UsedByService
+      properties:
+          dependencies: org.onosproject.proxyarp, org.onosproject.virtualbng, org.onosproject.openflow, org.onosproject.fwd
+          config_network-cfg.json: >
+            {
+              "ports" : {
+                "of:0000000000000001/1" : {
+                  "interfaces" : [
+                    {
+                      "ips"  : [ "10.0.1.253/24" ],
+                      "mac"  : "00:00:00:00:00:99"
+                    }
+                  ]
+                },
+                "of:0000000000000001/2" : {
+                  "interfaces" : [
+                    {
+                      "ips"  : [ "10.254.0.2/24" ],
+                      "mac"  : "00:00:00:00:00:98"
+                    }
+                  ]
+                }
+              }
+            }
+          #config_virtualbng.json: { get_artifact: [ SELF, virtualbng_json, LOCAL_FILE] }
+      #artifacts:
+          #virtualbng_json: /root/setup/virtualbng.json
+
+    service_ONOS_vOLT:
+      type: tosca.nodes.ONOSService
+      requirements:
+      properties:
+          kind: onos
+          view_url: /admin/onos/onosservice/$id$/
+          #public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          rest_onos/v1/network/configuration/: >
+            {
+              "devices" : {
+                "of:0000000000000001" : {
+                  "accessDevice" : {
+                    "uplink" : "2",
+                    "vlan"   : "222",
+                    "defaultVlan" : "1"
+                  },
+                  "basic" : {
+                    "driver" : "pmc-olt"
+                  }
+                }
+              }
+            }
+      #artifacts:
+          #pubkey: /opt/xos/synchronizers/onos/onos_key.pub
+
+
+    vOLT_ONOS_app:
+      type: tosca.nodes.ONOSvOLTApp
+      requirements:
+          - onos_tenant:
+              node: service_ONOS_vOLT
+              relationship: tosca.relationships.TenantOfService
+          - volt_service:
+              node: service_volt
+              relationship: tosca.relationships.UsedByService
+      properties:
+          install_dependencies: onos-ext-notifier-1.0-SNAPSHOT.oar, onos-ext-volt-event-publisher-1.0-SNAPSHOT.oar
+          dependencies: org.onosproject.openflow-base, org.onosproject.olt, org.ciena.onos.ext_notifier, org.ciena.onos.volt_event_publisher
+          component_config: >
+             {
+                "org.ciena.onos.ext_notifier.KafkaNotificationBridge":{
+                   "rabbit.user": "<rabbit_user>",
+                   "rabbit.password": "<rabbit_password>",
+                   "rabbit.host": "<rabbit_host>",
+                   "publish.rabbit": "true",
+                   "volt.events.rabbit.topic": "notifications.info",
+                   "volt.events.rabbit.exchange": "voltlistener",
+                   "volt.events.opaque.info": "{project_id: <keystone_tenant_id>, user_id: <keystone_user_id>}",
+                   "publish.volt.events": "true"
+                }
+             }
+#          config_network-cfg.json: >
+#            {
+#              "devices" : {
+#                "of:0000000000000001" : {
+#                  "accessDevice" : {
+#                    "uplink" : "2",
+#                    "vlan"   : "222",
+#                    "defaultVlan" : "1"
+#                  },
+#                  "basic" : {
+#                    "driver" : "default"
+#                  }
+#                }
+#              }
+#            }
+
+    # Network templates
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    Public network hack:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          visibility: private
+          translation: NAT
+          shared_network_name: tun0-net
+
+
+    # Networks required by the CORD setup
+    lan_network:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_vcpe
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_vcpe
+              relationship: tosca.relationships.ConnectsToSlice
+          - connection:
+              node: mysite_volt
+              relationship: tosca.relationships.ConnectsToSlice
+
+    wan_network:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_vcpe
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_vcpe
+              relationship: tosca.relationships.ConnectsToSlice
+          - connection:
+              node: mysite_vbng
+              relationship: tosca.relationships.ConnectsToSlice
+
+    Private-Direct:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          access: direct
+
+    Private-Indirect:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          access: indirect
+
+    subscriber_network:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_volt
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_volt
+              relationship: tosca.relationships.ConnectsToSlice
+          - connection:
+              node: mysite_clients
+              relationship: tosca.relationships.ConnectsToSlice
+
+    public_network:
+      type: tosca.nodes.network.Network
+      properties:
+      requirements:
+          - network_template:
+              node: Public network hack
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_vbng
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_vbng
+              relationship: tosca.relationships.ConnectsToSlice
+
+
+    mysite:
+      type: tosca.nodes.Site
+
+
+    # CORD Slices
+    mysite_vcpe:
+      description: vCPE Controller Slice
+      type: tosca.nodes.Slice
+      requirements:
+          - vcpe_service:
+              node: service_vsg
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - vcpe_docker_image:
+              node: docker-vcpe
+              relationship: tosca.relationships.UsesImage
+#      properties:
+#          default_isolation: container
+
+    mysite_onos_vbng:
+      description: ONOS Controller Slice for vBNG
+      type: tosca.nodes.Slice
+      requirements:
+          - ONOS:
+              node: service_ONOS_vBNG
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    mysite_onos_volt:
+      description: ONOS Controller Slice for vOLT
+      type: tosca.nodes.Slice
+      requirements:
+          - ONOS:
+              node: service_ONOS_vOLT
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    mysite_vbng:
+      description: slice running OVS controlled by vBNG
+      type: tosca.nodes.Slice
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    mysite_volt:
+      description: OVS controlled by vOLT
+      type: tosca.nodes.Slice
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    mysite_clients:
+      description: slice for clients at the subscriber
+      type: tosca.nodes.Slice
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+
+    # Virtual machines
+    onos_app_1:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: Ubuntu
+            version: 14.10
+      requirements:
+          - slice:
+                node: mysite_onos_vbng
+                relationship: tosca.relationships.MemberOfSlice
+
+    onos_app_2:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: Ubuntu
+            version: 14.10
+      requirements:
+          - slice:
+                node: mysite_onos_volt
+                relationship: tosca.relationships.MemberOfSlice
+
+    # VM for running the OVS controlled by vBNG
+    ovs_vbng:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: mysite_vbng
+                relationship: tosca.relationships.MemberOfSlice
+
+    # VM for running the OVS controlled by vOLT
+    ovs_volt:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: mysite_volt
+                relationship: tosca.relationships.MemberOfSlice
+
+    # A subscriber client VM
+    client1:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: mysite_clients
+                relationship: tosca.relationships.MemberOfSlice
+
+    # docker image for vcpe containers
+    docker-vcpe:
+      # TODO: need to attach this to mydeployment
+      type: tosca.nodes.Image
+      properties:
+        kind: container
+        container_format: na
+        disk_format: na
+        path: andybavier/docker-vcpe
+        tag: develop
+
+    # Let's add a user who can be administrator of the household
+    johndoe@myhouse.com:
+      type: tosca.nodes.User
+      properties:
+          password: letmein
+          firstname: john
+          lastname: doe
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    # A subscriber
+    My House:
+       type: tosca.nodes.CORDSubscriber
+       properties:
+           service_specific_id: 123
+           firewall_enable: false
+           cdn_enable: false
+           url_filter_enable: false
+           url_filter_level: R
+       requirements:
+          - house_admin:
+              node: johndoe@myhouse.com
+              relationship: tosca.relationships.AdminPrivilege
+
+    Mom's PC:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 01:02:03:04:05:06
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    Dad's PC:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 90:E2:BA:82:F9:75
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    Jack's Laptop:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 68:5B:35:9D:91:D5
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    Jill's Laptop:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 34:36:3B:C9:B6:A6
+           level: PG_13
+       requirements:
+           - household:
+               node: My House
+               relationship: tosca.relationships.SubscriberDevice
+
+    My Volt:
+        type: tosca.nodes.VOLTTenant
+        properties:
+            service_specific_id: 123
+            s_tag: 222
+            c_tag: 432
+        requirements:
+            - provider_service:
+                node: service_volt
+                relationship: tosca.relationships.MemberOfService
+            - subscriber:
+                node: My House
+                relationship: tosca.relationships.BelongsToSubscriber
diff --git a/frontend/mocks/mcord.yaml b/frontend/mocks/mcord.yaml
new file mode 100644
index 0000000..6c10ad3
--- /dev/null
+++ b/frontend/mocks/mcord.yaml
@@ -0,0 +1,319 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup CORD-related services
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    # M-CORD Services
+    
+    # RAN
+    vBBU:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vBBU
+          kind: RAN
+
+    eSON:
+      type: tosca.nodes.Service
+      properties:
+          view_url: http://www.google.com
+          kind: RAN
+
+    # EPC
+    vMME:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vMME
+          kind: EPC
+
+    vSGW:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vSGW
+          kind: EPC
+
+    vPGW:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vPGW
+          kind: EPC
+
+    # EDGE
+    Cache:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=Cache
+          icon_url: /static/mCordServices/service_cache.png
+          kind: EDGE
+
+    Firewall:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=Firewall
+          icon_url: /static/mCordServices/service_firewall.png
+          kind: EDGE
+
+    Video Optimization:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=Video%20Optimization
+          icon_url: /static/mCordServices/service_video.png
+          kind: EDGE
+          
+    # Images
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+      properties:
+         disk_format: QCOW2
+         container_format: BARE
+
+    # Deployments
+    StanfordDeployment:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+      requirements:
+          - image:
+              node: trusty-server-multi-nic
+              relationship: tosca.relationships.SupportsImage
+
+    # Site
+    stanford:
+      type: tosca.nodes.Site
+      properties:
+          display_name: Stanford University
+          site_url: https://www.stanford.edu/
+      requirements:
+          - deployment:
+               node: StanfordDeployment
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: CloudLab
+                       relationship: tosca.relationships.UsesController
+
+
+    # Nodes
+    node1.stanford.edu:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: StanfordDeployment
+            relationship: tosca.relationships.MemberOfDeployment
+
+    # Slices
+    stanford_slice:
+      description: Slice that contains sample instances
+      type: tosca.nodes.Slice
+      requirements:
+          - site:
+              node: stanford
+              relationship: tosca.relationships.MemberOfSite
+
+    # Instances
+    BBU_service_instance1:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: stanford_slice
+                relationship: tosca.relationships.MemberOfSlice
+
+    BBU_service_instance2:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: stanford_slice
+                relationship: tosca.relationships.MemberOfSlice
+
+    MME_service_instance1:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: stanford_slice
+                relationship: tosca.relationships.MemberOfSlice
+
+    SGW_service_instance1:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: stanford_slice
+                relationship: tosca.relationships.MemberOfSlice
+
+    PGW_service_instance1:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: stanford_slice
+                relationship: tosca.relationships.MemberOfSlice
+
+    # Let's add a user who can be administrator of the household
+    johndoe@stanford.us:
+      type: tosca.nodes.User
+      properties:
+          password: letmein
+          firstname: john
+          lastname: doe
+      requirements:
+          - site:
+              node: stanford
+              relationship: tosca.relationships.MemberOfSite
+
+    # A subscriber
+    Stanford:
+       type: tosca.nodes.CORDSubscriber
+       properties:
+           service_specific_id: 123
+           firewall_enable: false
+           cdn_enable: false
+           url_filter_enable: false
+           url_filter_level: R
+       requirements:
+          - house_admin:
+              node: johndoe@stanford.us
+              relationship: tosca.relationships.AdminPrivilege
+
+    Barbera Lapinski:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 01:02:03:04:05:06
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Norbert Shumway:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 90:E2:BA:82:F9:75
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Fay Muldoon:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 68:5B:35:9D:91:D5
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Janene Earnest:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 34:36:3B:C9:B6:A6
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+
+    Topology:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosMcordTopology
+
+    Ceilometer:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosCeilometerDashboard
+
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      properties:
+          firstname: XOS
+          lastname: admin
+          is_admin: true
+      requirements:
+          - mcord_dashboard:
+              node: Topology
+              relationship: tosca.relationships.UsesDashboard
+          - ceilometer_dashboard:
+              node: Ceilometer
+              relationship: tosca.relationships.UsesDashboard
diff --git a/frontend/sample.yaml b/frontend/sample.yaml
new file mode 100644
index 0000000..0cc47ab
--- /dev/null
+++ b/frontend/sample.yaml
@@ -0,0 +1,91 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >
+    * Some sample data to populate the demo frontend
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+      properties:
+         disk_format: QCOW2
+         container_format: BARE
+
+    MyDeployment:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+      requirements:
+          - image:
+              node: trusty-server-multi-nic
+              relationship: tosca.relationships.SupportsImage
+
+    CloudLab:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: MyDeployment
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://sample/v2
+          admin_user: admin
+          admin_password: adminpassword
+          admin_tenant: admin
+          domain: Default
+
+    mysite:
+      type: tosca.nodes.Site
+      properties:
+          display_name: MySite
+          site_url: http://opencloud.us/
+      requirements:
+          - deployment:
+               node: MyDeployment
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: CloudLab
+                       relationship: tosca.relationships.UsesController
+
+    Public shared IPv4:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          visibility: private
+          translation: NAT
+
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: XOS
+          lastname: admin
+
+    node1.opencloud.us:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: mysite
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: MyDeployment
+            relationship: tosca.relationships.MemberOfDeployment
+
+    node2.opencloud.us:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: mysite
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: MyDeployment
+            relationship: tosca.relationships.MemberOfDeployment
\ No newline at end of file
diff --git a/frontend/service_chain.yaml b/frontend/service_chain.yaml
new file mode 100644
index 0000000..557f98e
--- /dev/null
+++ b/frontend/service_chain.yaml
@@ -0,0 +1,204 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup two subscriber with related service chain, use for development of serviceTopology view.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    # CORD Subscribers
+    Night's Watch:
+      type: tosca.nodes.CORDSubscriber
+      properties:
+        service_specific_id: 123
+        firewall_enable: false
+        cdn_enable: false
+        url_filter_enable: false
+        url_filter_level: R
+
+    # CORD Users for Night's Watch
+    Jhon Snow:
+      type: tosca.nodes.CORDUser
+      properties:
+        mac: 01:02:03:04:05:06
+        level: PG_13
+      requirements:
+        - household:
+            node: Night's Watch
+            relationship: tosca.relationships.SubscriberDevice
+
+    House Targaryen:
+      type: tosca.nodes.CORDSubscriber
+      properties:
+        service_specific_id: 321
+        firewall_enable: false
+        cdn_enable: false
+        url_filter_enable: false
+        url_filter_level: R
+
+    # CORD Users for House Targaryen
+    Daenerys:
+      type: tosca.nodes.CORDUser
+      properties:
+        mac: 06:05:04:03:02:01
+        level: PG_13
+      requirements:
+        - household:
+            node: House Targaryen
+            relationship: tosca.relationships.SubscriberDevice
+
+    # vOLT Tenants
+    Night's Watch vOLT:
+      type: tosca.nodes.VOLTTenant
+      properties:
+        service_specific_id: 123
+        s_tag: 123
+        c_tag: 456
+      requirements:
+        - provider_service:
+            node: service_volt
+            relationship: tosca.relationships.MemberOfService
+        - subscriber:
+            node: Night's Watch
+            relationship: tosca.relationships.BelongsToSubscriber
+
+    Targaryen vOLT:
+      type: tosca.nodes.VOLTTenant
+      properties:
+        service_specific_id: 321
+        s_tag: 321
+        c_tag: 654
+      requirements:
+        - provider_service:
+            node: service_volt
+            relationship: tosca.relationships.MemberOfService
+        - subscriber:
+            node: House Targaryen
+            relationship: tosca.relationships.BelongsToSubscriber
+
+    # CORD Services
+    service_volt:
+      type: tosca.nodes.Service
+      requirements:
+        - vcpe_tenant:
+            node: service_vcpe
+            relationship: tosca.relationships.TenantOfService
+        - lan_network:
+            node: lan_network
+            relationship: tosca.relationships.UsesNetwork
+        - wan_network:
+            node: wan_network
+            relationship: tosca.relationships.UsesNetwork
+      properties:
+        view_url: /admin/cord/voltservice/$id$/
+        kind: vOLT
+
+    service_vcpe:
+      type: tosca.nodes.VCPEService
+      requirements:
+        - vbng_tenant:
+            node: service_vbng
+            relationship: tosca.relationships.TenantOfService
+      properties:
+        view_url: /admin/cord/vcpeservice/$id$/
+        backend_network_label: hpc_client
+        public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+        private_key_fn: /opt/xos/observers/vcpe/vcpe_private_key
+      artifacts:
+        pubkey: /root/.ssh/id_rsa.pub #is this right?
+
+    service_vbng:
+      type: tosca.nodes.VBNGService
+      properties:
+        view_url: /admin/cord/vbngservice/$id$/
+
+    # Networks required
+    lan_network:
+      type: tosca.nodes.network.Network
+      properties:
+        ip_version: 4
+      requirements:
+        - network_template:
+            node: Private
+            relationship: tosca.relationships.UsesNetworkTemplate
+        - owner:
+            node: mysite_vcpe
+            relationship: tosca.relationships.MemberOfSlice
+        - connection:
+            node: mysite_vcpe
+            relationship: tosca.relationships.ConnectsToSlice
+        - connection:
+            node: mysite_volt
+            relationship: tosca.relationships.ConnectsToSlice
+
+    wan_network:
+      type: tosca.nodes.network.Network
+      properties:
+        ip_version: 4
+      requirements:
+        - network_template:
+            node: Private
+            relationship: tosca.relationships.UsesNetworkTemplate
+        - owner:
+            node: mysite_vcpe
+            relationship: tosca.relationships.MemberOfSlice
+        - connection:
+            node: mysite_vcpe
+            relationship: tosca.relationships.ConnectsToSlice
+        - connection:
+            node: mysite_vbng
+            relationship: tosca.relationships.ConnectsToSlice
+
+    # Network templates
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    # Sites
+    mysite:
+      type: tosca.nodes.Site
+
+    # Slices
+    mysite_vcpe:
+      description: vCPE Controller Slice
+      type: tosca.nodes.Slice
+      requirements:
+        - vcpe_service:
+            node: service_vcpe
+            relationship: tosca.relationships.MemberOfService
+        - site:
+            node: mysite
+            relationship: tosca.relationships.MemberOfSite
+        - vcpe_docker_image:
+            node: docker-vcpe
+            relationship: tosca.relationships.UsesImage
+      properties:
+          default_isolation: container
+
+    mysite_vbng:
+      description: slice running OVS controlled by vBNG
+      type: tosca.nodes.Slice
+      requirements:
+        - site:
+            node: mysite
+            relationship: tosca.relationships.MemberOfSite
+
+    mysite_volt:
+      description: OVS controlled by vOLT
+      type: tosca.nodes.Slice
+      requirements:
+        - site:
+            node: mysite
+            relationship: tosca.relationships.MemberOfSite
+
+    # docker image for vcpe containers
+    docker-vcpe:
+      # TODO: need to attach this to mydeployment
+      type: tosca.nodes.Image
+      properties:
+        kind: container
+        container_format: na
+        disk_format: na
+        path: andybavier/docker-vcpe
+        tag: develop
+
diff --git a/frontend/xos.sql b/frontend/xos.sql
new file mode 100644
index 0000000..d728050
--- /dev/null
+++ b/frontend/xos.sql
@@ -0,0 +1,10017 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET lock_timeout = 0;
+SET client_encoding = 'SQL_ASCII';
+SET standard_conforming_strings = on;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+
+--
+-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: 
+--
+
+CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
+
+
+--
+-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: 
+--
+
+COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
+
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: auth_group; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE auth_group (
+    id integer NOT NULL,
+    name character varying(80) NOT NULL
+);
+
+
+ALTER TABLE public.auth_group OWNER TO postgres;
+
+--
+-- Name: auth_group_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE auth_group_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.auth_group_id_seq OWNER TO postgres;
+
+--
+-- Name: auth_group_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE auth_group_id_seq OWNED BY auth_group.id;
+
+
+--
+-- Name: auth_group_permissions; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE auth_group_permissions (
+    id integer NOT NULL,
+    group_id integer NOT NULL,
+    permission_id integer NOT NULL
+);
+
+
+ALTER TABLE public.auth_group_permissions OWNER TO postgres;
+
+--
+-- Name: auth_group_permissions_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE auth_group_permissions_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.auth_group_permissions_id_seq OWNER TO postgres;
+
+--
+-- Name: auth_group_permissions_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE auth_group_permissions_id_seq OWNED BY auth_group_permissions.id;
+
+
+--
+-- Name: auth_permission; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE auth_permission (
+    id integer NOT NULL,
+    name character varying(50) NOT NULL,
+    content_type_id integer NOT NULL,
+    codename character varying(100) NOT NULL
+);
+
+
+ALTER TABLE public.auth_permission OWNER TO postgres;
+
+--
+-- Name: auth_permission_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE auth_permission_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.auth_permission_id_seq OWNER TO postgres;
+
+--
+-- Name: auth_permission_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE auth_permission_id_seq OWNED BY auth_permission.id;
+
+
+--
+-- Name: core_account; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_account (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    site_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_account OWNER TO postgres;
+
+--
+-- Name: core_account_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_account_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_account_id_seq OWNER TO postgres;
+
+--
+-- Name: core_account_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_account_id_seq OWNED BY core_account.id;
+
+
+--
+-- Name: core_addresspool; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_addresspool (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(32) NOT NULL,
+    addresses text,
+    inuse text
+);
+
+
+ALTER TABLE public.core_addresspool OWNER TO postgres;
+
+--
+-- Name: core_addresspool_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_addresspool_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_addresspool_id_seq OWNER TO postgres;
+
+--
+-- Name: core_addresspool_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_addresspool_id_seq OWNED BY core_addresspool.id;
+
+
+--
+-- Name: core_charge; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_charge (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    kind character varying(30) NOT NULL,
+    state character varying(30) NOT NULL,
+    date timestamp with time zone NOT NULL,
+    amount double precision NOT NULL,
+    "coreHours" double precision NOT NULL,
+    account_id integer NOT NULL,
+    invoice_id integer,
+    object_id integer NOT NULL,
+    slice_id integer
+);
+
+
+ALTER TABLE public.core_charge OWNER TO postgres;
+
+--
+-- Name: core_charge_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_charge_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_charge_id_seq OWNER TO postgres;
+
+--
+-- Name: core_charge_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_charge_id_seq OWNED BY core_charge.id;
+
+
+--
+-- Name: core_controller; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_controller (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(200) NOT NULL,
+    backend_type character varying(200) NOT NULL,
+    version character varying(200) NOT NULL,
+    auth_url character varying(200),
+    admin_user character varying(200),
+    admin_password character varying(200),
+    admin_tenant character varying(200),
+    domain character varying(200),
+    rabbit_host character varying(200),
+    rabbit_user character varying(200),
+    rabbit_password character varying(200),
+    deployment_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_controller OWNER TO postgres;
+
+--
+-- Name: core_controller_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_controller_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_controller_id_seq OWNER TO postgres;
+
+--
+-- Name: core_controller_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_controller_id_seq OWNED BY core_controller.id;
+
+
+--
+-- Name: core_controllercredential; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_controllercredential (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(128) NOT NULL,
+    key_id character varying(1024) NOT NULL,
+    enc_value text NOT NULL,
+    controller_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_controllercredential OWNER TO postgres;
+
+--
+-- Name: core_controllercredential_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_controllercredential_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_controllercredential_id_seq OWNER TO postgres;
+
+--
+-- Name: core_controllercredential_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_controllercredential_id_seq OWNED BY core_controllercredential.id;
+
+
+--
+-- Name: core_controllerdashboardview; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_controllerdashboardview (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    enabled boolean NOT NULL,
+    url character varying(1024) NOT NULL,
+    controller_id integer NOT NULL,
+    "dashboardView_id" integer NOT NULL
+);
+
+
+ALTER TABLE public.core_controllerdashboardview OWNER TO postgres;
+
+--
+-- Name: core_controllerdashboardview_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_controllerdashboardview_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_controllerdashboardview_id_seq OWNER TO postgres;
+
+--
+-- Name: core_controllerdashboardview_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_controllerdashboardview_id_seq OWNED BY core_controllerdashboardview.id;
+
+
+--
+-- Name: core_controllerimages; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_controllerimages (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    glance_image_id character varying(200),
+    controller_id integer NOT NULL,
+    image_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_controllerimages OWNER TO postgres;
+
+--
+-- Name: core_controllerimages_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_controllerimages_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_controllerimages_id_seq OWNER TO postgres;
+
+--
+-- Name: core_controllerimages_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_controllerimages_id_seq OWNED BY core_controllerimages.id;
+
+
+--
+-- Name: core_controllernetwork; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_controllernetwork (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    net_id character varying(256),
+    router_id character varying(256),
+    subnet_id character varying(256),
+    subnet character varying(32) NOT NULL,
+    controller_id integer NOT NULL,
+    network_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_controllernetwork OWNER TO postgres;
+
+--
+-- Name: core_controllernetwork_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_controllernetwork_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_controllernetwork_id_seq OWNER TO postgres;
+
+--
+-- Name: core_controllernetwork_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_controllernetwork_id_seq OWNED BY core_controllernetwork.id;
+
+
+--
+-- Name: core_controllerrole; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_controllerrole (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    role character varying(30) NOT NULL
+);
+
+
+ALTER TABLE public.core_controllerrole OWNER TO postgres;
+
+--
+-- Name: core_controllerrole_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_controllerrole_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_controllerrole_id_seq OWNER TO postgres;
+
+--
+-- Name: core_controllerrole_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_controllerrole_id_seq OWNED BY core_controllerrole.id;
+
+
+--
+-- Name: core_controllersite; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_controllersite (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    tenant_id character varying(200),
+    controller_id integer,
+    site_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_controllersite OWNER TO postgres;
+
+--
+-- Name: core_controllersite_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_controllersite_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_controllersite_id_seq OWNER TO postgres;
+
+--
+-- Name: core_controllersite_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_controllersite_id_seq OWNED BY core_controllersite.id;
+
+
+--
+-- Name: core_controllersiteprivilege; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_controllersiteprivilege (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    role_id character varying(200),
+    controller_id integer NOT NULL,
+    site_privilege_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_controllersiteprivilege OWNER TO postgres;
+
+--
+-- Name: core_controllersiteprivilege_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_controllersiteprivilege_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_controllersiteprivilege_id_seq OWNER TO postgres;
+
+--
+-- Name: core_controllersiteprivilege_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_controllersiteprivilege_id_seq OWNED BY core_controllersiteprivilege.id;
+
+
+--
+-- Name: core_controllerslice; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_controllerslice (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    tenant_id character varying(200),
+    controller_id integer NOT NULL,
+    slice_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_controllerslice OWNER TO postgres;
+
+--
+-- Name: core_controllerslice_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_controllerslice_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_controllerslice_id_seq OWNER TO postgres;
+
+--
+-- Name: core_controllerslice_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_controllerslice_id_seq OWNED BY core_controllerslice.id;
+
+
+--
+-- Name: core_controllersliceprivilege; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_controllersliceprivilege (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    role_id character varying(200),
+    controller_id integer NOT NULL,
+    slice_privilege_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_controllersliceprivilege OWNER TO postgres;
+
+--
+-- Name: core_controllersliceprivilege_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_controllersliceprivilege_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_controllersliceprivilege_id_seq OWNER TO postgres;
+
+--
+-- Name: core_controllersliceprivilege_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_controllersliceprivilege_id_seq OWNED BY core_controllersliceprivilege.id;
+
+
+--
+-- Name: core_controlleruser; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_controlleruser (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    kuser_id character varying(200),
+    controller_id integer NOT NULL,
+    user_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_controlleruser OWNER TO postgres;
+
+--
+-- Name: core_controlleruser_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_controlleruser_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_controlleruser_id_seq OWNER TO postgres;
+
+--
+-- Name: core_controlleruser_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_controlleruser_id_seq OWNED BY core_controlleruser.id;
+
+
+--
+-- Name: core_dashboardview; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_dashboardview (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(200) NOT NULL,
+    url character varying(1024) NOT NULL,
+    enabled boolean NOT NULL
+);
+
+
+ALTER TABLE public.core_dashboardview OWNER TO postgres;
+
+--
+-- Name: core_dashboardview_deployments; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_dashboardview_deployments (
+    id integer NOT NULL,
+    dashboardview_id integer NOT NULL,
+    deployment_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_dashboardview_deployments OWNER TO postgres;
+
+--
+-- Name: core_dashboardview_deployments_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_dashboardview_deployments_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_dashboardview_deployments_id_seq OWNER TO postgres;
+
+--
+-- Name: core_dashboardview_deployments_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_dashboardview_deployments_id_seq OWNED BY core_dashboardview_deployments.id;
+
+
+--
+-- Name: core_dashboardview_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_dashboardview_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_dashboardview_id_seq OWNER TO postgres;
+
+--
+-- Name: core_dashboardview_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_dashboardview_id_seq OWNED BY core_dashboardview.id;
+
+
+--
+-- Name: core_deployment; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_deployment (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(200) NOT NULL,
+    "accessControl" text NOT NULL
+);
+
+
+ALTER TABLE public.core_deployment OWNER TO postgres;
+
+--
+-- Name: core_deployment_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_deployment_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_deployment_id_seq OWNER TO postgres;
+
+--
+-- Name: core_deployment_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_deployment_id_seq OWNED BY core_deployment.id;
+
+
+--
+-- Name: core_deploymentprivilege; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_deploymentprivilege (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    deployment_id integer NOT NULL,
+    role_id integer NOT NULL,
+    user_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_deploymentprivilege OWNER TO postgres;
+
+--
+-- Name: core_deploymentprivilege_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_deploymentprivilege_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_deploymentprivilege_id_seq OWNER TO postgres;
+
+--
+-- Name: core_deploymentprivilege_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_deploymentprivilege_id_seq OWNED BY core_deploymentprivilege.id;
+
+
+--
+-- Name: core_deploymentrole; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_deploymentrole (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    role character varying(30) NOT NULL
+);
+
+
+ALTER TABLE public.core_deploymentrole OWNER TO postgres;
+
+--
+-- Name: core_deploymentrole_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_deploymentrole_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_deploymentrole_id_seq OWNER TO postgres;
+
+--
+-- Name: core_deploymentrole_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_deploymentrole_id_seq OWNED BY core_deploymentrole.id;
+
+
+--
+-- Name: core_diag; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_diag (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(200) NOT NULL
+);
+
+
+ALTER TABLE public.core_diag OWNER TO postgres;
+
+--
+-- Name: core_diag_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_diag_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_diag_id_seq OWNER TO postgres;
+
+--
+-- Name: core_diag_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_diag_id_seq OWNED BY core_diag.id;
+
+
+--
+-- Name: core_flavor; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_flavor (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(32) NOT NULL,
+    description character varying(1024),
+    flavor character varying(32) NOT NULL,
+    "order" integer NOT NULL,
+    "default" boolean NOT NULL
+);
+
+
+ALTER TABLE public.core_flavor OWNER TO postgres;
+
+--
+-- Name: core_flavor_deployments; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_flavor_deployments (
+    id integer NOT NULL,
+    flavor_id integer NOT NULL,
+    deployment_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_flavor_deployments OWNER TO postgres;
+
+--
+-- Name: core_flavor_deployments_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_flavor_deployments_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_flavor_deployments_id_seq OWNER TO postgres;
+
+--
+-- Name: core_flavor_deployments_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_flavor_deployments_id_seq OWNED BY core_flavor_deployments.id;
+
+
+--
+-- Name: core_flavor_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_flavor_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_flavor_id_seq OWNER TO postgres;
+
+--
+-- Name: core_flavor_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_flavor_id_seq OWNED BY core_flavor.id;
+
+
+--
+-- Name: core_image; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_image (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(256) NOT NULL,
+    kind character varying(30) NOT NULL,
+    disk_format character varying(256) NOT NULL,
+    container_format character varying(256) NOT NULL,
+    path character varying(256),
+    tag character varying(256)
+);
+
+
+ALTER TABLE public.core_image OWNER TO postgres;
+
+--
+-- Name: core_image_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_image_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_image_id_seq OWNER TO postgres;
+
+--
+-- Name: core_image_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_image_id_seq OWNED BY core_image.id;
+
+
+--
+-- Name: core_imagedeployments; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_imagedeployments (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    deployment_id integer NOT NULL,
+    image_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_imagedeployments OWNER TO postgres;
+
+--
+-- Name: core_imagedeployments_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_imagedeployments_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_imagedeployments_id_seq OWNER TO postgres;
+
+--
+-- Name: core_imagedeployments_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_imagedeployments_id_seq OWNED BY core_imagedeployments.id;
+
+
+--
+-- Name: core_instance; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_instance (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    instance_id character varying(200),
+    instance_uuid character varying(200),
+    name character varying(200) NOT NULL,
+    instance_name character varying(200),
+    ip inet,
+    "numberCores" integer NOT NULL,
+    "userData" text,
+    isolation character varying(30) NOT NULL,
+    volumes text,
+    creator_id integer,
+    deployment_id integer NOT NULL,
+    flavor_id integer NOT NULL,
+    image_id integer NOT NULL,
+    node_id integer NOT NULL,
+    parent_id integer,
+    slice_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_instance OWNER TO postgres;
+
+--
+-- Name: core_instance_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_instance_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_instance_id_seq OWNER TO postgres;
+
+--
+-- Name: core_instance_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_instance_id_seq OWNED BY core_instance.id;
+
+
+--
+-- Name: core_invoice; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_invoice (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    date timestamp with time zone NOT NULL,
+    account_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_invoice OWNER TO postgres;
+
+--
+-- Name: core_invoice_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_invoice_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_invoice_id_seq OWNER TO postgres;
+
+--
+-- Name: core_invoice_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_invoice_id_seq OWNED BY core_invoice.id;
+
+
+--
+-- Name: core_network; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_network (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(32) NOT NULL,
+    subnet character varying(32) NOT NULL,
+    ports character varying(1024),
+    labels character varying(1024),
+    guaranteed_bandwidth integer NOT NULL,
+    permit_all_slices boolean NOT NULL,
+    topology_parameters text,
+    controller_url character varying(1024),
+    controller_parameters text,
+    network_id character varying(256),
+    router_id character varying(256),
+    subnet_id character varying(256),
+    autoconnect boolean NOT NULL,
+    owner_id integer NOT NULL,
+    template_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_network OWNER TO postgres;
+
+--
+-- Name: core_network_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_network_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_network_id_seq OWNER TO postgres;
+
+--
+-- Name: core_network_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_network_id_seq OWNED BY core_network.id;
+
+
+--
+-- Name: core_network_permitted_slices; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_network_permitted_slices (
+    id integer NOT NULL,
+    network_id integer NOT NULL,
+    slice_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_network_permitted_slices OWNER TO postgres;
+
+--
+-- Name: core_network_permitted_slices_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_network_permitted_slices_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_network_permitted_slices_id_seq OWNER TO postgres;
+
+--
+-- Name: core_network_permitted_slices_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_network_permitted_slices_id_seq OWNED BY core_network_permitted_slices.id;
+
+
+--
+-- Name: core_networkparameter; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_networkparameter (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    value character varying(1024) NOT NULL,
+    object_id integer NOT NULL,
+    content_type_id integer NOT NULL,
+    parameter_id integer NOT NULL,
+    CONSTRAINT core_networkparameter_object_id_check CHECK ((object_id >= 0))
+);
+
+
+ALTER TABLE public.core_networkparameter OWNER TO postgres;
+
+--
+-- Name: core_networkparameter_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_networkparameter_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_networkparameter_id_seq OWNER TO postgres;
+
+--
+-- Name: core_networkparameter_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_networkparameter_id_seq OWNED BY core_networkparameter.id;
+
+
+--
+-- Name: core_networkparametertype; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_networkparametertype (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(128) NOT NULL,
+    description character varying(1024) NOT NULL
+);
+
+
+ALTER TABLE public.core_networkparametertype OWNER TO postgres;
+
+--
+-- Name: core_networkparametertype_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_networkparametertype_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_networkparametertype_id_seq OWNER TO postgres;
+
+--
+-- Name: core_networkparametertype_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_networkparametertype_id_seq OWNED BY core_networkparametertype.id;
+
+
+--
+-- Name: core_networkslice; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_networkslice (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    network_id integer NOT NULL,
+    slice_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_networkslice OWNER TO postgres;
+
+--
+-- Name: core_networkslice_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_networkslice_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_networkslice_id_seq OWNER TO postgres;
+
+--
+-- Name: core_networkslice_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_networkslice_id_seq OWNED BY core_networkslice.id;
+
+
+--
+-- Name: core_networktemplate; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_networktemplate (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(32) NOT NULL,
+    description character varying(1024),
+    guaranteed_bandwidth integer NOT NULL,
+    visibility character varying(30) NOT NULL,
+    translation character varying(30) NOT NULL,
+    access character varying(30),
+    shared_network_name character varying(30),
+    shared_network_id character varying(256),
+    topology_kind character varying(30) NOT NULL,
+    controller_kind character varying(30)
+);
+
+
+ALTER TABLE public.core_networktemplate OWNER TO postgres;
+
+--
+-- Name: core_networktemplate_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_networktemplate_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_networktemplate_id_seq OWNER TO postgres;
+
+--
+-- Name: core_networktemplate_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_networktemplate_id_seq OWNED BY core_networktemplate.id;
+
+
+--
+-- Name: core_node; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_node (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(200) NOT NULL,
+    site_id integer,
+    site_deployment_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_node OWNER TO postgres;
+
+--
+-- Name: core_node_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_node_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_node_id_seq OWNER TO postgres;
+
+--
+-- Name: core_node_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_node_id_seq OWNED BY core_node.id;
+
+
+--
+-- Name: core_nodelabel; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_nodelabel (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(200) NOT NULL
+);
+
+
+ALTER TABLE public.core_nodelabel OWNER TO postgres;
+
+--
+-- Name: core_nodelabel_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_nodelabel_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_nodelabel_id_seq OWNER TO postgres;
+
+--
+-- Name: core_nodelabel_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_nodelabel_id_seq OWNED BY core_nodelabel.id;
+
+
+--
+-- Name: core_nodelabel_node; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_nodelabel_node (
+    id integer NOT NULL,
+    nodelabel_id integer NOT NULL,
+    node_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_nodelabel_node OWNER TO postgres;
+
+--
+-- Name: core_nodelabel_node_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_nodelabel_node_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_nodelabel_node_id_seq OWNER TO postgres;
+
+--
+-- Name: core_nodelabel_node_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_nodelabel_node_id_seq OWNED BY core_nodelabel_node.id;
+
+
+--
+-- Name: core_payment; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_payment (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    amount double precision NOT NULL,
+    date timestamp with time zone NOT NULL,
+    account_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_payment OWNER TO postgres;
+
+--
+-- Name: core_payment_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_payment_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_payment_id_seq OWNER TO postgres;
+
+--
+-- Name: core_payment_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_payment_id_seq OWNED BY core_payment.id;
+
+
+--
+-- Name: core_port; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_port (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    ip inet,
+    port_id character varying(256),
+    mac character varying(256),
+    xos_created boolean NOT NULL,
+    instance_id integer,
+    network_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_port OWNER TO postgres;
+
+--
+-- Name: core_port_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_port_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_port_id_seq OWNER TO postgres;
+
+--
+-- Name: core_port_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_port_id_seq OWNED BY core_port.id;
+
+
+--
+-- Name: core_program; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_program (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(30) NOT NULL,
+    description text,
+    kind character varying(30) NOT NULL,
+    command character varying(30),
+    contents text,
+    output text,
+    messages text,
+    status text,
+    owner_id integer
+);
+
+
+ALTER TABLE public.core_program OWNER TO postgres;
+
+--
+-- Name: core_program_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_program_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_program_id_seq OWNER TO postgres;
+
+--
+-- Name: core_program_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_program_id_seq OWNED BY core_program.id;
+
+
+--
+-- Name: core_project; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_project (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(200) NOT NULL
+);
+
+
+ALTER TABLE public.core_project OWNER TO postgres;
+
+--
+-- Name: core_project_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_project_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_project_id_seq OWNER TO postgres;
+
+--
+-- Name: core_project_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_project_id_seq OWNED BY core_project.id;
+
+
+--
+-- Name: core_reservation; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_reservation (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    "startTime" timestamp with time zone NOT NULL,
+    duration integer NOT NULL,
+    slice_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_reservation OWNER TO postgres;
+
+--
+-- Name: core_reservation_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_reservation_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_reservation_id_seq OWNER TO postgres;
+
+--
+-- Name: core_reservation_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_reservation_id_seq OWNED BY core_reservation.id;
+
+
+--
+-- Name: core_reservedresource; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_reservedresource (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    quantity integer NOT NULL,
+    instance_id integer NOT NULL,
+    "reservationSet_id" integer NOT NULL,
+    resource_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_reservedresource OWNER TO postgres;
+
+--
+-- Name: core_reservedresource_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_reservedresource_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_reservedresource_id_seq OWNER TO postgres;
+
+--
+-- Name: core_reservedresource_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_reservedresource_id_seq OWNED BY core_reservedresource.id;
+
+
+--
+-- Name: core_role; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_role (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    role_type character varying(80) NOT NULL,
+    role character varying(80),
+    description character varying(120) NOT NULL,
+    content_type_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_role OWNER TO postgres;
+
+--
+-- Name: core_role_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_role_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_role_id_seq OWNER TO postgres;
+
+--
+-- Name: core_role_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_role_id_seq OWNED BY core_role.id;
+
+
+--
+-- Name: core_router; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_router (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(32) NOT NULL,
+    owner_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_router OWNER TO postgres;
+
+--
+-- Name: core_router_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_router_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_router_id_seq OWNER TO postgres;
+
+--
+-- Name: core_router_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_router_id_seq OWNED BY core_router.id;
+
+
+--
+-- Name: core_router_networks; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_router_networks (
+    id integer NOT NULL,
+    router_id integer NOT NULL,
+    network_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_router_networks OWNER TO postgres;
+
+--
+-- Name: core_router_networks_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_router_networks_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_router_networks_id_seq OWNER TO postgres;
+
+--
+-- Name: core_router_networks_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_router_networks_id_seq OWNED BY core_router_networks.id;
+
+
+--
+-- Name: core_router_permittedNetworks; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE "core_router_permittedNetworks" (
+    id integer NOT NULL,
+    router_id integer NOT NULL,
+    network_id integer NOT NULL
+);
+
+
+ALTER TABLE public."core_router_permittedNetworks" OWNER TO postgres;
+
+--
+-- Name: core_router_permittedNetworks_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE "core_router_permittedNetworks_id_seq"
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public."core_router_permittedNetworks_id_seq" OWNER TO postgres;
+
+--
+-- Name: core_router_permittedNetworks_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE "core_router_permittedNetworks_id_seq" OWNED BY "core_router_permittedNetworks".id;
+
+
+--
+-- Name: core_service; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_service (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    description text,
+    enabled boolean NOT NULL,
+    kind character varying(30) NOT NULL,
+    name character varying(30) NOT NULL,
+    "versionNumber" character varying(30) NOT NULL,
+    published boolean NOT NULL,
+    view_url character varying(1024),
+    icon_url character varying(1024),
+    public_key text,
+    private_key_fn character varying(1024),
+    service_specific_id character varying(30),
+    service_specific_attribute text
+);
+
+
+ALTER TABLE public.core_service OWNER TO postgres;
+
+--
+-- Name: core_service_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_service_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_service_id_seq OWNER TO postgres;
+
+--
+-- Name: core_service_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_service_id_seq OWNED BY core_service.id;
+
+
+--
+-- Name: core_serviceattribute; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_serviceattribute (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(128) NOT NULL,
+    value character varying(1024) NOT NULL,
+    service_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_serviceattribute OWNER TO postgres;
+
+--
+-- Name: core_serviceattribute_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_serviceattribute_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_serviceattribute_id_seq OWNER TO postgres;
+
+--
+-- Name: core_serviceattribute_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_serviceattribute_id_seq OWNED BY core_serviceattribute.id;
+
+
+--
+-- Name: core_serviceclass; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_serviceclass (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(32) NOT NULL,
+    description character varying(255) NOT NULL,
+    commitment integer NOT NULL,
+    "membershipFee" integer NOT NULL,
+    "membershipFeeMonths" integer NOT NULL,
+    "upgradeRequiresApproval" boolean NOT NULL
+);
+
+
+ALTER TABLE public.core_serviceclass OWNER TO postgres;
+
+--
+-- Name: core_serviceclass_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_serviceclass_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_serviceclass_id_seq OWNER TO postgres;
+
+--
+-- Name: core_serviceclass_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_serviceclass_id_seq OWNED BY core_serviceclass.id;
+
+
+--
+-- Name: core_serviceclass_upgradeFrom; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE "core_serviceclass_upgradeFrom" (
+    id integer NOT NULL,
+    from_serviceclass_id integer NOT NULL,
+    to_serviceclass_id integer NOT NULL
+);
+
+
+ALTER TABLE public."core_serviceclass_upgradeFrom" OWNER TO postgres;
+
+--
+-- Name: core_serviceclass_upgradeFrom_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE "core_serviceclass_upgradeFrom_id_seq"
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public."core_serviceclass_upgradeFrom_id_seq" OWNER TO postgres;
+
+--
+-- Name: core_serviceclass_upgradeFrom_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE "core_serviceclass_upgradeFrom_id_seq" OWNED BY "core_serviceclass_upgradeFrom".id;
+
+
+--
+-- Name: core_serviceprivilege; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_serviceprivilege (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    role_id integer NOT NULL,
+    service_id integer NOT NULL,
+    user_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_serviceprivilege OWNER TO postgres;
+
+--
+-- Name: core_serviceprivilege_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_serviceprivilege_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_serviceprivilege_id_seq OWNER TO postgres;
+
+--
+-- Name: core_serviceprivilege_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_serviceprivilege_id_seq OWNED BY core_serviceprivilege.id;
+
+
+--
+-- Name: core_serviceresource; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_serviceresource (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(32) NOT NULL,
+    "maxUnitsDeployment" integer NOT NULL,
+    "maxUnitsNode" integer NOT NULL,
+    "maxDuration" integer NOT NULL,
+    "bucketInRate" integer NOT NULL,
+    "bucketMaxSize" integer NOT NULL,
+    cost integer NOT NULL,
+    "calendarReservable" boolean NOT NULL,
+    "serviceClass_id" integer NOT NULL
+);
+
+
+ALTER TABLE public.core_serviceresource OWNER TO postgres;
+
+--
+-- Name: core_serviceresource_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_serviceresource_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_serviceresource_id_seq OWNER TO postgres;
+
+--
+-- Name: core_serviceresource_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_serviceresource_id_seq OWNED BY core_serviceresource.id;
+
+
+--
+-- Name: core_servicerole; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_servicerole (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    role character varying(30) NOT NULL
+);
+
+
+ALTER TABLE public.core_servicerole OWNER TO postgres;
+
+--
+-- Name: core_servicerole_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_servicerole_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_servicerole_id_seq OWNER TO postgres;
+
+--
+-- Name: core_servicerole_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_servicerole_id_seq OWNED BY core_servicerole.id;
+
+
+--
+-- Name: core_site; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_site (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(200) NOT NULL,
+    site_url character varying(512),
+    enabled boolean NOT NULL,
+    hosts_nodes boolean NOT NULL,
+    hosts_users boolean NOT NULL,
+    location character varying(42) NOT NULL,
+    longitude double precision,
+    latitude double precision,
+    login_base character varying(50) NOT NULL,
+    is_public boolean NOT NULL,
+    abbreviated_name character varying(80) NOT NULL
+);
+
+
+ALTER TABLE public.core_site OWNER TO postgres;
+
+--
+-- Name: core_site_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_site_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_site_id_seq OWNER TO postgres;
+
+--
+-- Name: core_site_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_site_id_seq OWNED BY core_site.id;
+
+
+--
+-- Name: core_sitecredential; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_sitecredential (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(128) NOT NULL,
+    key_id character varying(1024) NOT NULL,
+    enc_value text NOT NULL,
+    site_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_sitecredential OWNER TO postgres;
+
+--
+-- Name: core_sitecredential_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_sitecredential_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_sitecredential_id_seq OWNER TO postgres;
+
+--
+-- Name: core_sitecredential_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_sitecredential_id_seq OWNED BY core_sitecredential.id;
+
+
+--
+-- Name: core_sitedeployment; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_sitedeployment (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    availability_zone character varying(200),
+    controller_id integer,
+    deployment_id integer NOT NULL,
+    site_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_sitedeployment OWNER TO postgres;
+
+--
+-- Name: core_sitedeployment_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_sitedeployment_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_sitedeployment_id_seq OWNER TO postgres;
+
+--
+-- Name: core_sitedeployment_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_sitedeployment_id_seq OWNED BY core_sitedeployment.id;
+
+
+--
+-- Name: core_siteprivilege; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_siteprivilege (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    role_id integer NOT NULL,
+    site_id integer NOT NULL,
+    user_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_siteprivilege OWNER TO postgres;
+
+--
+-- Name: core_siteprivilege_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_siteprivilege_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_siteprivilege_id_seq OWNER TO postgres;
+
+--
+-- Name: core_siteprivilege_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_siteprivilege_id_seq OWNED BY core_siteprivilege.id;
+
+
+--
+-- Name: core_siterole; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_siterole (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    role character varying(30) NOT NULL
+);
+
+
+ALTER TABLE public.core_siterole OWNER TO postgres;
+
+--
+-- Name: core_siterole_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_siterole_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_siterole_id_seq OWNER TO postgres;
+
+--
+-- Name: core_siterole_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_siterole_id_seq OWNED BY core_siterole.id;
+
+
+--
+-- Name: core_slice; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_slice (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(80) NOT NULL,
+    enabled boolean NOT NULL,
+    omf_friendly boolean NOT NULL,
+    description text NOT NULL,
+    slice_url character varying(512) NOT NULL,
+    max_instances integer NOT NULL,
+    network character varying(256),
+    exposed_ports character varying(256),
+    mount_data_sets character varying(256),
+    default_isolation character varying(30) NOT NULL,
+    creator_id integer,
+    default_flavor_id integer,
+    default_image_id integer,
+    service_id integer,
+    "serviceClass_id" integer,
+    site_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_slice OWNER TO postgres;
+
+--
+-- Name: core_slice_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_slice_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_slice_id_seq OWNER TO postgres;
+
+--
+-- Name: core_slice_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_slice_id_seq OWNED BY core_slice.id;
+
+
+--
+-- Name: core_slicecredential; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_slicecredential (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(128) NOT NULL,
+    key_id character varying(1024) NOT NULL,
+    enc_value text NOT NULL,
+    slice_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_slicecredential OWNER TO postgres;
+
+--
+-- Name: core_slicecredential_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_slicecredential_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_slicecredential_id_seq OWNER TO postgres;
+
+--
+-- Name: core_slicecredential_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_slicecredential_id_seq OWNED BY core_slicecredential.id;
+
+
+--
+-- Name: core_sliceprivilege; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_sliceprivilege (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    role_id integer NOT NULL,
+    slice_id integer NOT NULL,
+    user_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_sliceprivilege OWNER TO postgres;
+
+--
+-- Name: core_sliceprivilege_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_sliceprivilege_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_sliceprivilege_id_seq OWNER TO postgres;
+
+--
+-- Name: core_sliceprivilege_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_sliceprivilege_id_seq OWNED BY core_sliceprivilege.id;
+
+
+--
+-- Name: core_slicerole; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_slicerole (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    role character varying(30) NOT NULL
+);
+
+
+ALTER TABLE public.core_slicerole OWNER TO postgres;
+
+--
+-- Name: core_slicerole_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_slicerole_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_slicerole_id_seq OWNER TO postgres;
+
+--
+-- Name: core_slicerole_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_slicerole_id_seq OWNED BY core_slicerole.id;
+
+
+--
+-- Name: core_slicetag; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_slicetag (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(30) NOT NULL,
+    value character varying(1024) NOT NULL,
+    slice_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_slicetag OWNER TO postgres;
+
+--
+-- Name: core_slicetag_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_slicetag_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_slicetag_id_seq OWNER TO postgres;
+
+--
+-- Name: core_slicetag_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_slicetag_id_seq OWNED BY core_slicetag.id;
+
+
+--
+-- Name: core_tag; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_tag (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(128) NOT NULL,
+    value character varying(1024) NOT NULL,
+    object_id integer NOT NULL,
+    content_type_id integer NOT NULL,
+    service_id integer NOT NULL,
+    CONSTRAINT core_tag_object_id_check CHECK ((object_id >= 0))
+);
+
+
+ALTER TABLE public.core_tag OWNER TO postgres;
+
+--
+-- Name: core_tag_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_tag_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_tag_id_seq OWNER TO postgres;
+
+--
+-- Name: core_tag_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_tag_id_seq OWNED BY core_tag.id;
+
+
+--
+-- Name: core_tenant; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_tenant (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    kind character varying(30) NOT NULL,
+    service_specific_id character varying(30),
+    service_specific_attribute text,
+    connect_method character varying(30) NOT NULL,
+    provider_service_id integer NOT NULL,
+    subscriber_root_id integer,
+    subscriber_service_id integer,
+    subscriber_tenant_id integer,
+    subscriber_user_id integer
+);
+
+
+ALTER TABLE public.core_tenant OWNER TO postgres;
+
+--
+-- Name: core_tenant_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_tenant_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_tenant_id_seq OWNER TO postgres;
+
+--
+-- Name: core_tenant_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_tenant_id_seq OWNED BY core_tenant.id;
+
+
+--
+-- Name: core_tenantattribute; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_tenantattribute (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(128) NOT NULL,
+    value text NOT NULL,
+    tenant_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_tenantattribute OWNER TO postgres;
+
+--
+-- Name: core_tenantattribute_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_tenantattribute_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_tenantattribute_id_seq OWNER TO postgres;
+
+--
+-- Name: core_tenantattribute_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_tenantattribute_id_seq OWNED BY core_tenantattribute.id;
+
+
+--
+-- Name: core_tenantroot; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_tenantroot (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    kind character varying(30) NOT NULL,
+    name character varying(255),
+    service_specific_attribute text,
+    service_specific_id character varying(30)
+);
+
+
+ALTER TABLE public.core_tenantroot OWNER TO postgres;
+
+--
+-- Name: core_tenantroot_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_tenantroot_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_tenantroot_id_seq OWNER TO postgres;
+
+--
+-- Name: core_tenantroot_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_tenantroot_id_seq OWNED BY core_tenantroot.id;
+
+
+--
+-- Name: core_tenantrootprivilege; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_tenantrootprivilege (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    role_id integer NOT NULL,
+    tenant_root_id integer NOT NULL,
+    user_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_tenantrootprivilege OWNER TO postgres;
+
+--
+-- Name: core_tenantrootprivilege_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_tenantrootprivilege_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_tenantrootprivilege_id_seq OWNER TO postgres;
+
+--
+-- Name: core_tenantrootprivilege_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_tenantrootprivilege_id_seq OWNED BY core_tenantrootprivilege.id;
+
+
+--
+-- Name: core_tenantrootrole; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_tenantrootrole (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    role character varying(30) NOT NULL
+);
+
+
+ALTER TABLE public.core_tenantrootrole OWNER TO postgres;
+
+--
+-- Name: core_tenantrootrole_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_tenantrootrole_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_tenantrootrole_id_seq OWNER TO postgres;
+
+--
+-- Name: core_tenantrootrole_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_tenantrootrole_id_seq OWNED BY core_tenantrootrole.id;
+
+
+--
+-- Name: core_usableobject; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_usableobject (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(1024) NOT NULL
+);
+
+
+ALTER TABLE public.core_usableobject OWNER TO postgres;
+
+--
+-- Name: core_usableobject_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_usableobject_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_usableobject_id_seq OWNER TO postgres;
+
+--
+-- Name: core_usableobject_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_usableobject_id_seq OWNED BY core_usableobject.id;
+
+
+--
+-- Name: core_user; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_user (
+    id integer NOT NULL,
+    password character varying(128) NOT NULL,
+    last_login timestamp with time zone NOT NULL,
+    email character varying(255) NOT NULL,
+    username character varying(255) NOT NULL,
+    firstname character varying(200) NOT NULL,
+    lastname character varying(200) NOT NULL,
+    phone character varying(100),
+    user_url character varying(200),
+    public_key text,
+    is_active boolean NOT NULL,
+    is_admin boolean NOT NULL,
+    is_staff boolean NOT NULL,
+    is_readonly boolean NOT NULL,
+    is_registering boolean NOT NULL,
+    is_appuser boolean NOT NULL,
+    login_page character varying(200),
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    timezone character varying(100) NOT NULL,
+    site_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_user OWNER TO postgres;
+
+--
+-- Name: core_user_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_user_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_user_id_seq OWNER TO postgres;
+
+--
+-- Name: core_user_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_user_id_seq OWNED BY core_user.id;
+
+
+--
+-- Name: core_usercredential; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_usercredential (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(128) NOT NULL,
+    key_id character varying(1024) NOT NULL,
+    enc_value text NOT NULL,
+    user_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_usercredential OWNER TO postgres;
+
+--
+-- Name: core_usercredential_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_usercredential_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_usercredential_id_seq OWNER TO postgres;
+
+--
+-- Name: core_usercredential_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_usercredential_id_seq OWNED BY core_usercredential.id;
+
+
+--
+-- Name: core_userdashboardview; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE core_userdashboardview (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    "order" integer NOT NULL,
+    "dashboardView_id" integer NOT NULL,
+    user_id integer NOT NULL
+);
+
+
+ALTER TABLE public.core_userdashboardview OWNER TO postgres;
+
+--
+-- Name: core_userdashboardview_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE core_userdashboardview_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.core_userdashboardview_id_seq OWNER TO postgres;
+
+--
+-- Name: core_userdashboardview_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE core_userdashboardview_id_seq OWNED BY core_userdashboardview.id;
+
+
+--
+-- Name: django_admin_log; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE django_admin_log (
+    id integer NOT NULL,
+    action_time timestamp with time zone NOT NULL,
+    object_id text,
+    object_repr character varying(200) NOT NULL,
+    action_flag smallint NOT NULL,
+    change_message text NOT NULL,
+    content_type_id integer,
+    user_id integer NOT NULL,
+    CONSTRAINT django_admin_log_action_flag_check CHECK ((action_flag >= 0))
+);
+
+
+ALTER TABLE public.django_admin_log OWNER TO postgres;
+
+--
+-- Name: django_admin_log_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE django_admin_log_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.django_admin_log_id_seq OWNER TO postgres;
+
+--
+-- Name: django_admin_log_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE django_admin_log_id_seq OWNED BY django_admin_log.id;
+
+
+--
+-- Name: django_content_type; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE django_content_type (
+    id integer NOT NULL,
+    name character varying(100) NOT NULL,
+    app_label character varying(100) NOT NULL,
+    model character varying(100) NOT NULL
+);
+
+
+ALTER TABLE public.django_content_type OWNER TO postgres;
+
+--
+-- Name: django_content_type_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE django_content_type_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.django_content_type_id_seq OWNER TO postgres;
+
+--
+-- Name: django_content_type_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE django_content_type_id_seq OWNED BY django_content_type.id;
+
+
+--
+-- Name: django_migrations; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE django_migrations (
+    id integer NOT NULL,
+    app character varying(255) NOT NULL,
+    name character varying(255) NOT NULL,
+    applied timestamp with time zone NOT NULL
+);
+
+
+ALTER TABLE public.django_migrations OWNER TO postgres;
+
+--
+-- Name: django_migrations_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE django_migrations_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.django_migrations_id_seq OWNER TO postgres;
+
+--
+-- Name: django_migrations_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE django_migrations_id_seq OWNED BY django_migrations.id;
+
+
+--
+-- Name: django_session; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE django_session (
+    session_key character varying(40) NOT NULL,
+    session_data text NOT NULL,
+    expire_date timestamp with time zone NOT NULL
+);
+
+
+ALTER TABLE public.django_session OWNER TO postgres;
+
+--
+-- Name: hpc_accessmap; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE hpc_accessmap (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(64) NOT NULL,
+    description text,
+    map character varying(100) NOT NULL,
+    "contentProvider_id" integer NOT NULL
+);
+
+
+ALTER TABLE public.hpc_accessmap OWNER TO postgres;
+
+--
+-- Name: hpc_accessmap_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE hpc_accessmap_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.hpc_accessmap_id_seq OWNER TO postgres;
+
+--
+-- Name: hpc_accessmap_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE hpc_accessmap_id_seq OWNED BY hpc_accessmap.id;
+
+
+--
+-- Name: hpc_cdnprefix; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE hpc_cdnprefix (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    cdn_prefix_id integer,
+    prefix character varying(200) NOT NULL,
+    description text,
+    enabled boolean NOT NULL,
+    "contentProvider_id" integer NOT NULL,
+    "defaultOriginServer_id" integer
+);
+
+
+ALTER TABLE public.hpc_cdnprefix OWNER TO postgres;
+
+--
+-- Name: hpc_cdnprefix_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE hpc_cdnprefix_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.hpc_cdnprefix_id_seq OWNER TO postgres;
+
+--
+-- Name: hpc_cdnprefix_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE hpc_cdnprefix_id_seq OWNED BY hpc_cdnprefix.id;
+
+
+--
+-- Name: hpc_contentprovider; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE hpc_contentprovider (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    content_provider_id integer,
+    name character varying(254) NOT NULL,
+    enabled boolean NOT NULL,
+    description text,
+    "serviceProvider_id" integer NOT NULL
+);
+
+
+ALTER TABLE public.hpc_contentprovider OWNER TO postgres;
+
+--
+-- Name: hpc_contentprovider_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE hpc_contentprovider_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.hpc_contentprovider_id_seq OWNER TO postgres;
+
+--
+-- Name: hpc_contentprovider_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE hpc_contentprovider_id_seq OWNED BY hpc_contentprovider.id;
+
+
+--
+-- Name: hpc_contentprovider_users; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE hpc_contentprovider_users (
+    id integer NOT NULL,
+    contentprovider_id integer NOT NULL,
+    user_id integer NOT NULL
+);
+
+
+ALTER TABLE public.hpc_contentprovider_users OWNER TO postgres;
+
+--
+-- Name: hpc_contentprovider_users_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE hpc_contentprovider_users_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.hpc_contentprovider_users_id_seq OWNER TO postgres;
+
+--
+-- Name: hpc_contentprovider_users_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE hpc_contentprovider_users_id_seq OWNED BY hpc_contentprovider_users.id;
+
+
+--
+-- Name: hpc_hpchealthcheck; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE hpc_hpchealthcheck (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    kind character varying(30) NOT NULL,
+    resource_name character varying(1024) NOT NULL,
+    result_contains character varying(1024),
+    result_min_size integer,
+    result_max_size integer,
+    "hpcService_id" integer
+);
+
+
+ALTER TABLE public.hpc_hpchealthcheck OWNER TO postgres;
+
+--
+-- Name: hpc_hpchealthcheck_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE hpc_hpchealthcheck_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.hpc_hpchealthcheck_id_seq OWNER TO postgres;
+
+--
+-- Name: hpc_hpchealthcheck_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE hpc_hpchealthcheck_id_seq OWNED BY hpc_hpchealthcheck.id;
+
+
+--
+-- Name: hpc_hpcservice; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE hpc_hpcservice (
+    service_ptr_id integer NOT NULL,
+    cmi_hostname character varying(254),
+    hpc_port80 boolean NOT NULL,
+    watcher_hpc_network character varying(254),
+    watcher_dnsdemux_network character varying(254),
+    watcher_dnsredir_network character varying(254)
+);
+
+
+ALTER TABLE public.hpc_hpcservice OWNER TO postgres;
+
+--
+-- Name: hpc_originserver; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE hpc_originserver (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    origin_server_id integer,
+    url character varying(1024) NOT NULL,
+    authenticated boolean NOT NULL,
+    enabled boolean NOT NULL,
+    protocol character varying(12) NOT NULL,
+    redirects boolean NOT NULL,
+    description text,
+    "contentProvider_id" integer NOT NULL
+);
+
+
+ALTER TABLE public.hpc_originserver OWNER TO postgres;
+
+--
+-- Name: hpc_originserver_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE hpc_originserver_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.hpc_originserver_id_seq OWNER TO postgres;
+
+--
+-- Name: hpc_originserver_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE hpc_originserver_id_seq OWNED BY hpc_originserver.id;
+
+
+--
+-- Name: hpc_serviceprovider; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE hpc_serviceprovider (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    service_provider_id integer,
+    name character varying(254) NOT NULL,
+    description text,
+    enabled boolean NOT NULL,
+    "hpcService_id" integer NOT NULL
+);
+
+
+ALTER TABLE public.hpc_serviceprovider OWNER TO postgres;
+
+--
+-- Name: hpc_serviceprovider_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE hpc_serviceprovider_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.hpc_serviceprovider_id_seq OWNER TO postgres;
+
+--
+-- Name: hpc_serviceprovider_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE hpc_serviceprovider_id_seq OWNED BY hpc_serviceprovider.id;
+
+
+--
+-- Name: hpc_sitemap; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE hpc_sitemap (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(64) NOT NULL,
+    description text,
+    map character varying(100) NOT NULL,
+    map_id integer,
+    "cdnPrefix_id" integer,
+    "contentProvider_id" integer,
+    "hpcService_id" integer,
+    "serviceProvider_id" integer
+);
+
+
+ALTER TABLE public.hpc_sitemap OWNER TO postgres;
+
+--
+-- Name: hpc_sitemap_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE hpc_sitemap_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.hpc_sitemap_id_seq OWNER TO postgres;
+
+--
+-- Name: hpc_sitemap_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE hpc_sitemap_id_seq OWNED BY hpc_sitemap.id;
+
+
+--
+-- Name: requestrouter_requestrouterservice; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE requestrouter_requestrouterservice (
+    service_ptr_id integer NOT NULL,
+    "behindNat" boolean NOT NULL,
+    "defaultTTL" integer NOT NULL,
+    "defaultAction" character varying(30) NOT NULL,
+    "lastResortAction" character varying(30) NOT NULL,
+    "maxAnswers" integer NOT NULL,
+    CONSTRAINT "requestrouter_requestrouterservice_defaultTTL_check" CHECK (("defaultTTL" >= 0)),
+    CONSTRAINT "requestrouter_requestrouterservice_maxAnswers_check" CHECK (("maxAnswers" >= 0))
+);
+
+
+ALTER TABLE public.requestrouter_requestrouterservice OWNER TO postgres;
+
+--
+-- Name: requestrouter_servicemap; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE requestrouter_servicemap (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(50) NOT NULL,
+    prefix character varying(256) NOT NULL,
+    "siteMap" character varying(100) NOT NULL,
+    "accessMap" character varying(100) NOT NULL,
+    owner_id integer NOT NULL,
+    slice_id integer NOT NULL
+);
+
+
+ALTER TABLE public.requestrouter_servicemap OWNER TO postgres;
+
+--
+-- Name: requestrouter_servicemap_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE requestrouter_servicemap_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.requestrouter_servicemap_id_seq OWNER TO postgres;
+
+--
+-- Name: requestrouter_servicemap_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE requestrouter_servicemap_id_seq OWNED BY requestrouter_servicemap.id;
+
+
+--
+-- Name: syndicate_storage_slicesecret; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE syndicate_storage_slicesecret (
+    id integer NOT NULL,
+    secret text NOT NULL,
+    slice_id_id integer NOT NULL
+);
+
+
+ALTER TABLE public.syndicate_storage_slicesecret OWNER TO postgres;
+
+--
+-- Name: syndicate_storage_slicesecret_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE syndicate_storage_slicesecret_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.syndicate_storage_slicesecret_id_seq OWNER TO postgres;
+
+--
+-- Name: syndicate_storage_slicesecret_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE syndicate_storage_slicesecret_id_seq OWNED BY syndicate_storage_slicesecret.id;
+
+
+--
+-- Name: syndicate_storage_syndicateprincipal; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE syndicate_storage_syndicateprincipal (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    principal_id text NOT NULL,
+    public_key_pem text NOT NULL,
+    sealed_private_key text NOT NULL
+);
+
+
+ALTER TABLE public.syndicate_storage_syndicateprincipal OWNER TO postgres;
+
+--
+-- Name: syndicate_storage_syndicateprincipal_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE syndicate_storage_syndicateprincipal_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.syndicate_storage_syndicateprincipal_id_seq OWNER TO postgres;
+
+--
+-- Name: syndicate_storage_syndicateprincipal_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE syndicate_storage_syndicateprincipal_id_seq OWNED BY syndicate_storage_syndicateprincipal.id;
+
+
+--
+-- Name: syndicate_storage_syndicateservice; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE syndicate_storage_syndicateservice (
+    service_ptr_id integer NOT NULL
+);
+
+
+ALTER TABLE public.syndicate_storage_syndicateservice OWNER TO postgres;
+
+--
+-- Name: syndicate_storage_volume; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE syndicate_storage_volume (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    name character varying(64) NOT NULL,
+    description text,
+    blocksize integer NOT NULL,
+    private boolean NOT NULL,
+    archive boolean NOT NULL,
+    cap_read_data boolean NOT NULL,
+    cap_write_data boolean NOT NULL,
+    cap_host_data boolean NOT NULL,
+    owner_id_id integer NOT NULL,
+    CONSTRAINT syndicate_storage_volume_blocksize_check CHECK ((blocksize >= 0))
+);
+
+
+ALTER TABLE public.syndicate_storage_volume OWNER TO postgres;
+
+--
+-- Name: syndicate_storage_volume_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE syndicate_storage_volume_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.syndicate_storage_volume_id_seq OWNER TO postgres;
+
+--
+-- Name: syndicate_storage_volume_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE syndicate_storage_volume_id_seq OWNED BY syndicate_storage_volume.id;
+
+
+--
+-- Name: syndicate_storage_volumeaccessright; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE syndicate_storage_volumeaccessright (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    cap_read_data boolean NOT NULL,
+    cap_write_data boolean NOT NULL,
+    cap_host_data boolean NOT NULL,
+    owner_id_id integer NOT NULL,
+    volume_id integer NOT NULL
+);
+
+
+ALTER TABLE public.syndicate_storage_volumeaccessright OWNER TO postgres;
+
+--
+-- Name: syndicate_storage_volumeaccessright_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE syndicate_storage_volumeaccessright_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.syndicate_storage_volumeaccessright_id_seq OWNER TO postgres;
+
+--
+-- Name: syndicate_storage_volumeaccessright_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE syndicate_storage_volumeaccessright_id_seq OWNED BY syndicate_storage_volumeaccessright.id;
+
+
+--
+-- Name: syndicate_storage_volumeslice; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE TABLE syndicate_storage_volumeslice (
+    id integer NOT NULL,
+    created timestamp with time zone NOT NULL,
+    updated timestamp with time zone NOT NULL,
+    enacted timestamp with time zone,
+    policed timestamp with time zone,
+    backend_register character varying(140),
+    backend_status character varying(1024) NOT NULL,
+    deleted boolean NOT NULL,
+    write_protect boolean NOT NULL,
+    lazy_blocked boolean NOT NULL,
+    no_sync boolean NOT NULL,
+    cap_read_data boolean NOT NULL,
+    cap_write_data boolean NOT NULL,
+    cap_host_data boolean NOT NULL,
+    "UG_portnum" integer NOT NULL,
+    "RG_portnum" integer NOT NULL,
+    credentials_blob text,
+    slice_id_id integer NOT NULL,
+    volume_id_id integer NOT NULL,
+    CONSTRAINT "syndicate_storage_volumeslice_RG_portnum_check" CHECK (("RG_portnum" >= 0)),
+    CONSTRAINT "syndicate_storage_volumeslice_UG_portnum_check" CHECK (("UG_portnum" >= 0))
+);
+
+
+ALTER TABLE public.syndicate_storage_volumeslice OWNER TO postgres;
+
+--
+-- Name: syndicate_storage_volumeslice_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE syndicate_storage_volumeslice_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.syndicate_storage_volumeslice_id_seq OWNER TO postgres;
+
+--
+-- Name: syndicate_storage_volumeslice_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE syndicate_storage_volumeslice_id_seq OWNED BY syndicate_storage_volumeslice.id;
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY auth_group ALTER COLUMN id SET DEFAULT nextval('auth_group_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY auth_group_permissions ALTER COLUMN id SET DEFAULT nextval('auth_group_permissions_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY auth_permission ALTER COLUMN id SET DEFAULT nextval('auth_permission_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_account ALTER COLUMN id SET DEFAULT nextval('core_account_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_addresspool ALTER COLUMN id SET DEFAULT nextval('core_addresspool_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_charge ALTER COLUMN id SET DEFAULT nextval('core_charge_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controller ALTER COLUMN id SET DEFAULT nextval('core_controller_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllercredential ALTER COLUMN id SET DEFAULT nextval('core_controllercredential_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllerdashboardview ALTER COLUMN id SET DEFAULT nextval('core_controllerdashboardview_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllerimages ALTER COLUMN id SET DEFAULT nextval('core_controllerimages_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllernetwork ALTER COLUMN id SET DEFAULT nextval('core_controllernetwork_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllerrole ALTER COLUMN id SET DEFAULT nextval('core_controllerrole_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllersite ALTER COLUMN id SET DEFAULT nextval('core_controllersite_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllersiteprivilege ALTER COLUMN id SET DEFAULT nextval('core_controllersiteprivilege_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllerslice ALTER COLUMN id SET DEFAULT nextval('core_controllerslice_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllersliceprivilege ALTER COLUMN id SET DEFAULT nextval('core_controllersliceprivilege_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controlleruser ALTER COLUMN id SET DEFAULT nextval('core_controlleruser_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_dashboardview ALTER COLUMN id SET DEFAULT nextval('core_dashboardview_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_dashboardview_deployments ALTER COLUMN id SET DEFAULT nextval('core_dashboardview_deployments_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_deployment ALTER COLUMN id SET DEFAULT nextval('core_deployment_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_deploymentprivilege ALTER COLUMN id SET DEFAULT nextval('core_deploymentprivilege_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_deploymentrole ALTER COLUMN id SET DEFAULT nextval('core_deploymentrole_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_diag ALTER COLUMN id SET DEFAULT nextval('core_diag_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_flavor ALTER COLUMN id SET DEFAULT nextval('core_flavor_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_flavor_deployments ALTER COLUMN id SET DEFAULT nextval('core_flavor_deployments_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_image ALTER COLUMN id SET DEFAULT nextval('core_image_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_imagedeployments ALTER COLUMN id SET DEFAULT nextval('core_imagedeployments_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_instance ALTER COLUMN id SET DEFAULT nextval('core_instance_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_invoice ALTER COLUMN id SET DEFAULT nextval('core_invoice_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_network ALTER COLUMN id SET DEFAULT nextval('core_network_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_network_permitted_slices ALTER COLUMN id SET DEFAULT nextval('core_network_permitted_slices_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_networkparameter ALTER COLUMN id SET DEFAULT nextval('core_networkparameter_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_networkparametertype ALTER COLUMN id SET DEFAULT nextval('core_networkparametertype_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_networkslice ALTER COLUMN id SET DEFAULT nextval('core_networkslice_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_networktemplate ALTER COLUMN id SET DEFAULT nextval('core_networktemplate_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_node ALTER COLUMN id SET DEFAULT nextval('core_node_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_nodelabel ALTER COLUMN id SET DEFAULT nextval('core_nodelabel_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_nodelabel_node ALTER COLUMN id SET DEFAULT nextval('core_nodelabel_node_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_payment ALTER COLUMN id SET DEFAULT nextval('core_payment_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_port ALTER COLUMN id SET DEFAULT nextval('core_port_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_program ALTER COLUMN id SET DEFAULT nextval('core_program_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_project ALTER COLUMN id SET DEFAULT nextval('core_project_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_reservation ALTER COLUMN id SET DEFAULT nextval('core_reservation_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_reservedresource ALTER COLUMN id SET DEFAULT nextval('core_reservedresource_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_role ALTER COLUMN id SET DEFAULT nextval('core_role_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_router ALTER COLUMN id SET DEFAULT nextval('core_router_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_router_networks ALTER COLUMN id SET DEFAULT nextval('core_router_networks_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY "core_router_permittedNetworks" ALTER COLUMN id SET DEFAULT nextval('"core_router_permittedNetworks_id_seq"'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_service ALTER COLUMN id SET DEFAULT nextval('core_service_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_serviceattribute ALTER COLUMN id SET DEFAULT nextval('core_serviceattribute_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_serviceclass ALTER COLUMN id SET DEFAULT nextval('core_serviceclass_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY "core_serviceclass_upgradeFrom" ALTER COLUMN id SET DEFAULT nextval('"core_serviceclass_upgradeFrom_id_seq"'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_serviceprivilege ALTER COLUMN id SET DEFAULT nextval('core_serviceprivilege_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_serviceresource ALTER COLUMN id SET DEFAULT nextval('core_serviceresource_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_servicerole ALTER COLUMN id SET DEFAULT nextval('core_servicerole_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_site ALTER COLUMN id SET DEFAULT nextval('core_site_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_sitecredential ALTER COLUMN id SET DEFAULT nextval('core_sitecredential_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_sitedeployment ALTER COLUMN id SET DEFAULT nextval('core_sitedeployment_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_siteprivilege ALTER COLUMN id SET DEFAULT nextval('core_siteprivilege_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_siterole ALTER COLUMN id SET DEFAULT nextval('core_siterole_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_slice ALTER COLUMN id SET DEFAULT nextval('core_slice_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_slicecredential ALTER COLUMN id SET DEFAULT nextval('core_slicecredential_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_sliceprivilege ALTER COLUMN id SET DEFAULT nextval('core_sliceprivilege_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_slicerole ALTER COLUMN id SET DEFAULT nextval('core_slicerole_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_slicetag ALTER COLUMN id SET DEFAULT nextval('core_slicetag_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tag ALTER COLUMN id SET DEFAULT nextval('core_tag_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tenant ALTER COLUMN id SET DEFAULT nextval('core_tenant_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tenantattribute ALTER COLUMN id SET DEFAULT nextval('core_tenantattribute_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tenantroot ALTER COLUMN id SET DEFAULT nextval('core_tenantroot_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tenantrootprivilege ALTER COLUMN id SET DEFAULT nextval('core_tenantrootprivilege_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tenantrootrole ALTER COLUMN id SET DEFAULT nextval('core_tenantrootrole_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_usableobject ALTER COLUMN id SET DEFAULT nextval('core_usableobject_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_user ALTER COLUMN id SET DEFAULT nextval('core_user_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_usercredential ALTER COLUMN id SET DEFAULT nextval('core_usercredential_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_userdashboardview ALTER COLUMN id SET DEFAULT nextval('core_userdashboardview_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY django_admin_log ALTER COLUMN id SET DEFAULT nextval('django_admin_log_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY django_content_type ALTER COLUMN id SET DEFAULT nextval('django_content_type_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY django_migrations ALTER COLUMN id SET DEFAULT nextval('django_migrations_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_accessmap ALTER COLUMN id SET DEFAULT nextval('hpc_accessmap_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_cdnprefix ALTER COLUMN id SET DEFAULT nextval('hpc_cdnprefix_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_contentprovider ALTER COLUMN id SET DEFAULT nextval('hpc_contentprovider_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_contentprovider_users ALTER COLUMN id SET DEFAULT nextval('hpc_contentprovider_users_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_hpchealthcheck ALTER COLUMN id SET DEFAULT nextval('hpc_hpchealthcheck_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_originserver ALTER COLUMN id SET DEFAULT nextval('hpc_originserver_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_serviceprovider ALTER COLUMN id SET DEFAULT nextval('hpc_serviceprovider_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_sitemap ALTER COLUMN id SET DEFAULT nextval('hpc_sitemap_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY requestrouter_servicemap ALTER COLUMN id SET DEFAULT nextval('requestrouter_servicemap_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY syndicate_storage_slicesecret ALTER COLUMN id SET DEFAULT nextval('syndicate_storage_slicesecret_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY syndicate_storage_syndicateprincipal ALTER COLUMN id SET DEFAULT nextval('syndicate_storage_syndicateprincipal_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY syndicate_storage_volume ALTER COLUMN id SET DEFAULT nextval('syndicate_storage_volume_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY syndicate_storage_volumeaccessright ALTER COLUMN id SET DEFAULT nextval('syndicate_storage_volumeaccessright_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY syndicate_storage_volumeslice ALTER COLUMN id SET DEFAULT nextval('syndicate_storage_volumeslice_id_seq'::regclass);
+
+
+--
+-- Data for Name: auth_group; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY auth_group (id, name) FROM stdin;
+\.
+
+
+--
+-- Name: auth_group_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('auth_group_id_seq', 1, false);
+
+
+--
+-- Data for Name: auth_group_permissions; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY auth_group_permissions (id, group_id, permission_id) FROM stdin;
+\.
+
+
+--
+-- Name: auth_group_permissions_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('auth_group_permissions_id_seq', 1, false);
+
+
+--
+-- Data for Name: auth_permission; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY auth_permission (id, name, content_type_id, codename) FROM stdin;
+1	Can add permission	1	add_permission
+2	Can change permission	1	change_permission
+3	Can delete permission	1	delete_permission
+4	Can add group	2	add_group
+5	Can change group	2	change_group
+6	Can delete group	2	delete_group
+7	Can add content type	3	add_contenttype
+8	Can change content type	3	change_contenttype
+9	Can delete content type	3	delete_contenttype
+10	Can add session	4	add_session
+11	Can change session	4	change_session
+12	Can delete session	4	delete_session
+13	Can add log entry	5	add_logentry
+14	Can change log entry	5	change_logentry
+15	Can delete log entry	5	delete_logentry
+16	Can add project	6	add_project
+17	Can change project	6	change_project
+18	Can delete project	6	delete_project
+19	Can add service	7	add_service
+20	Can change service	7	change_service
+21	Can delete service	7	delete_service
+22	Can add service attribute	8	add_serviceattribute
+23	Can change service attribute	8	change_serviceattribute
+24	Can delete service attribute	8	delete_serviceattribute
+25	Can add service role	9	add_servicerole
+26	Can change service role	9	change_servicerole
+27	Can delete service role	9	delete_servicerole
+28	Can add service privilege	10	add_serviceprivilege
+29	Can change service privilege	10	change_serviceprivilege
+30	Can delete service privilege	10	delete_serviceprivilege
+31	Can add tenant root	11	add_tenantroot
+32	Can change tenant root	11	change_tenantroot
+33	Can delete tenant root	11	delete_tenantroot
+34	Can add tenant	12	add_tenant
+35	Can change tenant	12	change_tenant
+36	Can delete tenant	12	delete_tenant
+37	Can add tenant with container	12	add_tenantwithcontainer
+38	Can change tenant with container	12	change_tenantwithcontainer
+39	Can delete tenant with container	12	delete_tenantwithcontainer
+40	Can add coarse tenant	12	add_coarsetenant
+41	Can change coarse tenant	12	change_coarsetenant
+42	Can delete coarse tenant	12	delete_coarsetenant
+43	Can add subscriber	11	add_subscriber
+44	Can change subscriber	11	change_subscriber
+45	Can delete subscriber	11	delete_subscriber
+46	Can add provider	11	add_provider
+47	Can change provider	11	change_provider
+48	Can delete provider	11	delete_provider
+49	Can add tenant attribute	13	add_tenantattribute
+50	Can change tenant attribute	13	change_tenantattribute
+51	Can delete tenant attribute	13	delete_tenantattribute
+52	Can add tenant root role	14	add_tenantrootrole
+53	Can change tenant root role	14	change_tenantrootrole
+54	Can delete tenant root role	14	delete_tenantrootrole
+55	Can add tenant root privilege	15	add_tenantrootprivilege
+56	Can change tenant root privilege	15	change_tenantrootprivilege
+57	Can delete tenant root privilege	15	delete_tenantrootprivilege
+58	Can add tag	16	add_tag
+59	Can change tag	16	change_tag
+60	Can delete tag	16	delete_tag
+61	Can add role	17	add_role
+62	Can change role	17	change_role
+63	Can delete role	17	delete_role
+64	Can add site	18	add_site
+65	Can change site	18	change_site
+66	Can delete site	18	delete_site
+67	Can add site role	19	add_siterole
+68	Can change site role	19	change_siterole
+69	Can delete site role	19	delete_siterole
+70	Can add site privilege	20	add_siteprivilege
+71	Can change site privilege	20	change_siteprivilege
+72	Can delete site privilege	20	delete_siteprivilege
+73	Can add deployment	21	add_deployment
+74	Can change deployment	21	change_deployment
+75	Can delete deployment	21	delete_deployment
+76	Can add deployment role	22	add_deploymentrole
+77	Can change deployment role	22	change_deploymentrole
+78	Can delete deployment role	22	delete_deploymentrole
+79	Can add deployment privilege	23	add_deploymentprivilege
+80	Can change deployment privilege	23	change_deploymentprivilege
+81	Can delete deployment privilege	23	delete_deploymentprivilege
+82	Can add controller role	24	add_controllerrole
+83	Can change controller role	24	change_controllerrole
+84	Can delete controller role	24	delete_controllerrole
+85	Can add controller	25	add_controller
+86	Can change controller	25	change_controller
+87	Can delete controller	25	delete_controller
+88	Can add site deployment	26	add_sitedeployment
+89	Can change site deployment	26	change_sitedeployment
+90	Can delete site deployment	26	delete_sitedeployment
+91	Can add controller site	27	add_controllersite
+92	Can change controller site	27	change_controllersite
+93	Can delete controller site	27	delete_controllersite
+94	Can add diag	28	add_diag
+95	Can change diag	28	change_diag
+96	Can delete diag	28	delete_diag
+97	Can add dashboard view	29	add_dashboardview
+98	Can change dashboard view	29	change_dashboardview
+99	Can delete dashboard view	29	delete_dashboardview
+100	Can add controller dashboard view	30	add_controllerdashboardview
+101	Can change controller dashboard view	30	change_controllerdashboardview
+102	Can delete controller dashboard view	30	delete_controllerdashboardview
+103	Can add user	31	add_user
+104	Can change user	31	change_user
+105	Can delete user	31	delete_user
+106	Can add user dashboard view	32	add_userdashboardview
+107	Can change user dashboard view	32	change_userdashboardview
+108	Can delete user dashboard view	32	delete_userdashboardview
+109	Can add service class	33	add_serviceclass
+110	Can change service class	33	change_serviceclass
+111	Can delete service class	33	delete_serviceclass
+112	Can add flavor	34	add_flavor
+113	Can change flavor	34	change_flavor
+114	Can delete flavor	34	delete_flavor
+115	Can add image	35	add_image
+116	Can change image	35	change_image
+117	Can delete image	35	delete_image
+118	Can add image deployments	36	add_imagedeployments
+119	Can change image deployments	36	change_imagedeployments
+120	Can delete image deployments	36	delete_imagedeployments
+121	Can add controller images	37	add_controllerimages
+122	Can change controller images	37	change_controllerimages
+123	Can delete controller images	37	delete_controllerimages
+124	Can add slice	38	add_slice
+125	Can change slice	38	change_slice
+126	Can delete slice	38	delete_slice
+127	Can add slice role	39	add_slicerole
+128	Can change slice role	39	change_slicerole
+129	Can delete slice role	39	delete_slicerole
+130	Can add slice privilege	40	add_sliceprivilege
+131	Can change slice privilege	40	change_sliceprivilege
+132	Can delete slice privilege	40	delete_sliceprivilege
+133	Can add controller slice	41	add_controllerslice
+134	Can change controller slice	41	change_controllerslice
+135	Can delete controller slice	41	delete_controllerslice
+136	Can add controller user	42	add_controlleruser
+137	Can change controller user	42	change_controlleruser
+138	Can delete controller user	42	delete_controlleruser
+139	Can add controller site privilege	43	add_controllersiteprivilege
+140	Can change controller site privilege	43	change_controllersiteprivilege
+141	Can delete controller site privilege	43	delete_controllersiteprivilege
+142	Can add controller slice privilege	44	add_controllersliceprivilege
+143	Can change controller slice privilege	44	change_controllersliceprivilege
+144	Can delete controller slice privilege	44	delete_controllersliceprivilege
+145	Can add service resource	45	add_serviceresource
+146	Can change service resource	45	change_serviceresource
+147	Can delete service resource	45	delete_serviceresource
+148	Can add user credential	46	add_usercredential
+149	Can change user credential	46	change_usercredential
+150	Can delete user credential	46	delete_usercredential
+151	Can add site credential	47	add_sitecredential
+152	Can change site credential	47	change_sitecredential
+153	Can delete site credential	47	delete_sitecredential
+154	Can add slice credential	48	add_slicecredential
+155	Can change slice credential	48	change_slicecredential
+156	Can delete slice credential	48	delete_slicecredential
+157	Can add controller credential	49	add_controllercredential
+158	Can change controller credential	49	change_controllercredential
+159	Can delete controller credential	49	delete_controllercredential
+160	Can add node	50	add_node
+161	Can change node	50	change_node
+162	Can delete node	50	delete_node
+163	Can add node label	51	add_nodelabel
+164	Can change node label	51	change_nodelabel
+165	Can delete node label	51	delete_nodelabel
+166	Can add slice tag	52	add_slicetag
+167	Can change slice tag	52	change_slicetag
+168	Can delete slice tag	52	delete_slicetag
+169	Can add instance	53	add_instance
+170	Can change instance	53	change_instance
+171	Can delete instance	53	delete_instance
+172	Can add reservation	54	add_reservation
+173	Can change reservation	54	change_reservation
+174	Can delete reservation	54	delete_reservation
+175	Can add reserved resource	55	add_reservedresource
+176	Can change reserved resource	55	change_reservedresource
+177	Can delete reserved resource	55	delete_reservedresource
+178	Can add network template	56	add_networktemplate
+179	Can change network template	56	change_networktemplate
+180	Can delete network template	56	delete_networktemplate
+181	Can add network	57	add_network
+182	Can change network	57	change_network
+183	Can delete network	57	delete_network
+184	Can add controller network	58	add_controllernetwork
+185	Can change controller network	58	change_controllernetwork
+186	Can delete controller network	58	delete_controllernetwork
+187	Can add network slice	59	add_networkslice
+188	Can change network slice	59	change_networkslice
+189	Can delete network slice	59	delete_networkslice
+190	Can add port	60	add_port
+191	Can change port	60	change_port
+192	Can delete port	60	delete_port
+193	Can add router	61	add_router
+194	Can change router	61	change_router
+195	Can delete router	61	delete_router
+196	Can add network parameter type	62	add_networkparametertype
+197	Can change network parameter type	62	change_networkparametertype
+198	Can delete network parameter type	62	delete_networkparametertype
+199	Can add network parameter	63	add_networkparameter
+200	Can change network parameter	63	change_networkparameter
+201	Can delete network parameter	63	delete_networkparameter
+202	Can add address pool	64	add_addresspool
+203	Can change address pool	64	change_addresspool
+204	Can delete address pool	64	delete_addresspool
+205	Can add account	65	add_account
+206	Can change account	65	change_account
+207	Can delete account	65	delete_account
+208	Can add invoice	66	add_invoice
+209	Can change invoice	66	change_invoice
+210	Can delete invoice	66	delete_invoice
+211	Can add usable object	67	add_usableobject
+212	Can change usable object	67	change_usableobject
+213	Can delete usable object	67	delete_usableobject
+214	Can add payment	68	add_payment
+215	Can change payment	68	change_payment
+216	Can delete payment	68	delete_payment
+217	Can add charge	69	add_charge
+218	Can change charge	69	change_charge
+219	Can delete charge	69	delete_charge
+220	Can add program	70	add_program
+221	Can change program	70	change_program
+222	Can delete program	70	delete_program
+223	Can add HPC Service	75	add_hpcservice
+224	Can change HPC Service	75	change_hpcservice
+225	Can delete HPC Service	75	delete_hpcservice
+226	Can add service provider	76	add_serviceprovider
+227	Can change service provider	76	change_serviceprovider
+228	Can delete service provider	76	delete_serviceprovider
+229	Can add content provider	77	add_contentprovider
+230	Can change content provider	77	change_contentprovider
+231	Can delete content provider	77	delete_contentprovider
+232	Can add origin server	78	add_originserver
+233	Can change origin server	78	change_originserver
+234	Can delete origin server	78	delete_originserver
+235	Can add cdn prefix	79	add_cdnprefix
+236	Can change cdn prefix	79	change_cdnprefix
+237	Can delete cdn prefix	79	delete_cdnprefix
+238	Can add access map	80	add_accessmap
+239	Can change access map	80	change_accessmap
+240	Can delete access map	80	delete_accessmap
+241	Can add site map	81	add_sitemap
+242	Can change site map	81	change_sitemap
+243	Can delete site map	81	delete_sitemap
+244	Can add hpc health check	82	add_hpchealthcheck
+245	Can change hpc health check	82	change_hpchealthcheck
+246	Can delete hpc health check	82	delete_hpchealthcheck
+247	Can add cord subscriber root	11	add_cordsubscriberroot
+248	Can change cord subscriber root	11	change_cordsubscriberroot
+249	Can delete cord subscriber root	11	delete_cordsubscriberroot
+250	Can add vOLT Service	7	add_voltservice
+251	Can change vOLT Service	7	change_voltservice
+252	Can delete vOLT Service	7	delete_voltservice
+253	Can add volt tenant	12	add_volttenant
+254	Can change volt tenant	12	change_volttenant
+255	Can delete volt tenant	12	delete_volttenant
+256	Can add vSG Service	7	add_vsgservice
+257	Can change vSG Service	7	change_vsgservice
+258	Can delete vSG Service	7	delete_vsgservice
+259	Can add vsg tenant	12	add_vsgtenant
+260	Can change vsg tenant	12	change_vsgtenant
+261	Can delete vsg tenant	12	delete_vsgtenant
+262	Can add vBNG Service	7	add_vbngservice
+263	Can change vBNG Service	7	change_vbngservice
+264	Can delete vBNG Service	7	delete_vbngservice
+265	Can add vbng tenant	12	add_vbngtenant
+266	Can change vbng tenant	12	change_vbngtenant
+267	Can delete vbng tenant	12	delete_vbngtenant
+274	Can add ONOS Service	7	add_onosservice
+275	Can change ONOS Service	7	change_onosservice
+276	Can delete ONOS Service	7	delete_onosservice
+277	Can add onos app	12	add_onosapp
+278	Can change onos app	12	change_onosapp
+279	Can delete onos app	12	delete_onosapp
+280	Can add Ceilometer Service	7	add_ceilometerservice
+281	Can change Ceilometer Service	7	change_ceilometerservice
+282	Can delete Ceilometer Service	7	delete_ceilometerservice
+283	Can add monitoring channel	12	add_monitoringchannel
+284	Can change monitoring channel	12	change_monitoringchannel
+285	Can delete monitoring channel	12	delete_monitoringchannel
+286	Can add sFlow Collection Service	7	add_sflowservice
+287	Can change sFlow Collection Service	7	change_sflowservice
+288	Can delete sFlow Collection Service	7	delete_sflowservice
+289	Can add s flow tenant	12	add_sflowtenant
+290	Can change s flow tenant	12	change_sflowtenant
+291	Can delete s flow tenant	12	delete_sflowtenant
+292	Can add Request Router Service	98	add_requestrouterservice
+293	Can change Request Router Service	98	change_requestrouterservice
+294	Can delete Request Router Service	98	delete_requestrouterservice
+295	Can add service map	99	add_servicemap
+296	Can change service map	99	change_servicemap
+297	Can delete service map	99	delete_servicemap
+298	Can add Syndicate Service	100	add_syndicateservice
+299	Can change Syndicate Service	100	change_syndicateservice
+300	Can delete Syndicate Service	100	delete_syndicateservice
+301	Can add syndicate principal	101	add_syndicateprincipal
+302	Can change syndicate principal	101	change_syndicateprincipal
+303	Can delete syndicate principal	101	delete_syndicateprincipal
+304	Can add volume	102	add_volume
+305	Can change volume	102	change_volume
+306	Can delete volume	102	delete_volume
+307	Can add volume access right	103	add_volumeaccessright
+308	Can change volume access right	103	change_volumeaccessright
+309	Can delete volume access right	103	delete_volumeaccessright
+310	Can add slice secret	104	add_slicesecret
+311	Can change slice secret	104	change_slicesecret
+312	Can delete slice secret	104	delete_slicesecret
+313	Can add volume slice	105	add_volumeslice
+314	Can change volume slice	105	change_volumeslice
+315	Can delete volume slice	105	delete_volumeslice
+316	Can add vTR Service	7	add_vtrservice
+317	Can change vTR Service	7	change_vtrservice
+318	Can delete vTR Service	7	delete_vtrservice
+319	Can add vtr tenant	12	add_vtrtenant
+320	Can change vtr tenant	12	change_vtrtenant
+321	Can delete vtr tenant	12	delete_vtrtenant
+\.
+
+
+--
+-- Name: auth_permission_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('auth_permission_id_seq', 321, true);
+
+
+--
+-- Data for Name: core_account; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_account (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, site_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_account_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_account_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_addresspool; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_addresspool (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, addresses, inuse) FROM stdin;
+\.
+
+
+--
+-- Name: core_addresspool_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_addresspool_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_charge; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_charge (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, kind, state, date, amount, "coreHours", account_id, invoice_id, object_id, slice_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_charge_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_charge_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_controller; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_controller (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, backend_type, version, auth_url, admin_user, admin_password, admin_tenant, domain, rabbit_host, rabbit_user, rabbit_password, deployment_id) FROM stdin;
+1	2016-04-05 17:41:57.870164+00	2016-04-05 17:41:57.870189+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	t	CloudLab	OpenStack	Juno	http://sample/v2	admin	adminpassword	admin	Default	\N	\N	\N	1
+\.
+
+
+--
+-- Name: core_controller_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_controller_id_seq', 1, true);
+
+
+--
+-- Data for Name: core_controllercredential; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_controllercredential (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, key_id, enc_value, controller_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_controllercredential_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_controllercredential_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_controllerdashboardview; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_controllerdashboardview (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, enabled, url, controller_id, "dashboardView_id") FROM stdin;
+\.
+
+
+--
+-- Name: core_controllerdashboardview_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_controllerdashboardview_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_controllerimages; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_controllerimages (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, glance_image_id, controller_id, image_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_controllerimages_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_controllerimages_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_controllernetwork; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_controllernetwork (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, net_id, router_id, subnet_id, subnet, controller_id, network_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_controllernetwork_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_controllernetwork_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_controllerrole; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_controllerrole (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, role) FROM stdin;
+\.
+
+
+--
+-- Name: core_controllerrole_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_controllerrole_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_controllersite; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_controllersite (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, tenant_id, controller_id, site_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_controllersite_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_controllersite_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_controllersiteprivilege; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_controllersiteprivilege (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, role_id, controller_id, site_privilege_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_controllersiteprivilege_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_controllersiteprivilege_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_controllerslice; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_controllerslice (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, tenant_id, controller_id, slice_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_controllerslice_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_controllerslice_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_controllersliceprivilege; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_controllersliceprivilege (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, role_id, controller_id, slice_privilege_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_controllersliceprivilege_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_controllersliceprivilege_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_controlleruser; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_controlleruser (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, kuser_id, controller_id, user_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_controlleruser_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_controlleruser_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_dashboardview; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_dashboardview (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, url, enabled) FROM stdin;
+2	2015-02-17 22:06:39.011+00	2015-02-17 22:06:39.011+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	Customize	template:customize	t
+3	2015-02-17 22:06:39.069+00	2015-02-17 22:06:39.244+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	Tenant	template:xosTenant	t
+4	2015-02-17 22:06:39.302+00	2015-02-17 22:06:39.302+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	Developer	template:xosDeveloper_datatables	t
+5	2016-04-05 17:42:11.341605+00	2016-04-05 17:42:11.341634+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	Customer Care	template:xosDiagnostic	t
+6	2016-04-05 18:46:36.638199+00	2016-04-05 18:46:36.638233+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	truckroll	template:xosTruckroll	t
+\.
+
+
+--
+-- Data for Name: core_dashboardview_deployments; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_dashboardview_deployments (id, dashboardview_id, deployment_id) FROM stdin;
+1	3	1
+\.
+
+
+--
+-- Name: core_dashboardview_deployments_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_dashboardview_deployments_id_seq', 1, true);
+
+
+--
+-- Name: core_dashboardview_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_dashboardview_id_seq', 6, true);
+
+
+--
+-- Data for Name: core_deployment; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_deployment (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, "accessControl") FROM stdin;
+1	2015-02-17 22:06:37.789+00	2016-04-05 17:41:57.865591+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	t	MyDeployment	allow all
+\.
+
+
+--
+-- Name: core_deployment_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_deployment_id_seq', 1, true);
+
+
+--
+-- Data for Name: core_deploymentprivilege; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_deploymentprivilege (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, deployment_id, role_id, user_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_deploymentprivilege_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_deploymentprivilege_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_deploymentrole; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_deploymentrole (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, role) FROM stdin;
+1	2015-02-17 22:06:38.894+00	2015-02-17 22:06:38.894+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	admin
+\.
+
+
+--
+-- Name: core_deploymentrole_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_deploymentrole_id_seq', 1, true);
+
+
+--
+-- Data for Name: core_diag; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_diag (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name) FROM stdin;
+\.
+
+
+--
+-- Name: core_diag_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_diag_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_flavor; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_flavor (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, description, flavor, "order", "default") FROM stdin;
+1	2015-02-17 22:06:38.095+00	2015-02-17 22:06:38.236+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	t	m1.small	\N	m1.small	0	f
+2	2015-02-17 22:06:38.287+00	2015-02-17 22:06:38.394+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	t	m1.medium	\N	m1.medium	0	f
+3	2015-02-17 22:06:38.445+00	2015-02-17 22:06:38.561+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	t	m1.large	\N	m1.large	0	f
+\.
+
+
+--
+-- Data for Name: core_flavor_deployments; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_flavor_deployments (id, flavor_id, deployment_id) FROM stdin;
+1	1	1
+2	2	1
+3	3	1
+\.
+
+
+--
+-- Name: core_flavor_deployments_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_flavor_deployments_id_seq', 3, true);
+
+
+--
+-- Name: core_flavor_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_flavor_id_seq', 3, true);
+
+
+--
+-- Data for Name: core_image; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_image (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, kind, disk_format, container_format, path, tag) FROM stdin;
+1	2016-04-05 17:41:57.84418+00	2016-04-05 17:41:57.844213+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	trusty-server-multi-nic	vm	QCOW2	BARE	\N	\N
+2	2016-04-05 17:42:10.875232+00	2016-04-05 17:42:10.87526+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	docker-vcpe	container	na	na	andybavier/docker-vcpe	develop
+\.
+
+
+--
+-- Name: core_image_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_image_id_seq', 2, true);
+
+
+--
+-- Data for Name: core_imagedeployments; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_imagedeployments (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, deployment_id, image_id) FROM stdin;
+1	2016-04-05 17:41:57.855026+00	2016-04-05 17:41:57.855053+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	1	1
+\.
+
+
+--
+-- Name: core_imagedeployments_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_imagedeployments_id_seq', 1, true);
+
+
+--
+-- Data for Name: core_instance; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_instance (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, instance_id, instance_uuid, name, instance_name, ip, "numberCores", "userData", isolation, volumes, creator_id, deployment_id, flavor_id, image_id, node_id, parent_id, slice_id) FROM stdin;
+1	2016-04-05 17:42:11.041143+00	2016-04-05 17:42:11.041177+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	\N	\N	mysite_vcpe	\N	\N	0	\N	vm	/etc/dnsmasq.d,/etc/ufw	1	1	1	1	1	\N	1
+2	2016-04-05 17:42:11.143845+00	2016-04-05 17:42:11.505705+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	\N	\N	onos_app_1	\N	\N	0	\N	vm	\N	1	1	1	1	2	\N	5
+3	2016-04-05 17:42:11.180193+00	2016-04-05 17:42:11.508623+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	\N	\N	client1	\N	\N	0	\N	vm	\N	1	1	1	1	1	\N	4
+4	2016-04-05 17:42:11.24868+00	2016-04-05 17:42:11.512938+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	\N	\N	onos_app_2	\N	\N	0	\N	vm	\N	1	1	1	1	2	\N	6
+5	2016-04-05 17:42:11.32255+00	2016-04-05 17:42:11.516734+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	\N	\N	ovs_volt	\N	\N	0	\N	vm	\N	1	1	1	1	1	\N	3
+6	2016-04-05 17:42:11.336187+00	2016-04-05 17:42:11.519581+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	\N	\N	ovs_vbng	\N	\N	0	\N	vm	\N	1	1	1	1	2	\N	2
+\.
+
+
+--
+-- Name: core_instance_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_instance_id_seq', 6, true);
+
+
+--
+-- Data for Name: core_invoice; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_invoice (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, date, account_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_invoice_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_invoice_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_network; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_network (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, subnet, ports, labels, guaranteed_bandwidth, permit_all_slices, topology_parameters, controller_url, controller_parameters, network_id, router_id, subnet_id, autoconnect, owner_id, template_id) FROM stdin;
+1	2016-04-05 17:42:10.926943+00	2016-04-05 17:42:10.926967+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	wan_network		\N	\N	0	t	\N	\N	\N	\N	\N	\N	f	1	1
+2	2016-04-05 17:42:10.963196+00	2016-04-05 17:42:10.963223+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	lan_network		\N	\N	0	t	\N	\N	\N	\N	\N	\N	f	1	1
+3	2016-04-05 17:42:11.188082+00	2016-04-05 17:42:11.188111+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	public_network		\N	\N	0	t	\N	\N	\N	\N	\N	\N	f	2	5
+4	2016-04-05 17:42:11.370317+00	2016-04-05 17:42:11.370343+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	subscriber_network		\N	\N	0	t	\N	\N	\N	\N	\N	\N	f	3	1
+\.
+
+
+--
+-- Name: core_network_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_network_id_seq', 4, true);
+
+
+--
+-- Data for Name: core_network_permitted_slices; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_network_permitted_slices (id, network_id, slice_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_network_permitted_slices_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_network_permitted_slices_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_networkparameter; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_networkparameter (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, value, object_id, content_type_id, parameter_id) FROM stdin;
+1	2016-04-05 17:42:11.062812+00	2016-04-05 17:42:11.062838+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	432	1	60	5
+2	2016-04-05 17:42:11.068024+00	2016-04-05 17:42:11.068048+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	222	1	60	6
+3	2016-04-05 17:42:11.071633+00	2016-04-05 17:42:11.071645+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	stag-222	1	60	4
+\.
+
+
+--
+-- Name: core_networkparameter_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_networkparameter_id_seq', 3, true);
+
+
+--
+-- Data for Name: core_networkparametertype; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_networkparametertype (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, description) FROM stdin;
+1	2016-04-05 17:42:09.268578+00	2016-04-05 17:42:09.26861+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	bridge	
+2	2016-04-05 17:42:09.271808+00	2016-04-05 17:42:09.271835+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	next_hop	
+3	2016-04-05 17:42:09.274484+00	2016-04-05 17:42:09.27451+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	device	
+4	2016-04-05 17:42:09.276919+00	2016-04-05 17:42:09.276946+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	neutron_port_name	
+5	2016-04-05 17:42:09.279956+00	2016-04-05 17:42:09.279984+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	c_tag	
+6	2016-04-05 17:42:09.282047+00	2016-04-05 17:42:09.282073+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	s_tag	
+\.
+
+
+--
+-- Name: core_networkparametertype_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_networkparametertype_id_seq', 6, true);
+
+
+--
+-- Data for Name: core_networkslice; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_networkslice (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, network_id, slice_id) FROM stdin;
+1	2016-04-05 17:42:10.934539+00	2016-04-05 17:42:10.93457+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	1	1
+2	2016-04-05 17:42:10.948028+00	2016-04-05 17:42:10.948057+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	1	2
+3	2016-04-05 17:42:10.967241+00	2016-04-05 17:42:10.967274+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	2	1
+4	2016-04-05 17:42:10.971959+00	2016-04-05 17:42:10.971989+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	2	3
+5	2016-04-05 17:42:11.193081+00	2016-04-05 17:42:11.193111+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	3	2
+6	2016-04-05 17:42:11.37604+00	2016-04-05 17:42:11.376067+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	4	3
+7	2016-04-05 17:42:11.380003+00	2016-04-05 17:42:11.380064+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	4	4
+\.
+
+
+--
+-- Name: core_networkslice_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_networkslice_id_seq', 7, true);
+
+
+--
+-- Data for Name: core_networktemplate; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_networktemplate (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, description, guaranteed_bandwidth, visibility, translation, access, shared_network_name, shared_network_id, topology_kind, controller_kind) FROM stdin;
+3	2015-02-17 22:06:39.536+00	2015-02-17 22:06:39.536+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	Public dedicated IPv4	Connect a instance to the public network	0	public	none	\N	ext-net	\N	bigswitch	\N
+2	2015-02-17 22:06:39.477+00	2016-04-05 17:41:57.912367+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	Public shared IPv4	Connect a instance to the public network	0	private	NAT	\N	nat-net	\N	bigswitch	\N
+1	2015-02-17 22:06:39.419+00	2016-04-05 17:42:10.920852+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	Private	A private virtual network	0	private	none	\N	\N	\N	bigswitch	\N
+4	2016-04-05 17:42:10.950692+00	2016-04-05 17:42:10.950719+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	Private-Direct	\N	0	private	none	direct	\N	\N	bigswitch	\N
+5	2016-04-05 17:42:11.11517+00	2016-04-05 17:42:11.115197+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	Public network hack	\N	0	private	NAT	\N	tun0-net	\N	bigswitch	\N
+6	2016-04-05 17:42:11.309984+00	2016-04-05 17:42:11.310025+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	Private-Indirect	\N	0	private	none	indirect	\N	\N	bigswitch	\N
+\.
+
+
+--
+-- Name: core_networktemplate_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_networktemplate_id_seq', 6, true);
+
+
+--
+-- Data for Name: core_node; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_node (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, site_id, site_deployment_id) FROM stdin;
+1	2016-04-05 17:41:57.898724+00	2016-04-05 17:41:57.898751+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	t	node2.opencloud.us	1	1
+2	2016-04-05 17:41:57.908261+00	2016-04-05 17:41:57.908283+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	t	node1.opencloud.us	1	1
+\.
+
+
+--
+-- Name: core_node_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_node_id_seq', 2, true);
+
+
+--
+-- Data for Name: core_nodelabel; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_nodelabel (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name) FROM stdin;
+\.
+
+
+--
+-- Name: core_nodelabel_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_nodelabel_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_nodelabel_node; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_nodelabel_node (id, nodelabel_id, node_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_nodelabel_node_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_nodelabel_node_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_payment; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_payment (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, amount, date, account_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_payment_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_payment_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_port; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_port (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, ip, port_id, mac, xos_created, instance_id, network_id) FROM stdin;
+1	2016-04-05 17:42:11.049219+00	2016-04-05 17:42:11.073126+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	\N	\N	\N	f	1	2
+\.
+
+
+--
+-- Name: core_port_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_port_id_seq', 1, true);
+
+
+--
+-- Data for Name: core_program; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_program (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, description, kind, command, contents, output, messages, status, owner_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_program_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_program_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_project; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_project (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name) FROM stdin;
+\.
+
+
+--
+-- Name: core_project_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_project_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_reservation; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_reservation (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, "startTime", duration, slice_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_reservation_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_reservation_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_reservedresource; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_reservedresource (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, quantity, instance_id, "reservationSet_id", resource_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_reservedresource_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_reservedresource_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_role; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_role (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, role_type, role, description, content_type_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_role_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_role_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_router; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_router (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, owner_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_router_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_router_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_router_networks; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_router_networks (id, router_id, network_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_router_networks_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_router_networks_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_router_permittedNetworks; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY "core_router_permittedNetworks" (id, router_id, network_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_router_permittedNetworks_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('"core_router_permittedNetworks_id_seq"', 1, false);
+
+
+--
+-- Data for Name: core_service; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_service (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, description, enabled, kind, name, "versionNumber", published, view_url, icon_url, public_key, private_key_fn, service_specific_id, service_specific_attribute) FROM stdin;
+1	2016-04-05 17:42:10.879967+00	2016-04-05 17:42:10.879992+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	\N	t	vBNG	service_vbng		t	/admin/cord/vbngservice/$id$/	\N	\N	\N	\N	\N
+2	2016-04-05 17:42:10.891972+00	2016-04-05 17:42:10.892005+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	\N	t	vCPE	service_vsg		t	/admin/cord/vsgservice/$id$/	\N	\N	\N	\N	{"backend_network_label": "hpc_client"}
+3	2016-04-05 17:42:10.974546+00	2016-04-05 17:42:10.97457+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	\N	t	vOLT	service_volt		t	/admin/cord/voltservice/$id$/	\N	\N	\N	\N	\N
+4	2016-04-05 17:42:11.119371+00	2016-04-05 17:42:11.119403+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	\N	t	onos	service_ONOS_vBNG		t	/admin/onos/onosservice/$id$/	\N	\N	\N	\N	{"no_container": false}
+5	2016-04-05 17:42:11.15434+00	2016-04-05 17:42:11.154366+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	\N	t	onos	service_ONOS_vOLT		t	/admin/onos/onosservice/$id$/	\N	\N	\N	\N	{"no_container": false}
+6	2016-04-05 17:42:11.183911+00	2016-04-05 17:42:11.183952+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	\N	t	vTR	service_vtr		t	/admin/vtr/vtrservice/$id$/	\N	\N	\N	\N	\N
+\.
+
+
+--
+-- Name: core_service_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_service_id_seq', 6, true);
+
+
+--
+-- Data for Name: core_serviceattribute; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_serviceattribute (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, value, service_id) FROM stdin;
+1	2016-04-05 17:42:11.158787+00	2016-04-05 17:42:11.158814+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	rest_onos/v1/network/configuration/	{\n  "devices" : {\n    "of:0000000000000001" : {\n      "accessDevice" : {\n        "uplink" : "2",\n        "vlan"   : "222",\n        "defaultVlan" : "1"\n      },\n      "basic" : {\n        "driver" : "pmc-olt"\n      }\n    }\n  }\n}\n	5
+\.
+
+
+--
+-- Name: core_serviceattribute_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_serviceattribute_id_seq', 1, true);
+
+
+--
+-- Data for Name: core_serviceclass; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_serviceclass (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, description, commitment, "membershipFee", "membershipFeeMonths", "upgradeRequiresApproval") FROM stdin;
+1	2015-02-17 22:06:39.361+00	2015-02-17 22:06:39.361+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	Best Effort	Best Effort	365	0	12	f
+\.
+
+
+--
+-- Name: core_serviceclass_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_serviceclass_id_seq', 1, true);
+
+
+--
+-- Data for Name: core_serviceclass_upgradeFrom; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY "core_serviceclass_upgradeFrom" (id, from_serviceclass_id, to_serviceclass_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_serviceclass_upgradeFrom_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('"core_serviceclass_upgradeFrom_id_seq"', 1, false);
+
+
+--
+-- Data for Name: core_serviceprivilege; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_serviceprivilege (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, role_id, service_id, user_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_serviceprivilege_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_serviceprivilege_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_serviceresource; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_serviceresource (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, "maxUnitsDeployment", "maxUnitsNode", "maxDuration", "bucketInRate", "bucketMaxSize", cost, "calendarReservable", "serviceClass_id") FROM stdin;
+\.
+
+
+--
+-- Name: core_serviceresource_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_serviceresource_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_servicerole; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_servicerole (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, role) FROM stdin;
+\.
+
+
+--
+-- Name: core_servicerole_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_servicerole_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_site; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_site (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, site_url, enabled, hosts_nodes, hosts_users, location, longitude, latitude, login_base, is_public, abbreviated_name) FROM stdin;
+1	2015-02-17 22:06:37.837+00	2016-04-05 17:42:10.798018+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	mysite	http://opencloud.us/	t	t	t	0,0	\N	\N	mysite	t	mysite
+\.
+
+
+--
+-- Name: core_site_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_site_id_seq', 1, true);
+
+
+--
+-- Data for Name: core_sitecredential; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_sitecredential (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, key_id, enc_value, site_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_sitecredential_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_sitecredential_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_sitedeployment; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_sitedeployment (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, availability_zone, controller_id, deployment_id, site_id) FROM stdin;
+1	2015-02-17 22:06:37.893+00	2016-04-05 17:41:57.888081+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	\N	1	1	1
+\.
+
+
+--
+-- Name: core_sitedeployment_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_sitedeployment_id_seq', 1, true);
+
+
+--
+-- Data for Name: core_siteprivilege; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_siteprivilege (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, role_id, site_id, user_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_siteprivilege_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_siteprivilege_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_siterole; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_siterole (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, role) FROM stdin;
+1	2015-02-17 22:06:38.62+00	2015-02-17 22:06:38.62+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	admin
+2	2015-02-17 22:06:38.669+00	2015-02-17 22:06:38.67+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	pi
+3	2015-02-17 22:06:38.73+00	2015-02-17 22:06:38.731+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	tech
+\.
+
+
+--
+-- Name: core_siterole_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_siterole_id_seq', 3, true);
+
+
+--
+-- Data for Name: core_slice; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_slice (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, enabled, omf_friendly, description, slice_url, max_instances, network, exposed_ports, mount_data_sets, default_isolation, creator_id, default_flavor_id, default_image_id, service_id, "serviceClass_id", site_id) FROM stdin;
+1	2016-04-05 17:42:10.909075+00	2016-04-05 17:42:10.909102+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	mysite_vcpe	t	f			10	\N	\N	GenBank	vm	1	\N	\N	2	1	1
+2	2016-04-05 17:42:10.915949+00	2016-04-05 17:42:10.915976+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	mysite_vbng	t	f			10	\N	\N	GenBank	vm	1	\N	\N	\N	1	1
+3	2016-04-05 17:42:10.958832+00	2016-04-05 17:42:10.958858+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	mysite_volt	t	f			10	\N	\N	GenBank	vm	1	\N	\N	\N	1	1
+4	2016-04-05 17:42:11.112233+00	2016-04-05 17:42:11.112263+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	mysite_clients	t	f			10	\N	\N	GenBank	vm	1	\N	\N	\N	1	1
+5	2016-04-05 17:42:11.127649+00	2016-04-05 17:42:11.127678+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	mysite_onos_vbng	t	f			10	\N	\N	GenBank	vm	1	\N	\N	4	1	1
+6	2016-04-05 17:42:11.167287+00	2016-04-05 17:42:11.167313+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	mysite_onos_volt	t	f			10	\N	\N	GenBank	vm	1	\N	\N	5	1	1
+\.
+
+
+--
+-- Name: core_slice_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_slice_id_seq', 6, true);
+
+
+--
+-- Data for Name: core_slicecredential; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_slicecredential (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, key_id, enc_value, slice_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_slicecredential_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_slicecredential_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_sliceprivilege; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_sliceprivilege (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, role_id, slice_id, user_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_sliceprivilege_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_sliceprivilege_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_slicerole; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_slicerole (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, role) FROM stdin;
+1	2015-02-17 22:06:38.778+00	2015-02-17 22:06:38.778+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	admin
+2	2015-02-17 22:06:38.836+00	2015-02-17 22:06:38.836+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	access
+\.
+
+
+--
+-- Name: core_slicerole_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_slicerole_id_seq', 2, true);
+
+
+--
+-- Data for Name: core_slicetag; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_slicetag (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, value, slice_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_slicetag_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_slicetag_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_tag; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_tag (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, value, object_id, content_type_id, service_id) FROM stdin;
+1	2016-04-05 17:42:11.08141+00	2016-04-05 17:42:11.081432+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	s_tag	222	1	53	2
+\.
+
+
+--
+-- Name: core_tag_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_tag_id_seq', 1, true);
+
+
+--
+-- Data for Name: core_tenant; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_tenant (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, kind, service_specific_id, service_specific_attribute, connect_method, provider_service_id, subscriber_root_id, subscriber_service_id, subscriber_tenant_id, subscriber_user_id) FROM stdin;
+1	2016-04-05 17:42:10.897527+00	2016-04-05 17:42:10.897558+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	coarse	\N	\N	na	1	\N	2	\N	\N
+2	2016-04-05 17:42:10.978184+00	2016-04-05 17:42:10.978207+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	coarse	\N	\N	na	2	\N	3	\N	\N
+3	2016-04-05 17:42:10.995794+00	2016-04-05 17:42:10.995832+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	vOLT	123	{"creator_id": 1, "c_tag": "432", "s_tag": "222"}	na	3	1	\N	\N	\N
+5	2016-04-05 17:42:11.086763+00	2016-04-05 17:42:11.086785+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	vBNG	\N	\N	na	1	\N	\N	4	\N
+6	2016-04-05 17:42:11.224376+00	2016-04-05 17:42:11.22441+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	onos	\N	{"creator_id": 1, "dependencies": "org.onosproject.proxyarp, org.onosproject.virtualbng, org.onosproject.openflow, org.onosproject.fwd", "name": "vBNG_ONOS_app"}	na	4	\N	1	\N	\N
+7	2016-04-05 17:42:11.359136+00	2016-04-05 17:42:11.359169+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	onos	\N	{"creator_id": 1, "dependencies": "org.onosproject.openflow-base, org.onosproject.olt, org.ciena.onos.ext_notifier, org.ciena.onos.volt_event_publisher", "name": "vOLT_ONOS_app", "install_dependencies": "onos-ext-notifier-1.0-SNAPSHOT.oar, onos-ext-volt-event-publisher-1.0-SNAPSHOT.oar"}	na	5	\N	3	\N	\N
+4	2016-04-05 17:42:11.006459+00	2016-04-05 21:11:13.789553+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	vCPE	\N	{"instance_id": 1, "creator_id": 1}	na	2	\N	\N	3	\N
+\.
+
+
+--
+-- Name: core_tenant_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_tenant_id_seq', 7, true);
+
+
+--
+-- Data for Name: core_tenantattribute; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_tenantattribute (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, value, tenant_id) FROM stdin;
+1	2016-04-05 17:42:11.232458+00	2016-04-05 17:42:11.23249+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	config_network-cfg.json	{\n  "ports" : {\n    "of:0000000000000001/1" : {\n      "interfaces" : [\n        {\n          "ips"  : [ "10.0.1.253/24" ],\n          "mac"  : "00:00:00:00:00:99"\n        }\n      ]\n    },\n    "of:0000000000000001/2" : {\n      "interfaces" : [\n        {\n          "ips"  : [ "10.254.0.2/24" ],\n          "mac"  : "00:00:00:00:00:98"\n        }\n      ]\n    }\n  }\n}\n	6
+2	2016-04-05 17:42:11.366036+00	2016-04-05 17:42:11.366073+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	component_config	{\n   "org.ciena.onos.ext_notifier.KafkaNotificationBridge":{\n      "rabbit.user": "<rabbit_user>",\n      "rabbit.password": "<rabbit_password>",\n      "rabbit.host": "<rabbit_host>",\n      "publish.rabbit": "true",\n      "volt.events.rabbit.topic": "notifications.info",\n      "volt.events.rabbit.exchange": "voltlistener",\n      "volt.events.opaque.info": "{project_id: <keystone_tenant_id>, user_id: <keystone_user_id>}",\n      "publish.volt.events": "true"\n   }\n}\n	7
+\.
+
+
+--
+-- Name: core_tenantattribute_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_tenantattribute_id_seq', 2, true);
+
+
+--
+-- Data for Name: core_tenantroot; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_tenantroot (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, kind, name, service_specific_attribute, service_specific_id) FROM stdin;
+1	2016-04-05 17:42:10.85283+00	2016-04-05 21:11:13.773087+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	CordSubscriberRoot	My House	{"status": "enabled", "cdn_enable": false, "users": [{"mac": "01:02:03:04:05:06", "level": "PG_13", "id": 0, "name": "Mom's PC"}, {"mac": "34:36:3B:C9:B6:A6", "id": 1, "name": "Jill's Laptop", "level": "PG_13"}, {"mac": "68:5B:35:9D:91:D5", "level": "PG_13", "id": 2, "name": "Jack's Laptop"}, {"mac": "90:E2:BA:82:F9:75", "id": 3, "name": "Dad's PC", "level": "PG_13"}], "downlink_speed": 1000000000, "url_filter_level": "R", "uplink_speed": 1000000000, "enable_uverse": false, "firewall_enable": false, "url_filter_enable": false}	123
+\.
+
+
+--
+-- Name: core_tenantroot_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_tenantroot_id_seq', 1, true);
+
+
+--
+-- Data for Name: core_tenantrootprivilege; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_tenantrootprivilege (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, role_id, tenant_root_id, user_id) FROM stdin;
+1	2016-04-05 17:42:10.864854+00	2016-04-05 17:42:10.864879+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	1	1	2
+\.
+
+
+--
+-- Name: core_tenantrootprivilege_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_tenantrootprivilege_id_seq', 1, true);
+
+
+--
+-- Data for Name: core_tenantrootrole; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_tenantrootrole (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, role) FROM stdin;
+1	2016-04-05 17:42:10.859991+00	2016-04-05 17:42:10.860017+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	admin
+\.
+
+
+--
+-- Name: core_tenantrootrole_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_tenantrootrole_id_seq', 1, true);
+
+
+--
+-- Data for Name: core_usableobject; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_usableobject (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name) FROM stdin;
+\.
+
+
+--
+-- Name: core_usableobject_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_usableobject_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_user; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_user (id, password, last_login, email, username, firstname, lastname, phone, user_url, public_key, is_active, is_admin, is_staff, is_readonly, is_registering, is_appuser, login_page, created, updated, enacted, policed, backend_status, deleted, write_protect, timezone, site_id) FROM stdin;
+2	pbkdf2_sha256$12000$Oc7yu5OUSNRK$lV5m9OLtVPWAfog5aX0CHYfh4gyLYj1iSvRq+wk8kTk=	2016-04-05 17:42:10.803373+00	johndoe@myhouse.com	johndoe@myhouse.com	john	doe	\N	\N	\N	t	f	t	f	f	f	\N	2016-04-05 17:42:10.844525+00	2016-04-05 17:42:10.844548+00	\N	\N	Provisioning in progress	f	f	America/New_York	1
+1	pbkdf2_sha256$12000$Qufx9iqtaYma$xs0YurPOcj9qYQna/Qrb3K+im9Yr2XEVr0J4Kqek7AE=	2016-04-05 17:42:16.66369+00	padmin@vicci.org	padmin@vicci.org	XOS	admin	\N	\N	\N	t	t	t	f	f	f	\N	2015-02-17 22:06:38.059+00	2016-04-05 17:42:11.387962+00	\N	\N	Provisioning in progress	f	f	America/New_York	1
+\.
+
+
+--
+-- Name: core_user_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_user_id_seq', 2, true);
+
+
+--
+-- Data for Name: core_usercredential; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_usercredential (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, key_id, enc_value, user_id) FROM stdin;
+\.
+
+
+--
+-- Name: core_usercredential_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_usercredential_id_seq', 1, false);
+
+
+--
+-- Data for Name: core_userdashboardview; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY core_userdashboardview (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, "order", "dashboardView_id", user_id) FROM stdin;
+2	2016-04-05 18:46:44.099329+00	2016-04-05 18:46:44.099362+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	0	5	1
+3	2016-04-05 18:46:44.101231+00	2016-04-05 18:46:44.101257+00	\N	\N	{}	0 - Provisioning in progress	f	f	f	f	1	6	1
+\.
+
+
+--
+-- Name: core_userdashboardview_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('core_userdashboardview_id_seq', 3, true);
+
+
+--
+-- Data for Name: django_admin_log; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY django_admin_log (id, action_time, object_id, object_repr, action_flag, change_message, content_type_id, user_id) FROM stdin;
+1	2016-04-05 18:46:36.64407+00	6	truckroll	1		29	1
+\.
+
+
+--
+-- Name: django_admin_log_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('django_admin_log_id_seq', 1, true);
+
+
+--
+-- Data for Name: django_content_type; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY django_content_type (id, name, app_label, model) FROM stdin;
+1	permission	auth	permission
+2	group	auth	group
+3	content type	contenttypes	contenttype
+4	session	sessions	session
+5	log entry	admin	logentry
+6	project	core	project
+7	service	core	service
+8	service attribute	core	serviceattribute
+9	service role	core	servicerole
+10	service privilege	core	serviceprivilege
+11	tenant root	core	tenantroot
+12	tenant	core	tenant
+13	tenant attribute	core	tenantattribute
+14	tenant root role	core	tenantrootrole
+15	tenant root privilege	core	tenantrootprivilege
+16	tag	core	tag
+17	role	core	role
+18	site	core	site
+19	site role	core	siterole
+20	site privilege	core	siteprivilege
+21	deployment	core	deployment
+22	deployment role	core	deploymentrole
+23	deployment privilege	core	deploymentprivilege
+24	controller role	core	controllerrole
+25	controller	core	controller
+26	site deployment	core	sitedeployment
+27	controller site	core	controllersite
+28	diag	core	diag
+29	dashboard view	core	dashboardview
+30	controller dashboard view	core	controllerdashboardview
+31	user	core	user
+32	user dashboard view	core	userdashboardview
+33	service class	core	serviceclass
+34	flavor	core	flavor
+35	image	core	image
+36	image deployments	core	imagedeployments
+37	controller images	core	controllerimages
+38	slice	core	slice
+39	slice role	core	slicerole
+40	slice privilege	core	sliceprivilege
+41	controller slice	core	controllerslice
+42	controller user	core	controlleruser
+43	controller site privilege	core	controllersiteprivilege
+44	controller slice privilege	core	controllersliceprivilege
+45	service resource	core	serviceresource
+46	user credential	core	usercredential
+47	site credential	core	sitecredential
+48	slice credential	core	slicecredential
+49	controller credential	core	controllercredential
+50	node	core	node
+51	node label	core	nodelabel
+52	slice tag	core	slicetag
+53	instance	core	instance
+54	reservation	core	reservation
+55	reserved resource	core	reservedresource
+56	network template	core	networktemplate
+57	network	core	network
+58	controller network	core	controllernetwork
+59	network slice	core	networkslice
+60	port	core	port
+61	router	core	router
+62	network parameter type	core	networkparametertype
+63	network parameter	core	networkparameter
+64	address pool	core	addresspool
+65	account	core	account
+66	invoice	core	invoice
+67	usable object	core	usableobject
+68	payment	core	payment
+69	charge	core	charge
+70	program	core	program
+71	subscriber	core	subscriber
+72	provider	core	provider
+73	tenant with container	core	tenantwithcontainer
+74	coarse tenant	core	coarsetenant
+75	HPC Service	hpc	hpcservice
+76	service provider	hpc	serviceprovider
+77	content provider	hpc	contentprovider
+78	origin server	hpc	originserver
+79	cdn prefix	hpc	cdnprefix
+80	access map	hpc	accessmap
+81	site map	hpc	sitemap
+82	hpc health check	hpc	hpchealthcheck
+83	vBNG Service	cord	vbngservice
+84	vsg tenant	cord	vsgtenant
+85	volt tenant	cord	volttenant
+86	vbng tenant	cord	vbngtenant
+87	cord subscriber root	cord	cordsubscriberroot
+88	vOLT Service	cord	voltservice
+89	vSG Service	cord	vsgservice
+92	ONOS Service	onos	onosservice
+93	onos app	onos	onosapp
+94	s flow tenant	ceilometer	sflowtenant
+95	Ceilometer Service	ceilometer	ceilometerservice
+96	sFlow Collection Service	ceilometer	sflowservice
+97	monitoring channel	ceilometer	monitoringchannel
+98	Request Router Service	requestrouter	requestrouterservice
+99	service map	requestrouter	servicemap
+100	Syndicate Service	syndicate_storage	syndicateservice
+101	syndicate principal	syndicate_storage	syndicateprincipal
+102	volume	syndicate_storage	volume
+103	volume access right	syndicate_storage	volumeaccessright
+104	slice secret	syndicate_storage	slicesecret
+105	volume slice	syndicate_storage	volumeslice
+106	vtr tenant	vtr	vtrtenant
+107	vTR Service	vtr	vtrservice
+\.
+
+
+--
+-- Name: django_content_type_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('django_content_type_id_seq', 107, true);
+
+
+--
+-- Data for Name: django_migrations; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY django_migrations (id, app, name, applied) FROM stdin;
+1	contenttypes	0001_initial	2016-04-05 17:41:25.364327+00
+2	core	0001_initial	2016-04-05 17:41:45.947411+00
+3	admin	0001_initial	2016-04-05 17:41:46.336359+00
+4	auth	0001_initial	2016-04-05 17:41:46.384468+00
+5	ceilometer	0001_initial	2016-04-05 17:41:46.659809+00
+6	cord	0001_initial	2016-04-05 17:41:46.862406+00
+8	hpc	0001_initial	2016-04-05 17:41:50.450946+00
+9	onos	0001_initial	2016-04-05 17:41:50.637887+00
+10	requestrouter	0001_initial	2016-04-05 17:41:51.319325+00
+11	sessions	0001_initial	2016-04-05 17:41:51.331342+00
+12	syndicate_storage	0001_initial	2016-04-05 17:41:53.077489+00
+13	vtr	0001_initial	2016-04-05 17:41:53.270146+00
+\.
+
+
+--
+-- Name: django_migrations_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('django_migrations_id_seq', 13, true);
+
+
+--
+-- Data for Name: django_session; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY django_session (session_key, session_data, expire_date) FROM stdin;
+7ppjuoyfejs1zo7q3rn47a6sciqpqjxs	ZTMzOTkxNDYwNzJiZGI4NDdjMTM2YmU1ZDNjNmI2N2Y0NWM2MDdlMTp7Il9hdXRoX3VzZXJfaGFzaCI6IjVkMTdkNWYxYmQxYjNmOTJhMWJiYzc3YzE0NDNlMzNhNDRiNjQ0YzQiLCJhdXRoIjp7InVzZXJuYW1lIjoicGFkbWluQHZpY2NpLm9yZyIsInBhc3N3b3JkIjoibGV0bWVpbiJ9LCJfYXV0aF91c2VyX2JhY2tlbmQiOiJkamFuZ28uY29udHJpYi5hdXRoLmJhY2tlbmRzLk1vZGVsQmFja2VuZCIsIl9hdXRoX3VzZXJfaWQiOjF9	2016-04-19 17:42:16.666323+00
+\.
+
+
+--
+-- Data for Name: hpc_accessmap; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY hpc_accessmap (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, description, map, "contentProvider_id") FROM stdin;
+\.
+
+
+--
+-- Name: hpc_accessmap_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('hpc_accessmap_id_seq', 1, false);
+
+
+--
+-- Data for Name: hpc_cdnprefix; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY hpc_cdnprefix (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, cdn_prefix_id, prefix, description, enabled, "contentProvider_id", "defaultOriginServer_id") FROM stdin;
+\.
+
+
+--
+-- Name: hpc_cdnprefix_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('hpc_cdnprefix_id_seq', 1, false);
+
+
+--
+-- Data for Name: hpc_contentprovider; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY hpc_contentprovider (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, content_provider_id, name, enabled, description, "serviceProvider_id") FROM stdin;
+\.
+
+
+--
+-- Name: hpc_contentprovider_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('hpc_contentprovider_id_seq', 1, false);
+
+
+--
+-- Data for Name: hpc_contentprovider_users; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY hpc_contentprovider_users (id, contentprovider_id, user_id) FROM stdin;
+\.
+
+
+--
+-- Name: hpc_contentprovider_users_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('hpc_contentprovider_users_id_seq', 1, false);
+
+
+--
+-- Data for Name: hpc_hpchealthcheck; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY hpc_hpchealthcheck (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, kind, resource_name, result_contains, result_min_size, result_max_size, "hpcService_id") FROM stdin;
+\.
+
+
+--
+-- Name: hpc_hpchealthcheck_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('hpc_hpchealthcheck_id_seq', 1, false);
+
+
+--
+-- Data for Name: hpc_hpcservice; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY hpc_hpcservice (service_ptr_id, cmi_hostname, hpc_port80, watcher_hpc_network, watcher_dnsdemux_network, watcher_dnsredir_network) FROM stdin;
+\.
+
+
+--
+-- Data for Name: hpc_originserver; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY hpc_originserver (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, origin_server_id, url, authenticated, enabled, protocol, redirects, description, "contentProvider_id") FROM stdin;
+\.
+
+
+--
+-- Name: hpc_originserver_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('hpc_originserver_id_seq', 1, false);
+
+
+--
+-- Data for Name: hpc_serviceprovider; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY hpc_serviceprovider (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, service_provider_id, name, description, enabled, "hpcService_id") FROM stdin;
+\.
+
+
+--
+-- Name: hpc_serviceprovider_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('hpc_serviceprovider_id_seq', 1, false);
+
+
+--
+-- Data for Name: hpc_sitemap; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY hpc_sitemap (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, description, map, map_id, "cdnPrefix_id", "contentProvider_id", "hpcService_id", "serviceProvider_id") FROM stdin;
+\.
+
+
+--
+-- Name: hpc_sitemap_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('hpc_sitemap_id_seq', 1, false);
+
+
+--
+-- Data for Name: requestrouter_requestrouterservice; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY requestrouter_requestrouterservice (service_ptr_id, "behindNat", "defaultTTL", "defaultAction", "lastResortAction", "maxAnswers") FROM stdin;
+\.
+
+
+--
+-- Data for Name: requestrouter_servicemap; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY requestrouter_servicemap (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, prefix, "siteMap", "accessMap", owner_id, slice_id) FROM stdin;
+\.
+
+
+--
+-- Name: requestrouter_servicemap_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('requestrouter_servicemap_id_seq', 1, false);
+
+
+--
+-- Data for Name: syndicate_storage_slicesecret; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY syndicate_storage_slicesecret (id, secret, slice_id_id) FROM stdin;
+\.
+
+
+--
+-- Name: syndicate_storage_slicesecret_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('syndicate_storage_slicesecret_id_seq', 1, false);
+
+
+--
+-- Data for Name: syndicate_storage_syndicateprincipal; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY syndicate_storage_syndicateprincipal (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, principal_id, public_key_pem, sealed_private_key) FROM stdin;
+\.
+
+
+--
+-- Name: syndicate_storage_syndicateprincipal_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('syndicate_storage_syndicateprincipal_id_seq', 1, false);
+
+
+--
+-- Data for Name: syndicate_storage_syndicateservice; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY syndicate_storage_syndicateservice (service_ptr_id) FROM stdin;
+\.
+
+
+--
+-- Data for Name: syndicate_storage_volume; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY syndicate_storage_volume (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, name, description, blocksize, private, archive, cap_read_data, cap_write_data, cap_host_data, owner_id_id) FROM stdin;
+\.
+
+
+--
+-- Name: syndicate_storage_volume_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('syndicate_storage_volume_id_seq', 1, false);
+
+
+--
+-- Data for Name: syndicate_storage_volumeaccessright; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY syndicate_storage_volumeaccessright (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, cap_read_data, cap_write_data, cap_host_data, owner_id_id, volume_id) FROM stdin;
+\.
+
+
+--
+-- Name: syndicate_storage_volumeaccessright_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('syndicate_storage_volumeaccessright_id_seq', 1, false);
+
+
+--
+-- Data for Name: syndicate_storage_volumeslice; Type: TABLE DATA; Schema: public; Owner: postgres
+--
+
+COPY syndicate_storage_volumeslice (id, created, updated, enacted, policed, backend_register, backend_status, deleted, write_protect, lazy_blocked, no_sync, cap_read_data, cap_write_data, cap_host_data, "UG_portnum", "RG_portnum", credentials_blob, slice_id_id, volume_id_id) FROM stdin;
+\.
+
+
+--
+-- Name: syndicate_storage_volumeslice_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('syndicate_storage_volumeslice_id_seq', 1, false);
+
+
+--
+-- Name: auth_group_name_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY auth_group
+    ADD CONSTRAINT auth_group_name_key UNIQUE (name);
+
+
+--
+-- Name: auth_group_permissions_group_id_permission_id_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY auth_group_permissions
+    ADD CONSTRAINT auth_group_permissions_group_id_permission_id_key UNIQUE (group_id, permission_id);
+
+
+--
+-- Name: auth_group_permissions_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY auth_group_permissions
+    ADD CONSTRAINT auth_group_permissions_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: auth_group_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY auth_group
+    ADD CONSTRAINT auth_group_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: auth_permission_content_type_id_codename_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY auth_permission
+    ADD CONSTRAINT auth_permission_content_type_id_codename_key UNIQUE (content_type_id, codename);
+
+
+--
+-- Name: auth_permission_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY auth_permission
+    ADD CONSTRAINT auth_permission_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_account_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_account
+    ADD CONSTRAINT core_account_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_addresspool_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_addresspool
+    ADD CONSTRAINT core_addresspool_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_charge_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_charge
+    ADD CONSTRAINT core_charge_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_controller_name_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controller
+    ADD CONSTRAINT core_controller_name_key UNIQUE (name);
+
+
+--
+-- Name: core_controller_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controller
+    ADD CONSTRAINT core_controller_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_controllercredential_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllercredential
+    ADD CONSTRAINT core_controllercredential_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_controllerdashboardview_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllerdashboardview
+    ADD CONSTRAINT core_controllerdashboardview_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_controllerimages_image_id_77d3516dbca0a5d3_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllerimages
+    ADD CONSTRAINT core_controllerimages_image_id_77d3516dbca0a5d3_uniq UNIQUE (image_id, controller_id);
+
+
+--
+-- Name: core_controllerimages_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllerimages
+    ADD CONSTRAINT core_controllerimages_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_controllernetwork_network_id_30ce4dc681f2844f_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllernetwork
+    ADD CONSTRAINT core_controllernetwork_network_id_30ce4dc681f2844f_uniq UNIQUE (network_id, controller_id);
+
+
+--
+-- Name: core_controllernetwork_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllernetwork
+    ADD CONSTRAINT core_controllernetwork_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_controllerrole_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllerrole
+    ADD CONSTRAINT core_controllerrole_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_controllerrole_role_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllerrole
+    ADD CONSTRAINT core_controllerrole_role_key UNIQUE (role);
+
+
+--
+-- Name: core_controllersite_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllersite
+    ADD CONSTRAINT core_controllersite_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_controllersite_site_id_22f56d79564bc81b_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllersite
+    ADD CONSTRAINT core_controllersite_site_id_22f56d79564bc81b_uniq UNIQUE (site_id, controller_id);
+
+
+--
+-- Name: core_controllersiteprivileg_controller_id_5d0f19c7a7ceb9e5_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllersiteprivilege
+    ADD CONSTRAINT core_controllersiteprivileg_controller_id_5d0f19c7a7ceb9e5_uniq UNIQUE (controller_id, site_privilege_id, role_id);
+
+
+--
+-- Name: core_controllersiteprivilege_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllersiteprivilege
+    ADD CONSTRAINT core_controllersiteprivilege_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_controllerslice_controller_id_427703e66574ab83_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllerslice
+    ADD CONSTRAINT core_controllerslice_controller_id_427703e66574ab83_uniq UNIQUE (controller_id, slice_id);
+
+
+--
+-- Name: core_controllerslice_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllerslice
+    ADD CONSTRAINT core_controllerslice_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_controllersliceprivile_controller_id_4e8a6f6f999d67c3_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllersliceprivilege
+    ADD CONSTRAINT core_controllersliceprivile_controller_id_4e8a6f6f999d67c3_uniq UNIQUE (controller_id, slice_privilege_id);
+
+
+--
+-- Name: core_controllersliceprivilege_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controllersliceprivilege
+    ADD CONSTRAINT core_controllersliceprivilege_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_controlleruser_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controlleruser
+    ADD CONSTRAINT core_controlleruser_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_controlleruser_user_id_3beb039133bd099b_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_controlleruser
+    ADD CONSTRAINT core_controlleruser_user_id_3beb039133bd099b_uniq UNIQUE (user_id, controller_id);
+
+
+--
+-- Name: core_dashboardview_deployment_dashboardview_id_deployment_i_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_dashboardview_deployments
+    ADD CONSTRAINT core_dashboardview_deployment_dashboardview_id_deployment_i_key UNIQUE (dashboardview_id, deployment_id);
+
+
+--
+-- Name: core_dashboardview_deployments_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_dashboardview_deployments
+    ADD CONSTRAINT core_dashboardview_deployments_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_dashboardview_name_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_dashboardview
+    ADD CONSTRAINT core_dashboardview_name_key UNIQUE (name);
+
+
+--
+-- Name: core_dashboardview_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_dashboardview
+    ADD CONSTRAINT core_dashboardview_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_deployment_name_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_deployment
+    ADD CONSTRAINT core_deployment_name_key UNIQUE (name);
+
+
+--
+-- Name: core_deployment_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_deployment
+    ADD CONSTRAINT core_deployment_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_deploymentprivilege_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_deploymentprivilege
+    ADD CONSTRAINT core_deploymentprivilege_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_deploymentprivilege_user_id_8f49da97c7cff06_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_deploymentprivilege
+    ADD CONSTRAINT core_deploymentprivilege_user_id_8f49da97c7cff06_uniq UNIQUE (user_id, deployment_id, role_id);
+
+
+--
+-- Name: core_deploymentrole_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_deploymentrole
+    ADD CONSTRAINT core_deploymentrole_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_deploymentrole_role_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_deploymentrole
+    ADD CONSTRAINT core_deploymentrole_role_key UNIQUE (role);
+
+
+--
+-- Name: core_diag_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_diag
+    ADD CONSTRAINT core_diag_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_flavor_deployments_flavor_id_deployment_id_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_flavor_deployments
+    ADD CONSTRAINT core_flavor_deployments_flavor_id_deployment_id_key UNIQUE (flavor_id, deployment_id);
+
+
+--
+-- Name: core_flavor_deployments_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_flavor_deployments
+    ADD CONSTRAINT core_flavor_deployments_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_flavor_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_flavor
+    ADD CONSTRAINT core_flavor_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_image_name_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_image
+    ADD CONSTRAINT core_image_name_key UNIQUE (name);
+
+
+--
+-- Name: core_image_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_image
+    ADD CONSTRAINT core_image_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_imagedeployments_image_id_3bc8a23925d399ff_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_imagedeployments
+    ADD CONSTRAINT core_imagedeployments_image_id_3bc8a23925d399ff_uniq UNIQUE (image_id, deployment_id);
+
+
+--
+-- Name: core_imagedeployments_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_imagedeployments
+    ADD CONSTRAINT core_imagedeployments_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_instance_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_instance
+    ADD CONSTRAINT core_instance_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_invoice_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_invoice
+    ADD CONSTRAINT core_invoice_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_network_permitted_slices_network_id_slice_id_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_network_permitted_slices
+    ADD CONSTRAINT core_network_permitted_slices_network_id_slice_id_key UNIQUE (network_id, slice_id);
+
+
+--
+-- Name: core_network_permitted_slices_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_network_permitted_slices
+    ADD CONSTRAINT core_network_permitted_slices_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_network_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_network
+    ADD CONSTRAINT core_network_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_networkparameter_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_networkparameter
+    ADD CONSTRAINT core_networkparameter_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_networkparametertype_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_networkparametertype
+    ADD CONSTRAINT core_networkparametertype_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_networkslice_network_id_78984d02ac7c1fb3_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_networkslice
+    ADD CONSTRAINT core_networkslice_network_id_78984d02ac7c1fb3_uniq UNIQUE (network_id, slice_id);
+
+
+--
+-- Name: core_networkslice_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_networkslice
+    ADD CONSTRAINT core_networkslice_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_networktemplate_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_networktemplate
+    ADD CONSTRAINT core_networktemplate_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_node_name_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_node
+    ADD CONSTRAINT core_node_name_key UNIQUE (name);
+
+
+--
+-- Name: core_node_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_node
+    ADD CONSTRAINT core_node_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_nodelabel_name_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_nodelabel
+    ADD CONSTRAINT core_nodelabel_name_key UNIQUE (name);
+
+
+--
+-- Name: core_nodelabel_node_nodelabel_id_node_id_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_nodelabel_node
+    ADD CONSTRAINT core_nodelabel_node_nodelabel_id_node_id_key UNIQUE (nodelabel_id, node_id);
+
+
+--
+-- Name: core_nodelabel_node_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_nodelabel_node
+    ADD CONSTRAINT core_nodelabel_node_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_nodelabel_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_nodelabel
+    ADD CONSTRAINT core_nodelabel_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_payment_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_payment
+    ADD CONSTRAINT core_payment_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_port_network_id_693ab091ccd5a89a_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_port
+    ADD CONSTRAINT core_port_network_id_693ab091ccd5a89a_uniq UNIQUE (network_id, instance_id);
+
+
+--
+-- Name: core_port_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_port
+    ADD CONSTRAINT core_port_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_program_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_program
+    ADD CONSTRAINT core_program_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_project_name_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_project
+    ADD CONSTRAINT core_project_name_key UNIQUE (name);
+
+
+--
+-- Name: core_project_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_project
+    ADD CONSTRAINT core_project_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_reservation_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_reservation
+    ADD CONSTRAINT core_reservation_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_reservedresource_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_reservedresource
+    ADD CONSTRAINT core_reservedresource_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_role_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_role
+    ADD CONSTRAINT core_role_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_router_networks_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_router_networks
+    ADD CONSTRAINT core_router_networks_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_router_networks_router_id_network_id_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_router_networks
+    ADD CONSTRAINT core_router_networks_router_id_network_id_key UNIQUE (router_id, network_id);
+
+
+--
+-- Name: core_router_permittedNetworks_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY "core_router_permittedNetworks"
+    ADD CONSTRAINT "core_router_permittedNetworks_pkey" PRIMARY KEY (id);
+
+
+--
+-- Name: core_router_permittedNetworks_router_id_network_id_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY "core_router_permittedNetworks"
+    ADD CONSTRAINT "core_router_permittedNetworks_router_id_network_id_key" UNIQUE (router_id, network_id);
+
+
+--
+-- Name: core_router_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_router
+    ADD CONSTRAINT core_router_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_service_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_service
+    ADD CONSTRAINT core_service_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_serviceattribute_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_serviceattribute
+    ADD CONSTRAINT core_serviceattribute_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_serviceclass_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_serviceclass
+    ADD CONSTRAINT core_serviceclass_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_serviceclass_upgradeFrom_from_serviceclass_id_to_servi_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY "core_serviceclass_upgradeFrom"
+    ADD CONSTRAINT "core_serviceclass_upgradeFrom_from_serviceclass_id_to_servi_key" UNIQUE (from_serviceclass_id, to_serviceclass_id);
+
+
+--
+-- Name: core_serviceclass_upgradeFrom_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY "core_serviceclass_upgradeFrom"
+    ADD CONSTRAINT "core_serviceclass_upgradeFrom_pkey" PRIMARY KEY (id);
+
+
+--
+-- Name: core_serviceprivilege_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_serviceprivilege
+    ADD CONSTRAINT core_serviceprivilege_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_serviceprivilege_user_id_3e7ef04b1340e86c_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_serviceprivilege
+    ADD CONSTRAINT core_serviceprivilege_user_id_3e7ef04b1340e86c_uniq UNIQUE (user_id, service_id, role_id);
+
+
+--
+-- Name: core_serviceresource_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_serviceresource
+    ADD CONSTRAINT core_serviceresource_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_servicerole_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_servicerole
+    ADD CONSTRAINT core_servicerole_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_servicerole_role_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_servicerole
+    ADD CONSTRAINT core_servicerole_role_key UNIQUE (role);
+
+
+--
+-- Name: core_site_login_base_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_site
+    ADD CONSTRAINT core_site_login_base_key UNIQUE (login_base);
+
+
+--
+-- Name: core_site_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_site
+    ADD CONSTRAINT core_site_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_sitecredential_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_sitecredential
+    ADD CONSTRAINT core_sitecredential_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_sitedeployment_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_sitedeployment
+    ADD CONSTRAINT core_sitedeployment_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_sitedeployment_site_id_ed533b8a1954fbb_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_sitedeployment
+    ADD CONSTRAINT core_sitedeployment_site_id_ed533b8a1954fbb_uniq UNIQUE (site_id, deployment_id, controller_id);
+
+
+--
+-- Name: core_siteprivilege_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_siteprivilege
+    ADD CONSTRAINT core_siteprivilege_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_siterole_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_siterole
+    ADD CONSTRAINT core_siterole_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_siterole_role_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_siterole
+    ADD CONSTRAINT core_siterole_role_key UNIQUE (role);
+
+
+--
+-- Name: core_slice_name_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_slice
+    ADD CONSTRAINT core_slice_name_key UNIQUE (name);
+
+
+--
+-- Name: core_slice_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_slice
+    ADD CONSTRAINT core_slice_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_slicecredential_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_slicecredential
+    ADD CONSTRAINT core_slicecredential_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_sliceprivilege_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_sliceprivilege
+    ADD CONSTRAINT core_sliceprivilege_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_sliceprivilege_user_id_6bed734e37df8596_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_sliceprivilege
+    ADD CONSTRAINT core_sliceprivilege_user_id_6bed734e37df8596_uniq UNIQUE (user_id, slice_id, role_id);
+
+
+--
+-- Name: core_slicerole_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_slicerole
+    ADD CONSTRAINT core_slicerole_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_slicerole_role_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_slicerole
+    ADD CONSTRAINT core_slicerole_role_key UNIQUE (role);
+
+
+--
+-- Name: core_slicetag_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_slicetag
+    ADD CONSTRAINT core_slicetag_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_tag_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_tag
+    ADD CONSTRAINT core_tag_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_tenant_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_tenant
+    ADD CONSTRAINT core_tenant_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_tenantattribute_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_tenantattribute
+    ADD CONSTRAINT core_tenantattribute_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_tenantroot_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_tenantroot
+    ADD CONSTRAINT core_tenantroot_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_tenantrootprivilege_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_tenantrootprivilege
+    ADD CONSTRAINT core_tenantrootprivilege_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_tenantrootprivilege_user_id_2bfebdce70c89f50_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_tenantrootprivilege
+    ADD CONSTRAINT core_tenantrootprivilege_user_id_2bfebdce70c89f50_uniq UNIQUE (user_id, tenant_root_id, role_id);
+
+
+--
+-- Name: core_tenantrootrole_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_tenantrootrole
+    ADD CONSTRAINT core_tenantrootrole_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_tenantrootrole_role_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_tenantrootrole
+    ADD CONSTRAINT core_tenantrootrole_role_key UNIQUE (role);
+
+
+--
+-- Name: core_usableobject_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_usableobject
+    ADD CONSTRAINT core_usableobject_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_user_email_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_user
+    ADD CONSTRAINT core_user_email_key UNIQUE (email);
+
+
+--
+-- Name: core_user_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_user
+    ADD CONSTRAINT core_user_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_usercredential_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_usercredential
+    ADD CONSTRAINT core_usercredential_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: core_userdashboardview_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY core_userdashboardview
+    ADD CONSTRAINT core_userdashboardview_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: django_admin_log_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY django_admin_log
+    ADD CONSTRAINT django_admin_log_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: django_content_type_app_label_45f3b1d93ec8c61c_uniq; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY django_content_type
+    ADD CONSTRAINT django_content_type_app_label_45f3b1d93ec8c61c_uniq UNIQUE (app_label, model);
+
+
+--
+-- Name: django_content_type_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY django_content_type
+    ADD CONSTRAINT django_content_type_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: django_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY django_migrations
+    ADD CONSTRAINT django_migrations_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: django_session_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY django_session
+    ADD CONSTRAINT django_session_pkey PRIMARY KEY (session_key);
+
+
+--
+-- Name: hpc_accessmap_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY hpc_accessmap
+    ADD CONSTRAINT hpc_accessmap_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: hpc_cdnprefix_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY hpc_cdnprefix
+    ADD CONSTRAINT hpc_cdnprefix_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: hpc_contentprovider_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY hpc_contentprovider
+    ADD CONSTRAINT hpc_contentprovider_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: hpc_contentprovider_users_contentprovider_id_user_id_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY hpc_contentprovider_users
+    ADD CONSTRAINT hpc_contentprovider_users_contentprovider_id_user_id_key UNIQUE (contentprovider_id, user_id);
+
+
+--
+-- Name: hpc_contentprovider_users_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY hpc_contentprovider_users
+    ADD CONSTRAINT hpc_contentprovider_users_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: hpc_hpchealthcheck_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY hpc_hpchealthcheck
+    ADD CONSTRAINT hpc_hpchealthcheck_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: hpc_hpcservice_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY hpc_hpcservice
+    ADD CONSTRAINT hpc_hpcservice_pkey PRIMARY KEY (service_ptr_id);
+
+
+--
+-- Name: hpc_originserver_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY hpc_originserver
+    ADD CONSTRAINT hpc_originserver_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: hpc_serviceprovider_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY hpc_serviceprovider
+    ADD CONSTRAINT hpc_serviceprovider_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: hpc_sitemap_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY hpc_sitemap
+    ADD CONSTRAINT hpc_sitemap_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: requestrouter_requestrouterservice_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY requestrouter_requestrouterservice
+    ADD CONSTRAINT requestrouter_requestrouterservice_pkey PRIMARY KEY (service_ptr_id);
+
+
+--
+-- Name: requestrouter_servicemap_name_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY requestrouter_servicemap
+    ADD CONSTRAINT requestrouter_servicemap_name_key UNIQUE (name);
+
+
+--
+-- Name: requestrouter_servicemap_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY requestrouter_servicemap
+    ADD CONSTRAINT requestrouter_servicemap_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: syndicate_storage_slicesecret_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY syndicate_storage_slicesecret
+    ADD CONSTRAINT syndicate_storage_slicesecret_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: syndicate_storage_syndicateprincipal_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY syndicate_storage_syndicateprincipal
+    ADD CONSTRAINT syndicate_storage_syndicateprincipal_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: syndicate_storage_syndicateprincipal_principal_id_key; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY syndicate_storage_syndicateprincipal
+    ADD CONSTRAINT syndicate_storage_syndicateprincipal_principal_id_key UNIQUE (principal_id);
+
+
+--
+-- Name: syndicate_storage_syndicateservice_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY syndicate_storage_syndicateservice
+    ADD CONSTRAINT syndicate_storage_syndicateservice_pkey PRIMARY KEY (service_ptr_id);
+
+
+--
+-- Name: syndicate_storage_volume_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY syndicate_storage_volume
+    ADD CONSTRAINT syndicate_storage_volume_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: syndicate_storage_volumeaccessright_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY syndicate_storage_volumeaccessright
+    ADD CONSTRAINT syndicate_storage_volumeaccessright_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: syndicate_storage_volumeslice_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace: 
+--
+
+ALTER TABLE ONLY syndicate_storage_volumeslice
+    ADD CONSTRAINT syndicate_storage_volumeslice_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: auth_group_permissions_0e939a4f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX auth_group_permissions_0e939a4f ON auth_group_permissions USING btree (group_id);
+
+
+--
+-- Name: auth_group_permissions_8373b171; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX auth_group_permissions_8373b171 ON auth_group_permissions USING btree (permission_id);
+
+
+--
+-- Name: auth_permission_417f1b1c; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX auth_permission_417f1b1c ON auth_permission USING btree (content_type_id);
+
+
+--
+-- Name: core_account_9365d6e7; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_account_9365d6e7 ON core_account USING btree (site_id);
+
+
+--
+-- Name: core_charge_8a089c2a; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_charge_8a089c2a ON core_charge USING btree (account_id);
+
+
+--
+-- Name: core_charge_af31437c; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_charge_af31437c ON core_charge USING btree (object_id);
+
+
+--
+-- Name: core_charge_be7f3a0f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_charge_be7f3a0f ON core_charge USING btree (slice_id);
+
+
+--
+-- Name: core_charge_f1f5d967; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_charge_f1f5d967 ON core_charge USING btree (invoice_id);
+
+
+--
+-- Name: core_controller_5921cd4f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controller_5921cd4f ON core_controller USING btree (deployment_id);
+
+
+--
+-- Name: core_controllercredential_a31c1112; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllercredential_a31c1112 ON core_controllercredential USING btree (controller_id);
+
+
+--
+-- Name: core_controllercredential_b068931c; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllercredential_b068931c ON core_controllercredential USING btree (name);
+
+
+--
+-- Name: core_controllerdashboardview_5da0369f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllerdashboardview_5da0369f ON core_controllerdashboardview USING btree ("dashboardView_id");
+
+
+--
+-- Name: core_controllerdashboardview_a31c1112; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllerdashboardview_a31c1112 ON core_controllerdashboardview USING btree (controller_id);
+
+
+--
+-- Name: core_controllerimages_a31c1112; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllerimages_a31c1112 ON core_controllerimages USING btree (controller_id);
+
+
+--
+-- Name: core_controllerimages_f33175e6; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllerimages_f33175e6 ON core_controllerimages USING btree (image_id);
+
+
+--
+-- Name: core_controllernetwork_4e19114d; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllernetwork_4e19114d ON core_controllernetwork USING btree (network_id);
+
+
+--
+-- Name: core_controllernetwork_a31c1112; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllernetwork_a31c1112 ON core_controllernetwork USING btree (controller_id);
+
+
+--
+-- Name: core_controllersite_38543614; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllersite_38543614 ON core_controllersite USING btree (tenant_id);
+
+
+--
+-- Name: core_controllersite_9365d6e7; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllersite_9365d6e7 ON core_controllersite USING btree (site_id);
+
+
+--
+-- Name: core_controllersite_a31c1112; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllersite_a31c1112 ON core_controllersite USING btree (controller_id);
+
+
+--
+-- Name: core_controllersiteprivilege_28116b8e; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllersiteprivilege_28116b8e ON core_controllersiteprivilege USING btree (site_privilege_id);
+
+
+--
+-- Name: core_controllersiteprivilege_84566833; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllersiteprivilege_84566833 ON core_controllersiteprivilege USING btree (role_id);
+
+
+--
+-- Name: core_controllersiteprivilege_a31c1112; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllersiteprivilege_a31c1112 ON core_controllersiteprivilege USING btree (controller_id);
+
+
+--
+-- Name: core_controllerslice_a31c1112; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllerslice_a31c1112 ON core_controllerslice USING btree (controller_id);
+
+
+--
+-- Name: core_controllerslice_be7f3a0f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllerslice_be7f3a0f ON core_controllerslice USING btree (slice_id);
+
+
+--
+-- Name: core_controllersliceprivilege_25740d9a; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllersliceprivilege_25740d9a ON core_controllersliceprivilege USING btree (slice_privilege_id);
+
+
+--
+-- Name: core_controllersliceprivilege_84566833; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllersliceprivilege_84566833 ON core_controllersliceprivilege USING btree (role_id);
+
+
+--
+-- Name: core_controllersliceprivilege_a31c1112; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controllersliceprivilege_a31c1112 ON core_controllersliceprivilege USING btree (controller_id);
+
+
+--
+-- Name: core_controlleruser_a31c1112; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controlleruser_a31c1112 ON core_controlleruser USING btree (controller_id);
+
+
+--
+-- Name: core_controlleruser_e8701ad4; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_controlleruser_e8701ad4 ON core_controlleruser USING btree (user_id);
+
+
+--
+-- Name: core_dashboardview_deployments_5921cd4f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_dashboardview_deployments_5921cd4f ON core_dashboardview_deployments USING btree (deployment_id);
+
+
+--
+-- Name: core_dashboardview_deployments_79bd56c8; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_dashboardview_deployments_79bd56c8 ON core_dashboardview_deployments USING btree (dashboardview_id);
+
+
+--
+-- Name: core_deploymentprivilege_5921cd4f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_deploymentprivilege_5921cd4f ON core_deploymentprivilege USING btree (deployment_id);
+
+
+--
+-- Name: core_deploymentprivilege_84566833; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_deploymentprivilege_84566833 ON core_deploymentprivilege USING btree (role_id);
+
+
+--
+-- Name: core_deploymentprivilege_e8701ad4; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_deploymentprivilege_e8701ad4 ON core_deploymentprivilege USING btree (user_id);
+
+
+--
+-- Name: core_flavor_deployments_5921cd4f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_flavor_deployments_5921cd4f ON core_flavor_deployments USING btree (deployment_id);
+
+
+--
+-- Name: core_flavor_deployments_dd3f198d; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_flavor_deployments_dd3f198d ON core_flavor_deployments USING btree (flavor_id);
+
+
+--
+-- Name: core_imagedeployments_5921cd4f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_imagedeployments_5921cd4f ON core_imagedeployments USING btree (deployment_id);
+
+
+--
+-- Name: core_imagedeployments_f33175e6; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_imagedeployments_f33175e6 ON core_imagedeployments USING btree (image_id);
+
+
+--
+-- Name: core_instance_3700153c; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_instance_3700153c ON core_instance USING btree (creator_id);
+
+
+--
+-- Name: core_instance_5921cd4f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_instance_5921cd4f ON core_instance USING btree (deployment_id);
+
+
+--
+-- Name: core_instance_6be37982; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_instance_6be37982 ON core_instance USING btree (parent_id);
+
+
+--
+-- Name: core_instance_be7f3a0f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_instance_be7f3a0f ON core_instance USING btree (slice_id);
+
+
+--
+-- Name: core_instance_c693ebc8; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_instance_c693ebc8 ON core_instance USING btree (node_id);
+
+
+--
+-- Name: core_instance_dd3f198d; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_instance_dd3f198d ON core_instance USING btree (flavor_id);
+
+
+--
+-- Name: core_instance_f33175e6; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_instance_f33175e6 ON core_instance USING btree (image_id);
+
+
+--
+-- Name: core_invoice_8a089c2a; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_invoice_8a089c2a ON core_invoice USING btree (account_id);
+
+
+--
+-- Name: core_network_5e7b1936; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_network_5e7b1936 ON core_network USING btree (owner_id);
+
+
+--
+-- Name: core_network_74f53564; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_network_74f53564 ON core_network USING btree (template_id);
+
+
+--
+-- Name: core_network_permitted_slices_4e19114d; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_network_permitted_slices_4e19114d ON core_network_permitted_slices USING btree (network_id);
+
+
+--
+-- Name: core_network_permitted_slices_be7f3a0f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_network_permitted_slices_be7f3a0f ON core_network_permitted_slices USING btree (slice_id);
+
+
+--
+-- Name: core_networkparameter_417f1b1c; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_networkparameter_417f1b1c ON core_networkparameter USING btree (content_type_id);
+
+
+--
+-- Name: core_networkparameter_80740216; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_networkparameter_80740216 ON core_networkparameter USING btree (parameter_id);
+
+
+--
+-- Name: core_networkparametertype_b068931c; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_networkparametertype_b068931c ON core_networkparametertype USING btree (name);
+
+
+--
+-- Name: core_networkslice_4e19114d; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_networkslice_4e19114d ON core_networkslice USING btree (network_id);
+
+
+--
+-- Name: core_networkslice_be7f3a0f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_networkslice_be7f3a0f ON core_networkslice USING btree (slice_id);
+
+
+--
+-- Name: core_node_86aed61a; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_node_86aed61a ON core_node USING btree (site_deployment_id);
+
+
+--
+-- Name: core_node_9365d6e7; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_node_9365d6e7 ON core_node USING btree (site_id);
+
+
+--
+-- Name: core_nodelabel_node_c693ebc8; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_nodelabel_node_c693ebc8 ON core_nodelabel_node USING btree (node_id);
+
+
+--
+-- Name: core_nodelabel_node_dd685172; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_nodelabel_node_dd685172 ON core_nodelabel_node USING btree (nodelabel_id);
+
+
+--
+-- Name: core_payment_8a089c2a; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_payment_8a089c2a ON core_payment USING btree (account_id);
+
+
+--
+-- Name: core_port_4e19114d; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_port_4e19114d ON core_port USING btree (network_id);
+
+
+--
+-- Name: core_port_51afcc4f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_port_51afcc4f ON core_port USING btree (instance_id);
+
+
+--
+-- Name: core_program_5e7b1936; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_program_5e7b1936 ON core_program USING btree (owner_id);
+
+
+--
+-- Name: core_reservation_be7f3a0f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_reservation_be7f3a0f ON core_reservation USING btree (slice_id);
+
+
+--
+-- Name: core_reservedresource_51afcc4f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_reservedresource_51afcc4f ON core_reservedresource USING btree (instance_id);
+
+
+--
+-- Name: core_reservedresource_732beb09; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_reservedresource_732beb09 ON core_reservedresource USING btree ("reservationSet_id");
+
+
+--
+-- Name: core_reservedresource_e2f3ef5b; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_reservedresource_e2f3ef5b ON core_reservedresource USING btree (resource_id);
+
+
+--
+-- Name: core_role_417f1b1c; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_role_417f1b1c ON core_role USING btree (content_type_id);
+
+
+--
+-- Name: core_router_5e7b1936; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_router_5e7b1936 ON core_router USING btree (owner_id);
+
+
+--
+-- Name: core_router_networks_4e19114d; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_router_networks_4e19114d ON core_router_networks USING btree (network_id);
+
+
+--
+-- Name: core_router_networks_52d4f3af; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_router_networks_52d4f3af ON core_router_networks USING btree (router_id);
+
+
+--
+-- Name: core_router_permittednetworks_4e19114d; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_router_permittednetworks_4e19114d ON "core_router_permittedNetworks" USING btree (network_id);
+
+
+--
+-- Name: core_router_permittednetworks_52d4f3af; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_router_permittednetworks_52d4f3af ON "core_router_permittedNetworks" USING btree (router_id);
+
+
+--
+-- Name: core_serviceattribute_b0dc1e29; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_serviceattribute_b0dc1e29 ON core_serviceattribute USING btree (service_id);
+
+
+--
+-- Name: core_serviceclass_upgradefrom_a90aba97; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_serviceclass_upgradefrom_a90aba97 ON "core_serviceclass_upgradeFrom" USING btree (to_serviceclass_id);
+
+
+--
+-- Name: core_serviceclass_upgradefrom_e970e0f1; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_serviceclass_upgradefrom_e970e0f1 ON "core_serviceclass_upgradeFrom" USING btree (from_serviceclass_id);
+
+
+--
+-- Name: core_serviceprivilege_84566833; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_serviceprivilege_84566833 ON core_serviceprivilege USING btree (role_id);
+
+
+--
+-- Name: core_serviceprivilege_b0dc1e29; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_serviceprivilege_b0dc1e29 ON core_serviceprivilege USING btree (service_id);
+
+
+--
+-- Name: core_serviceprivilege_e8701ad4; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_serviceprivilege_e8701ad4 ON core_serviceprivilege USING btree (user_id);
+
+
+--
+-- Name: core_serviceresource_aa578034; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_serviceresource_aa578034 ON core_serviceresource USING btree ("serviceClass_id");
+
+
+--
+-- Name: core_sitecredential_9365d6e7; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_sitecredential_9365d6e7 ON core_sitecredential USING btree (site_id);
+
+
+--
+-- Name: core_sitecredential_b068931c; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_sitecredential_b068931c ON core_sitecredential USING btree (name);
+
+
+--
+-- Name: core_sitedeployment_5921cd4f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_sitedeployment_5921cd4f ON core_sitedeployment USING btree (deployment_id);
+
+
+--
+-- Name: core_sitedeployment_9365d6e7; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_sitedeployment_9365d6e7 ON core_sitedeployment USING btree (site_id);
+
+
+--
+-- Name: core_sitedeployment_a31c1112; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_sitedeployment_a31c1112 ON core_sitedeployment USING btree (controller_id);
+
+
+--
+-- Name: core_siteprivilege_84566833; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_siteprivilege_84566833 ON core_siteprivilege USING btree (role_id);
+
+
+--
+-- Name: core_siteprivilege_9365d6e7; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_siteprivilege_9365d6e7 ON core_siteprivilege USING btree (site_id);
+
+
+--
+-- Name: core_siteprivilege_e8701ad4; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_siteprivilege_e8701ad4 ON core_siteprivilege USING btree (user_id);
+
+
+--
+-- Name: core_slice_3700153c; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_slice_3700153c ON core_slice USING btree (creator_id);
+
+
+--
+-- Name: core_slice_531a000f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_slice_531a000f ON core_slice USING btree (default_flavor_id);
+
+
+--
+-- Name: core_slice_9365d6e7; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_slice_9365d6e7 ON core_slice USING btree (site_id);
+
+
+--
+-- Name: core_slice_a82f732f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_slice_a82f732f ON core_slice USING btree (default_image_id);
+
+
+--
+-- Name: core_slice_aa578034; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_slice_aa578034 ON core_slice USING btree ("serviceClass_id");
+
+
+--
+-- Name: core_slice_b0dc1e29; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_slice_b0dc1e29 ON core_slice USING btree (service_id);
+
+
+--
+-- Name: core_slicecredential_b068931c; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_slicecredential_b068931c ON core_slicecredential USING btree (name);
+
+
+--
+-- Name: core_slicecredential_be7f3a0f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_slicecredential_be7f3a0f ON core_slicecredential USING btree (slice_id);
+
+
+--
+-- Name: core_sliceprivilege_84566833; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_sliceprivilege_84566833 ON core_sliceprivilege USING btree (role_id);
+
+
+--
+-- Name: core_sliceprivilege_be7f3a0f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_sliceprivilege_be7f3a0f ON core_sliceprivilege USING btree (slice_id);
+
+
+--
+-- Name: core_sliceprivilege_e8701ad4; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_sliceprivilege_e8701ad4 ON core_sliceprivilege USING btree (user_id);
+
+
+--
+-- Name: core_slicetag_be7f3a0f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_slicetag_be7f3a0f ON core_slicetag USING btree (slice_id);
+
+
+--
+-- Name: core_tag_417f1b1c; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_tag_417f1b1c ON core_tag USING btree (content_type_id);
+
+
+--
+-- Name: core_tag_b068931c; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_tag_b068931c ON core_tag USING btree (name);
+
+
+--
+-- Name: core_tag_b0dc1e29; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_tag_b0dc1e29 ON core_tag USING btree (service_id);
+
+
+--
+-- Name: core_tenant_6d0512e4; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_tenant_6d0512e4 ON core_tenant USING btree (subscriber_tenant_id);
+
+
+--
+-- Name: core_tenant_a5c60fe7; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_tenant_a5c60fe7 ON core_tenant USING btree (subscriber_service_id);
+
+
+--
+-- Name: core_tenant_d1fbfb28; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_tenant_d1fbfb28 ON core_tenant USING btree (provider_service_id);
+
+
+--
+-- Name: core_tenant_ec8cbfdc; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_tenant_ec8cbfdc ON core_tenant USING btree (subscriber_user_id);
+
+
+--
+-- Name: core_tenant_f687e49c; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_tenant_f687e49c ON core_tenant USING btree (subscriber_root_id);
+
+
+--
+-- Name: core_tenantattribute_38543614; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_tenantattribute_38543614 ON core_tenantattribute USING btree (tenant_id);
+
+
+--
+-- Name: core_tenantrootprivilege_84566833; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_tenantrootprivilege_84566833 ON core_tenantrootprivilege USING btree (role_id);
+
+
+--
+-- Name: core_tenantrootprivilege_ad876f96; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_tenantrootprivilege_ad876f96 ON core_tenantrootprivilege USING btree (tenant_root_id);
+
+
+--
+-- Name: core_tenantrootprivilege_e8701ad4; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_tenantrootprivilege_e8701ad4 ON core_tenantrootprivilege USING btree (user_id);
+
+
+--
+-- Name: core_user_9365d6e7; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_user_9365d6e7 ON core_user USING btree (site_id);
+
+
+--
+-- Name: core_usercredential_b068931c; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_usercredential_b068931c ON core_usercredential USING btree (name);
+
+
+--
+-- Name: core_usercredential_e8701ad4; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_usercredential_e8701ad4 ON core_usercredential USING btree (user_id);
+
+
+--
+-- Name: core_userdashboardview_5da0369f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_userdashboardview_5da0369f ON core_userdashboardview USING btree ("dashboardView_id");
+
+
+--
+-- Name: core_userdashboardview_e8701ad4; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX core_userdashboardview_e8701ad4 ON core_userdashboardview USING btree (user_id);
+
+
+--
+-- Name: django_admin_log_417f1b1c; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX django_admin_log_417f1b1c ON django_admin_log USING btree (content_type_id);
+
+
+--
+-- Name: django_admin_log_e8701ad4; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX django_admin_log_e8701ad4 ON django_admin_log USING btree (user_id);
+
+
+--
+-- Name: django_session_de54fa62; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX django_session_de54fa62 ON django_session USING btree (expire_date);
+
+
+--
+-- Name: hpc_accessmap_bc4912a0; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX hpc_accessmap_bc4912a0 ON hpc_accessmap USING btree ("contentProvider_id");
+
+
+--
+-- Name: hpc_cdnprefix_8473b38b; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX hpc_cdnprefix_8473b38b ON hpc_cdnprefix USING btree ("defaultOriginServer_id");
+
+
+--
+-- Name: hpc_cdnprefix_bc4912a0; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX hpc_cdnprefix_bc4912a0 ON hpc_cdnprefix USING btree ("contentProvider_id");
+
+
+--
+-- Name: hpc_contentprovider_ebdbc659; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX hpc_contentprovider_ebdbc659 ON hpc_contentprovider USING btree ("serviceProvider_id");
+
+
+--
+-- Name: hpc_contentprovider_users_82c06917; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX hpc_contentprovider_users_82c06917 ON hpc_contentprovider_users USING btree (contentprovider_id);
+
+
+--
+-- Name: hpc_contentprovider_users_e8701ad4; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX hpc_contentprovider_users_e8701ad4 ON hpc_contentprovider_users USING btree (user_id);
+
+
+--
+-- Name: hpc_hpchealthcheck_591847bf; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX hpc_hpchealthcheck_591847bf ON hpc_hpchealthcheck USING btree ("hpcService_id");
+
+
+--
+-- Name: hpc_originserver_bc4912a0; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX hpc_originserver_bc4912a0 ON hpc_originserver USING btree ("contentProvider_id");
+
+
+--
+-- Name: hpc_serviceprovider_591847bf; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX hpc_serviceprovider_591847bf ON hpc_serviceprovider USING btree ("hpcService_id");
+
+
+--
+-- Name: hpc_sitemap_23b3ec8f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX hpc_sitemap_23b3ec8f ON hpc_sitemap USING btree ("cdnPrefix_id");
+
+
+--
+-- Name: hpc_sitemap_591847bf; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX hpc_sitemap_591847bf ON hpc_sitemap USING btree ("hpcService_id");
+
+
+--
+-- Name: hpc_sitemap_bc4912a0; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX hpc_sitemap_bc4912a0 ON hpc_sitemap USING btree ("contentProvider_id");
+
+
+--
+-- Name: hpc_sitemap_ebdbc659; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX hpc_sitemap_ebdbc659 ON hpc_sitemap USING btree ("serviceProvider_id");
+
+
+--
+-- Name: requestrouter_servicemap_5e7b1936; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX requestrouter_servicemap_5e7b1936 ON requestrouter_servicemap USING btree (owner_id);
+
+
+--
+-- Name: requestrouter_servicemap_be7f3a0f; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX requestrouter_servicemap_be7f3a0f ON requestrouter_servicemap USING btree (slice_id);
+
+
+--
+-- Name: syndicate_storage_slicesecret_b717f5ab; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX syndicate_storage_slicesecret_b717f5ab ON syndicate_storage_slicesecret USING btree (slice_id_id);
+
+
+--
+-- Name: syndicate_storage_volume_279564bf; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX syndicate_storage_volume_279564bf ON syndicate_storage_volume USING btree (owner_id_id);
+
+
+--
+-- Name: syndicate_storage_volumeaccessright_279564bf; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX syndicate_storage_volumeaccessright_279564bf ON syndicate_storage_volumeaccessright USING btree (owner_id_id);
+
+
+--
+-- Name: syndicate_storage_volumeaccessright_654102bb; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX syndicate_storage_volumeaccessright_654102bb ON syndicate_storage_volumeaccessright USING btree (volume_id);
+
+
+--
+-- Name: syndicate_storage_volumeslice_5b591651; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX syndicate_storage_volumeslice_5b591651 ON syndicate_storage_volumeslice USING btree (volume_id_id);
+
+
+--
+-- Name: syndicate_storage_volumeslice_b717f5ab; Type: INDEX; Schema: public; Owner: postgres; Tablespace: 
+--
+
+CREATE INDEX syndicate_storage_volumeslice_b717f5ab ON syndicate_storage_volumeslice USING btree (slice_id_id);
+
+
+--
+-- Name: auth_content_type_id_508cf46651277a81_fk_django_content_type_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY auth_permission
+    ADD CONSTRAINT auth_content_type_id_508cf46651277a81_fk_django_content_type_id FOREIGN KEY (content_type_id) REFERENCES django_content_type(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: auth_group_permissio_group_id_689710a9a73b7457_fk_auth_group_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY auth_group_permissions
+    ADD CONSTRAINT auth_group_permissio_group_id_689710a9a73b7457_fk_auth_group_id FOREIGN KEY (group_id) REFERENCES auth_group(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: auth_group_permission_id_1f49ccbbdc69d2fc_fk_auth_permission_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY auth_group_permissions
+    ADD CONSTRAINT auth_group_permission_id_1f49ccbbdc69d2fc_fk_auth_permission_id FOREIGN KEY (permission_id) REFERENCES auth_permission(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: b8a90faf34a5dd47a7f1e2f88e99f8a2; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_hpchealthcheck
+    ADD CONSTRAINT b8a90faf34a5dd47a7f1e2f88e99f8a2 FOREIGN KEY ("hpcService_id") REFERENCES hpc_hpcservice(service_ptr_id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: c_from_serviceclass_id_188a83eaefe26390_fk_core_serviceclass_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY "core_serviceclass_upgradeFrom"
+    ADD CONSTRAINT c_from_serviceclass_id_188a83eaefe26390_fk_core_serviceclass_id FOREIGN KEY (from_serviceclass_id) REFERENCES core_serviceclass(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: c_parameter_id_2c17791ba32bd8c8_fk_core_networkparametertype_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_networkparameter
+    ADD CONSTRAINT c_parameter_id_2c17791ba32bd8c8_fk_core_networkparametertype_id FOREIGN KEY (parameter_id) REFERENCES core_networkparametertype(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: c_site_deployment_id_2dc763428bdc2781_fk_core_sitedeployment_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_node
+    ADD CONSTRAINT c_site_deployment_id_2dc763428bdc2781_fk_core_sitedeployment_id FOREIGN KEY (site_deployment_id) REFERENCES core_sitedeployment(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: co_slice_privilege_id_21402f4f2399079_fk_core_sliceprivilege_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllersliceprivilege
+    ADD CONSTRAINT co_slice_privilege_id_21402f4f2399079_fk_core_sliceprivilege_id FOREIGN KEY (slice_privilege_id) REFERENCES core_sliceprivilege(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: cor_site_privilege_id_41490e8c05c2e685_fk_core_siteprivilege_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllersiteprivilege
+    ADD CONSTRAINT cor_site_privilege_id_41490e8c05c2e685_fk_core_siteprivilege_id FOREIGN KEY (site_privilege_id) REFERENCES core_siteprivilege(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: cor_to_serviceclass_id_4e2748248647c43b_fk_core_serviceclass_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY "core_serviceclass_upgradeFrom"
+    ADD CONSTRAINT cor_to_serviceclass_id_4e2748248647c43b_fk_core_serviceclass_id FOREIGN KEY (to_serviceclass_id) REFERENCES core_serviceclass(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core__reservationset_id_395058233c59a671_fk_core_reservation_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_reservedresource
+    ADD CONSTRAINT core__reservationset_id_395058233c59a671_fk_core_reservation_id FOREIGN KEY ("reservationSet_id") REFERENCES core_reservation(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core__subscriber_root_id_26f21610cb2711f9_fk_core_tenantroot_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tenant
+    ADD CONSTRAINT core__subscriber_root_id_26f21610cb2711f9_fk_core_tenantroot_id FOREIGN KEY (subscriber_root_id) REFERENCES core_tenantroot(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core__subscriber_service_id_5049d522dc2feae7_fk_core_service_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tenant
+    ADD CONSTRAINT core__subscriber_service_id_5049d522dc2feae7_fk_core_service_id FOREIGN KEY (subscriber_service_id) REFERENCES core_service(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_account_site_id_7d8af010f408acb2_fk_core_site_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_account
+    ADD CONSTRAINT core_account_site_id_7d8af010f408acb2_fk_core_site_id FOREIGN KEY (site_id) REFERENCES core_site(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_charge_account_id_277c66c32427fb_fk_core_account_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_charge
+    ADD CONSTRAINT core_charge_account_id_277c66c32427fb_fk_core_account_id FOREIGN KEY (account_id) REFERENCES core_account(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_charge_invoice_id_7af39adf58aad977_fk_core_invoice_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_charge
+    ADD CONSTRAINT core_charge_invoice_id_7af39adf58aad977_fk_core_invoice_id FOREIGN KEY (invoice_id) REFERENCES core_invoice(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_charge_object_id_349f8834f1bf5ce6_fk_core_usableobject_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_charge
+    ADD CONSTRAINT core_charge_object_id_349f8834f1bf5ce6_fk_core_usableobject_id FOREIGN KEY (object_id) REFERENCES core_usableobject(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_charge_slice_id_5f33de3b320604f2_fk_core_slice_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_charge
+    ADD CONSTRAINT core_charge_slice_id_5f33de3b320604f2_fk_core_slice_id FOREIGN KEY (slice_id) REFERENCES core_slice(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_content_type_id_150a10ada282bcf9_fk_django_content_type_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_role
+    ADD CONSTRAINT core_content_type_id_150a10ada282bcf9_fk_django_content_type_id FOREIGN KEY (content_type_id) REFERENCES django_content_type(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_content_type_id_3cc30601489a3056_fk_django_content_type_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_networkparameter
+    ADD CONSTRAINT core_content_type_id_3cc30601489a3056_fk_django_content_type_id FOREIGN KEY (content_type_id) REFERENCES django_content_type(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_content_type_id_413c7b5400f8ad9c_fk_django_content_type_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tag
+    ADD CONSTRAINT core_content_type_id_413c7b5400f8ad9c_fk_django_content_type_id FOREIGN KEY (content_type_id) REFERENCES django_content_type(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_contr_controller_id_11d29f7e2a4a5462_fk_core_controller_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllersiteprivilege
+    ADD CONSTRAINT core_contr_controller_id_11d29f7e2a4a5462_fk_core_controller_id FOREIGN KEY (controller_id) REFERENCES core_controller(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_contr_controller_id_1f82c3216437715f_fk_core_controller_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllerdashboardview
+    ADD CONSTRAINT core_contr_controller_id_1f82c3216437715f_fk_core_controller_id FOREIGN KEY (controller_id) REFERENCES core_controller(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_contr_controller_id_46178c1d21384e5e_fk_core_controller_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllersite
+    ADD CONSTRAINT core_contr_controller_id_46178c1d21384e5e_fk_core_controller_id FOREIGN KEY (controller_id) REFERENCES core_controller(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_contr_controller_id_4fb982de67c3b742_fk_core_controller_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllersliceprivilege
+    ADD CONSTRAINT core_contr_controller_id_4fb982de67c3b742_fk_core_controller_id FOREIGN KEY (controller_id) REFERENCES core_controller(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_contr_controller_id_5cd05d37bbdf1d96_fk_core_controller_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controlleruser
+    ADD CONSTRAINT core_contr_controller_id_5cd05d37bbdf1d96_fk_core_controller_id FOREIGN KEY (controller_id) REFERENCES core_controller(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_contr_controller_id_60b467e792b15198_fk_core_controller_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllernetwork
+    ADD CONSTRAINT core_contr_controller_id_60b467e792b15198_fk_core_controller_id FOREIGN KEY (controller_id) REFERENCES core_controller(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_contr_controller_id_7095bdbd27f73f56_fk_core_controller_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllerslice
+    ADD CONSTRAINT core_contr_controller_id_7095bdbd27f73f56_fk_core_controller_id FOREIGN KEY (controller_id) REFERENCES core_controller(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_contr_deployment_id_772a055c58b6e43a_fk_core_deployment_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controller
+    ADD CONSTRAINT core_contr_deployment_id_772a055c58b6e43a_fk_core_deployment_id FOREIGN KEY (deployment_id) REFERENCES core_deployment(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_contro_controller_id_5906172a2f34d3a_fk_core_controller_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllercredential
+    ADD CONSTRAINT core_contro_controller_id_5906172a2f34d3a_fk_core_controller_id FOREIGN KEY (controller_id) REFERENCES core_controller(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_contro_controller_id_6d1311b7cc69cd7_fk_core_controller_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllerimages
+    ADD CONSTRAINT core_contro_controller_id_6d1311b7cc69cd7_fk_core_controller_id FOREIGN KEY (controller_id) REFERENCES core_controller(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_controllerimage_image_id_5713221a6b077f6b_fk_core_image_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllerimages
+    ADD CONSTRAINT core_controllerimage_image_id_5713221a6b077f6b_fk_core_image_id FOREIGN KEY (image_id) REFERENCES core_image(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_controllern_network_id_3fe7748f6851d06f_fk_core_network_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllernetwork
+    ADD CONSTRAINT core_controllern_network_id_3fe7748f6851d06f_fk_core_network_id FOREIGN KEY (network_id) REFERENCES core_network(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_controllersite_site_id_4fa87f0734a60665_fk_core_site_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllersite
+    ADD CONSTRAINT core_controllersite_site_id_4fa87f0734a60665_fk_core_site_id FOREIGN KEY (site_id) REFERENCES core_site(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_controllerslice_slice_id_7005d287c601356b_fk_core_slice_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllerslice
+    ADD CONSTRAINT core_controllerslice_slice_id_7005d287c601356b_fk_core_slice_id FOREIGN KEY (slice_id) REFERENCES core_slice(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_controlleruser_user_id_60dc3a7220b1005b_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controlleruser
+    ADD CONSTRAINT core_controlleruser_user_id_60dc3a7220b1005b_fk_core_user_id FOREIGN KEY (user_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_dashbo_deployment_id_8b902dfc7ab128b_fk_core_deployment_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_dashboardview_deployments
+    ADD CONSTRAINT core_dashbo_deployment_id_8b902dfc7ab128b_fk_core_deployment_id FOREIGN KEY (deployment_id) REFERENCES core_deployment(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_dashboardview_id_1241776e11825a15_fk_core_dashboardview_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_controllerdashboardview
+    ADD CONSTRAINT core_dashboardview_id_1241776e11825a15_fk_core_dashboardview_id FOREIGN KEY ("dashboardView_id") REFERENCES core_dashboardview(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_dashboardview_id_623d5d799346e0f8_fk_core_dashboardview_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_dashboardview_deployments
+    ADD CONSTRAINT core_dashboardview_id_623d5d799346e0f8_fk_core_dashboardview_id FOREIGN KEY (dashboardview_id) REFERENCES core_dashboardview(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_dashboardview_id_7d9723f531eefdde_fk_core_dashboardview_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_userdashboardview
+    ADD CONSTRAINT core_dashboardview_id_7d9723f531eefdde_fk_core_dashboardview_id FOREIGN KEY ("dashboardView_id") REFERENCES core_dashboardview(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_deplo_deployment_id_4606c90fff2e5ecf_fk_core_deployment_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_deploymentprivilege
+    ADD CONSTRAINT core_deplo_deployment_id_4606c90fff2e5ecf_fk_core_deployment_id FOREIGN KEY (deployment_id) REFERENCES core_deployment(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_deploym_role_id_221f61258b29e608_fk_core_deploymentrole_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_deploymentprivilege
+    ADD CONSTRAINT core_deploym_role_id_221f61258b29e608_fk_core_deploymentrole_id FOREIGN KEY (role_id) REFERENCES core_deploymentrole(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_deploymentprivile_user_id_2ac00d41376e2a8d_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_deploymentprivilege
+    ADD CONSTRAINT core_deploymentprivile_user_id_2ac00d41376e2a8d_fk_core_user_id FOREIGN KEY (user_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_flavo_deployment_id_33af1c761c0497e3_fk_core_deployment_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_flavor_deployments
+    ADD CONSTRAINT core_flavo_deployment_id_33af1c761c0497e3_fk_core_deployment_id FOREIGN KEY (deployment_id) REFERENCES core_deployment(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_flavor_deploy_flavor_id_3e598722be0b3446_fk_core_flavor_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_flavor_deployments
+    ADD CONSTRAINT core_flavor_deploy_flavor_id_3e598722be0b3446_fk_core_flavor_id FOREIGN KEY (flavor_id) REFERENCES core_flavor(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_image_deployment_id_31772dfdcf4b80eb_fk_core_deployment_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_imagedeployments
+    ADD CONSTRAINT core_image_deployment_id_31772dfdcf4b80eb_fk_core_deployment_id FOREIGN KEY (deployment_id) REFERENCES core_deployment(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_imagedeployment_image_id_4a6df22c06603b40_fk_core_image_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_imagedeployments
+    ADD CONSTRAINT core_imagedeployment_image_id_4a6df22c06603b40_fk_core_image_id FOREIGN KEY (image_id) REFERENCES core_image(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_insta_deployment_id_111e2cdd025ec8ef_fk_core_deployment_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_instance
+    ADD CONSTRAINT core_insta_deployment_id_111e2cdd025ec8ef_fk_core_deployment_id FOREIGN KEY (deployment_id) REFERENCES core_deployment(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_instance_creator_id_66a7e8c819d15b29_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_instance
+    ADD CONSTRAINT core_instance_creator_id_66a7e8c819d15b29_fk_core_user_id FOREIGN KEY (creator_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_instance_flavor_id_61bc3198a5673218_fk_core_flavor_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_instance
+    ADD CONSTRAINT core_instance_flavor_id_61bc3198a5673218_fk_core_flavor_id FOREIGN KEY (flavor_id) REFERENCES core_flavor(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_instance_image_id_5c8c96fe9a61802c_fk_core_image_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_instance
+    ADD CONSTRAINT core_instance_image_id_5c8c96fe9a61802c_fk_core_image_id FOREIGN KEY (image_id) REFERENCES core_image(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_instance_node_id_ae899cb7a62df9a_fk_core_node_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_instance
+    ADD CONSTRAINT core_instance_node_id_ae899cb7a62df9a_fk_core_node_id FOREIGN KEY (node_id) REFERENCES core_node(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_instance_parent_id_20ac3a3c727decb4_fk_core_instance_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_instance
+    ADD CONSTRAINT core_instance_parent_id_20ac3a3c727decb4_fk_core_instance_id FOREIGN KEY (parent_id) REFERENCES core_instance(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_instance_slice_id_2ddcfe06a9e4c985_fk_core_slice_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_instance
+    ADD CONSTRAINT core_instance_slice_id_2ddcfe06a9e4c985_fk_core_slice_id FOREIGN KEY (slice_id) REFERENCES core_slice(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_invoice_account_id_7802a49ab0cec433_fk_core_account_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_invoice
+    ADD CONSTRAINT core_invoice_account_id_7802a49ab0cec433_fk_core_account_id FOREIGN KEY (account_id) REFERENCES core_account(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_ne_template_id_7268a8d58aa4008e_fk_core_networktemplate_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_network
+    ADD CONSTRAINT core_ne_template_id_7268a8d58aa4008e_fk_core_networktemplate_id FOREIGN KEY (template_id) REFERENCES core_networktemplate(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_network_owner_id_1b5a720eac1f1d6c_fk_core_slice_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_network
+    ADD CONSTRAINT core_network_owner_id_1b5a720eac1f1d6c_fk_core_slice_id FOREIGN KEY (owner_id) REFERENCES core_slice(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_network_perm_network_id_79f8a18a0197dd1_fk_core_network_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_network_permitted_slices
+    ADD CONSTRAINT core_network_perm_network_id_79f8a18a0197dd1_fk_core_network_id FOREIGN KEY (network_id) REFERENCES core_network(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_network_permitt_slice_id_7d7e6e1a0b962f45_fk_core_slice_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_network_permitted_slices
+    ADD CONSTRAINT core_network_permitt_slice_id_7d7e6e1a0b962f45_fk_core_slice_id FOREIGN KEY (slice_id) REFERENCES core_slice(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_networkslic_network_id_2823f40a154bc2e6_fk_core_network_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_networkslice
+    ADD CONSTRAINT core_networkslic_network_id_2823f40a154bc2e6_fk_core_network_id FOREIGN KEY (network_id) REFERENCES core_network(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_networkslice_slice_id_801f34a8ab285a0_fk_core_slice_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_networkslice
+    ADD CONSTRAINT core_networkslice_slice_id_801f34a8ab285a0_fk_core_slice_id FOREIGN KEY (slice_id) REFERENCES core_slice(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_node_site_id_28bac05ef1a512ce_fk_core_site_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_node
+    ADD CONSTRAINT core_node_site_id_28bac05ef1a512ce_fk_core_site_id FOREIGN KEY (site_id) REFERENCES core_site(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_nodelab_nodelabel_id_6bbea668080a7ba5_fk_core_nodelabel_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_nodelabel_node
+    ADD CONSTRAINT core_nodelab_nodelabel_id_6bbea668080a7ba5_fk_core_nodelabel_id FOREIGN KEY (nodelabel_id) REFERENCES core_nodelabel(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_nodelabel_node_node_id_b98c651a6265ec0_fk_core_node_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_nodelabel_node
+    ADD CONSTRAINT core_nodelabel_node_node_id_b98c651a6265ec0_fk_core_node_id FOREIGN KEY (node_id) REFERENCES core_node(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_payment_account_id_3cc9ae7e7b925002_fk_core_account_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_payment
+    ADD CONSTRAINT core_payment_account_id_3cc9ae7e7b925002_fk_core_account_id FOREIGN KEY (account_id) REFERENCES core_account(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_port_instance_id_5bdb1ae59ca1dc73_fk_core_instance_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_port
+    ADD CONSTRAINT core_port_instance_id_5bdb1ae59ca1dc73_fk_core_instance_id FOREIGN KEY (instance_id) REFERENCES core_instance(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_port_network_id_655a9dc4ef32f845_fk_core_network_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_port
+    ADD CONSTRAINT core_port_network_id_655a9dc4ef32f845_fk_core_network_id FOREIGN KEY (network_id) REFERENCES core_network(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_program_owner_id_491cb2182952268e_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_program
+    ADD CONSTRAINT core_program_owner_id_491cb2182952268e_fk_core_user_id FOREIGN KEY (owner_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_re_resource_id_1126f44e743a899d_fk_core_serviceresource_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_reservedresource
+    ADD CONSTRAINT core_re_resource_id_1126f44e743a899d_fk_core_serviceresource_id FOREIGN KEY (resource_id) REFERENCES core_serviceresource(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_reservation_slice_id_4df07726653daed_fk_core_slice_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_reservation
+    ADD CONSTRAINT core_reservation_slice_id_4df07726653daed_fk_core_slice_id FOREIGN KEY (slice_id) REFERENCES core_slice(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_reservedr_instance_id_626caea355f5195e_fk_core_instance_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_reservedresource
+    ADD CONSTRAINT core_reservedr_instance_id_626caea355f5195e_fk_core_instance_id FOREIGN KEY (instance_id) REFERENCES core_instance(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_router_netw_network_id_12bc59c5ca78fdc0_fk_core_network_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_router_networks
+    ADD CONSTRAINT core_router_netw_network_id_12bc59c5ca78fdc0_fk_core_network_id FOREIGN KEY (network_id) REFERENCES core_network(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_router_networ_router_id_3cf4f94bd7970e88_fk_core_router_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_router_networks
+    ADD CONSTRAINT core_router_networ_router_id_3cf4f94bd7970e88_fk_core_router_id FOREIGN KEY (router_id) REFERENCES core_router(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_router_owner_id_13c4ac38c56512c6_fk_core_slice_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_router
+    ADD CONSTRAINT core_router_owner_id_13c4ac38c56512c6_fk_core_slice_id FOREIGN KEY (owner_id) REFERENCES core_slice(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_router_permi_network_id_8ee54284c93cd43_fk_core_network_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY "core_router_permittedNetworks"
+    ADD CONSTRAINT core_router_permi_network_id_8ee54284c93cd43_fk_core_network_id FOREIGN KEY (network_id) REFERENCES core_network(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_router_permit_router_id_3506769cdaf40bb5_fk_core_router_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY "core_router_permittedNetworks"
+    ADD CONSTRAINT core_router_permit_router_id_3506769cdaf40bb5_fk_core_router_id FOREIGN KEY (router_id) REFERENCES core_router(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_s_serviceclass_id_7fa5b55190a88c84_fk_core_serviceclass_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_serviceresource
+    ADD CONSTRAINT core_s_serviceclass_id_7fa5b55190a88c84_fk_core_serviceclass_id FOREIGN KEY ("serviceClass_id") REFERENCES core_serviceclass(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_serviceattr_service_id_5dd88bdc4a289e9e_fk_core_service_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_serviceattribute
+    ADD CONSTRAINT core_serviceattr_service_id_5dd88bdc4a289e9e_fk_core_service_id FOREIGN KEY (service_id) REFERENCES core_service(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_servicepri_role_id_2516e31051d592b9_fk_core_servicerole_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_serviceprivilege
+    ADD CONSTRAINT core_servicepri_role_id_2516e31051d592b9_fk_core_servicerole_id FOREIGN KEY (role_id) REFERENCES core_servicerole(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_servicepriv_service_id_326f2584a82884fb_fk_core_service_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_serviceprivilege
+    ADD CONSTRAINT core_servicepriv_service_id_326f2584a82884fb_fk_core_service_id FOREIGN KEY (service_id) REFERENCES core_service(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_serviceprivilege_user_id_5e78485b5063e04_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_serviceprivilege
+    ADD CONSTRAINT core_serviceprivilege_user_id_5e78485b5063e04_fk_core_user_id FOREIGN KEY (user_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_sitecredential_site_id_2ede808de256b5ca_fk_core_site_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_sitecredential
+    ADD CONSTRAINT core_sitecredential_site_id_2ede808de256b5ca_fk_core_site_id FOREIGN KEY (site_id) REFERENCES core_site(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_sited_controller_id_30291acda546cff3_fk_core_controller_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_sitedeployment
+    ADD CONSTRAINT core_sited_controller_id_30291acda546cff3_fk_core_controller_id FOREIGN KEY (controller_id) REFERENCES core_controller(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_sited_deployment_id_2073c8bc2ac33aee_fk_core_deployment_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_sitedeployment
+    ADD CONSTRAINT core_sited_deployment_id_2073c8bc2ac33aee_fk_core_deployment_id FOREIGN KEY (deployment_id) REFERENCES core_deployment(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_sitedeployment_site_id_10d760d1d81e2090_fk_core_site_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_sitedeployment
+    ADD CONSTRAINT core_sitedeployment_site_id_10d760d1d81e2090_fk_core_site_id FOREIGN KEY (site_id) REFERENCES core_site(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_siteprivilege_role_id_71e5069ae809cb06_fk_core_siterole_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_siteprivilege
+    ADD CONSTRAINT core_siteprivilege_role_id_71e5069ae809cb06_fk_core_siterole_id FOREIGN KEY (role_id) REFERENCES core_siterole(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_siteprivilege_site_id_33ec92307c1cb3bd_fk_core_site_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_siteprivilege
+    ADD CONSTRAINT core_siteprivilege_site_id_33ec92307c1cb3bd_fk_core_site_id FOREIGN KEY (site_id) REFERENCES core_site(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_siteprivilege_user_id_4a58c40e58eea8c5_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_siteprivilege
+    ADD CONSTRAINT core_siteprivilege_user_id_4a58c40e58eea8c5_fk_core_user_id FOREIGN KEY (user_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_sl_serviceclass_id_77da7f94b58488b_fk_core_serviceclass_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_slice
+    ADD CONSTRAINT core_sl_serviceclass_id_77da7f94b58488b_fk_core_serviceclass_id FOREIGN KEY ("serviceClass_id") REFERENCES core_serviceclass(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_slice_creator_id_7c5fa82797e0d281_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_slice
+    ADD CONSTRAINT core_slice_creator_id_7c5fa82797e0d281_fk_core_user_id FOREIGN KEY (creator_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_slice_default_flavor_id_7e9b60d7e92ce276_fk_core_flavor_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_slice
+    ADD CONSTRAINT core_slice_default_flavor_id_7e9b60d7e92ce276_fk_core_flavor_id FOREIGN KEY (default_flavor_id) REFERENCES core_flavor(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_slice_default_image_id_4cc5967fffec96da_fk_core_image_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_slice
+    ADD CONSTRAINT core_slice_default_image_id_4cc5967fffec96da_fk_core_image_id FOREIGN KEY (default_image_id) REFERENCES core_image(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_slice_service_id_56ec7a0b3401bf7c_fk_core_service_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_slice
+    ADD CONSTRAINT core_slice_service_id_56ec7a0b3401bf7c_fk_core_service_id FOREIGN KEY (service_id) REFERENCES core_service(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_slice_site_id_13fe089488dd45_fk_core_site_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_slice
+    ADD CONSTRAINT core_slice_site_id_13fe089488dd45_fk_core_site_id FOREIGN KEY (site_id) REFERENCES core_site(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_slicecredential_slice_id_1c79ffce7dd61f3c_fk_core_slice_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_slicecredential
+    ADD CONSTRAINT core_slicecredential_slice_id_1c79ffce7dd61f3c_fk_core_slice_id FOREIGN KEY (slice_id) REFERENCES core_slice(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_sliceprivile_role_id_1d55e0b0ac43107a_fk_core_slicerole_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_sliceprivilege
+    ADD CONSTRAINT core_sliceprivile_role_id_1d55e0b0ac43107a_fk_core_slicerole_id FOREIGN KEY (role_id) REFERENCES core_slicerole(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_sliceprivilege_slice_id_3fbaadbffeb24835_fk_core_slice_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_sliceprivilege
+    ADD CONSTRAINT core_sliceprivilege_slice_id_3fbaadbffeb24835_fk_core_slice_id FOREIGN KEY (slice_id) REFERENCES core_slice(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_sliceprivilege_user_id_253eeb2ddef0e745_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_sliceprivilege
+    ADD CONSTRAINT core_sliceprivilege_user_id_253eeb2ddef0e745_fk_core_user_id FOREIGN KEY (user_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_slicetag_slice_id_75dfa2524457256_fk_core_slice_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_slicetag
+    ADD CONSTRAINT core_slicetag_slice_id_75dfa2524457256_fk_core_slice_id FOREIGN KEY (slice_id) REFERENCES core_slice(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_tag_service_id_5e53fc9f784e1c0_fk_core_service_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tag
+    ADD CONSTRAINT core_tag_service_id_5e53fc9f784e1c0_fk_core_service_id FOREIGN KEY (service_id) REFERENCES core_service(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_te_provider_service_id_6f2ead723387396a_fk_core_service_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tenant
+    ADD CONSTRAINT core_te_provider_service_id_6f2ead723387396a_fk_core_service_id FOREIGN KEY (provider_service_id) REFERENCES core_service(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_te_subscriber_tenant_id_5c45dc20d190aa0f_fk_core_tenant_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tenant
+    ADD CONSTRAINT core_te_subscriber_tenant_id_5c45dc20d190aa0f_fk_core_tenant_id FOREIGN KEY (subscriber_tenant_id) REFERENCES core_tenant(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_tena_tenant_root_id_27d6362f903728d9_fk_core_tenantroot_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tenantrootprivilege
+    ADD CONSTRAINT core_tena_tenant_root_id_27d6362f903728d9_fk_core_tenantroot_id FOREIGN KEY (tenant_root_id) REFERENCES core_tenantroot(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_tenant_subscriber_user_id_2fad15bb074ed3d6_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tenant
+    ADD CONSTRAINT core_tenant_subscriber_user_id_2fad15bb074ed3d6_fk_core_user_id FOREIGN KEY (subscriber_user_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_tenantattribut_tenant_id_aef1dc094229bec_fk_core_tenant_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tenantattribute
+    ADD CONSTRAINT core_tenantattribut_tenant_id_aef1dc094229bec_fk_core_tenant_id FOREIGN KEY (tenant_id) REFERENCES core_tenant(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_tenantro_role_id_56bfa65de5fb299_fk_core_tenantrootrole_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tenantrootprivilege
+    ADD CONSTRAINT core_tenantro_role_id_56bfa65de5fb299_fk_core_tenantrootrole_id FOREIGN KEY (role_id) REFERENCES core_tenantrootrole(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_tenantrootprivile_user_id_77f85e71ff279b56_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_tenantrootprivilege
+    ADD CONSTRAINT core_tenantrootprivile_user_id_77f85e71ff279b56_fk_core_user_id FOREIGN KEY (user_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_user_site_id_3cc7d076f7b58a7_fk_core_site_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_user
+    ADD CONSTRAINT core_user_site_id_3cc7d076f7b58a7_fk_core_site_id FOREIGN KEY (site_id) REFERENCES core_site(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_usercredential_user_id_2db1046eae94c01a_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_usercredential
+    ADD CONSTRAINT core_usercredential_user_id_2db1046eae94c01a_fk_core_user_id FOREIGN KEY (user_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: core_userdashboardview_user_id_66fac29b72c1b321_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY core_userdashboardview
+    ADD CONSTRAINT core_userdashboardview_user_id_66fac29b72c1b321_fk_core_user_id FOREIGN KEY (user_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: d9aeae61481f9ccd18f57c7b51a38461; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_sitemap
+    ADD CONSTRAINT d9aeae61481f9ccd18f57c7b51a38461 FOREIGN KEY ("hpcService_id") REFERENCES hpc_hpcservice(service_ptr_id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: defaultoriginserver_id_3cb657d79e69f1e9_fk_hpc_originserver_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_cdnprefix
+    ADD CONSTRAINT defaultoriginserver_id_3cb657d79e69f1e9_fk_hpc_originserver_id FOREIGN KEY ("defaultOriginServer_id") REFERENCES hpc_originserver(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: djan_content_type_id_697914295151027a_fk_django_content_type_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY django_admin_log
+    ADD CONSTRAINT djan_content_type_id_697914295151027a_fk_django_content_type_id FOREIGN KEY (content_type_id) REFERENCES django_content_type(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: django_admin_log_user_id_52fdd58701c5f563_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY django_admin_log
+    ADD CONSTRAINT django_admin_log_user_id_52fdd58701c5f563_fk_core_user_id FOREIGN KEY (user_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: ea3ce8ae9fc3a320680647cef82b1a56; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_serviceprovider
+    ADD CONSTRAINT ea3ce8ae9fc3a320680647cef82b1a56 FOREIGN KEY ("hpcService_id") REFERENCES hpc_hpcservice(service_ptr_id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: h_contentprovider_id_1420a46480bb1aff_fk_hpc_contentprovider_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_contentprovider_users
+    ADD CONSTRAINT h_contentprovider_id_1420a46480bb1aff_fk_hpc_contentprovider_id FOREIGN KEY (contentprovider_id) REFERENCES hpc_contentprovider(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: h_contentprovider_id_2f27d5fdbb2459c8_fk_hpc_contentprovider_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_originserver
+    ADD CONSTRAINT h_contentprovider_id_2f27d5fdbb2459c8_fk_hpc_contentprovider_id FOREIGN KEY ("contentProvider_id") REFERENCES hpc_contentprovider(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: h_contentprovider_id_63639a8e6ca8e2cd_fk_hpc_contentprovider_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_cdnprefix
+    ADD CONSTRAINT h_contentprovider_id_63639a8e6ca8e2cd_fk_hpc_contentprovider_id FOREIGN KEY ("contentProvider_id") REFERENCES hpc_contentprovider(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: h_contentprovider_id_7acf72f284b3b30e_fk_hpc_contentprovider_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_accessmap
+    ADD CONSTRAINT h_contentprovider_id_7acf72f284b3b30e_fk_hpc_contentprovider_id FOREIGN KEY ("contentProvider_id") REFERENCES hpc_contentprovider(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: h_serviceprovider_id_1b9fb41a73ac1b6a_fk_hpc_serviceprovider_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_contentprovider
+    ADD CONSTRAINT h_serviceprovider_id_1b9fb41a73ac1b6a_fk_hpc_serviceprovider_id FOREIGN KEY ("serviceProvider_id") REFERENCES hpc_serviceprovider(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: h_serviceprovider_id_788bfbe86c90f205_fk_hpc_serviceprovider_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_sitemap
+    ADD CONSTRAINT h_serviceprovider_id_788bfbe86c90f205_fk_hpc_serviceprovider_id FOREIGN KEY ("serviceProvider_id") REFERENCES hpc_serviceprovider(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: hp_contentprovider_id_2a37a8e8bee9c03_fk_hpc_contentprovider_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_sitemap
+    ADD CONSTRAINT hp_contentprovider_id_2a37a8e8bee9c03_fk_hpc_contentprovider_id FOREIGN KEY ("contentProvider_id") REFERENCES hpc_contentprovider(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: hpc_contentprovider_us_user_id_480a7cd783fecf37_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_contentprovider_users
+    ADD CONSTRAINT hpc_contentprovider_us_user_id_480a7cd783fecf37_fk_core_user_id FOREIGN KEY (user_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: hpc_hpcservi_service_ptr_id_1b2f328c77b1554d_fk_core_service_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_hpcservice
+    ADD CONSTRAINT hpc_hpcservi_service_ptr_id_1b2f328c77b1554d_fk_core_service_id FOREIGN KEY (service_ptr_id) REFERENCES core_service(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: hpc_sitemap_cdnprefix_id_3c0b2f75c5a9a81e_fk_hpc_cdnprefix_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY hpc_sitemap
+    ADD CONSTRAINT hpc_sitemap_cdnprefix_id_3c0b2f75c5a9a81e_fk_hpc_cdnprefix_id FOREIGN KEY ("cdnPrefix_id") REFERENCES hpc_cdnprefix(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: requestroute_service_ptr_id_479451a78740d081_fk_core_service_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY requestrouter_requestrouterservice
+    ADD CONSTRAINT requestroute_service_ptr_id_479451a78740d081_fk_core_service_id FOREIGN KEY (service_ptr_id) REFERENCES core_service(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: requestrouter_serv_owner_id_5c71a9586041d2bc_fk_core_service_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY requestrouter_servicemap
+    ADD CONSTRAINT requestrouter_serv_owner_id_5c71a9586041d2bc_fk_core_service_id FOREIGN KEY (owner_id) REFERENCES core_service(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: requestrouter_servic_slice_id_50e57057a561f22f_fk_core_slice_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY requestrouter_servicemap
+    ADD CONSTRAINT requestrouter_servic_slice_id_50e57057a561f22f_fk_core_slice_id FOREIGN KEY (slice_id) REFERENCES core_slice(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: sy_volume_id_id_7dd16c76bfd7b129_fk_syndicate_storage_volume_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY syndicate_storage_volumeslice
+    ADD CONSTRAINT sy_volume_id_id_7dd16c76bfd7b129_fk_syndicate_storage_volume_id FOREIGN KEY (volume_id_id) REFERENCES syndicate_storage_volume(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: syndi_volume_id_3718f5b02d2245ce_fk_syndicate_storage_volume_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY syndicate_storage_volumeaccessright
+    ADD CONSTRAINT syndi_volume_id_3718f5b02d2245ce_fk_syndicate_storage_volume_id FOREIGN KEY (volume_id) REFERENCES syndicate_storage_volume(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: syndicate_st_service_ptr_id_26ca3aeabed50b6d_fk_core_service_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY syndicate_storage_syndicateservice
+    ADD CONSTRAINT syndicate_st_service_ptr_id_26ca3aeabed50b6d_fk_core_service_id FOREIGN KEY (service_ptr_id) REFERENCES core_service(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: syndicate_storage__owner_id_id_3d3e3d492d6cd6b5_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY syndicate_storage_volumeaccessright
+    ADD CONSTRAINT syndicate_storage__owner_id_id_3d3e3d492d6cd6b5_fk_core_user_id FOREIGN KEY (owner_id_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: syndicate_storage__owner_id_id_7a99f36bf51f2c78_fk_core_user_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY syndicate_storage_volume
+    ADD CONSTRAINT syndicate_storage__owner_id_id_7a99f36bf51f2c78_fk_core_user_id FOREIGN KEY (owner_id_id) REFERENCES core_user(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: syndicate_storage_slice_id_id_1c80c36535559ad6_fk_core_slice_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY syndicate_storage_slicesecret
+    ADD CONSTRAINT syndicate_storage_slice_id_id_1c80c36535559ad6_fk_core_slice_id FOREIGN KEY (slice_id_id) REFERENCES core_slice(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: syndicate_storage_slice_id_id_36fa39a9ae458538_fk_core_slice_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY syndicate_storage_volumeslice
+    ADD CONSTRAINT syndicate_storage_slice_id_id_36fa39a9ae458538_fk_core_slice_id FOREIGN KEY (slice_id_id) REFERENCES core_slice(id) DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: postgres
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+REVOKE ALL ON SCHEMA public FROM postgres;
+GRANT ALL ON SCHEMA public TO postgres;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+
+--
+-- PostgreSQL database dump complete
+--
+
diff --git a/frontend/xos.yaml b/frontend/xos.yaml
new file mode 100644
index 0000000..caa4b86
--- /dev/null
+++ b/frontend/xos.yaml
@@ -0,0 +1,47 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Configure XOS in frontend-only mode
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    xos:
+      type: tosca.nodes.XOS
+      properties:
+        ui_port: 9999
+        bootstrap_ui_port: 9998
+        docker_project_name: frontend
+        db_container_name: frontendbs_xos_db_1
+        frontend_only: true
+
+    /opt/xos/xos_configuration/xos_common_config:
+      type: tosca.nodes.XOSVolume
+      properties:
+          host_path: { path_join: [ SELF, CONFIG_DIR, ../common/xos_common_config, ENV_VAR ] }
+          read_only: false
+      requirements:
+          - xos:
+             node: xos
+             relationship: tosca.relationships.UsedByXOS
+
+    /opt/xos/xos_configuration/xos_vtn_config:
+      type: tosca.nodes.XOSVolume
+      properties:
+          host_path: { path_join: [ SELF, CONFIG_DIR, ../cord-pod/files/xos_vtn_config, ENV_VAR ] }
+          read_only: true
+      requirements:
+          - xos:
+              node: xos
+              relationship: tosca.relationships.UsedByXOS
+
+    /opt/xos/core/xoslib/static:
+      type: tosca.nodes.XOSVolume
+      properties:
+          host_path: { path_join: [ SELF, CONFIG_DIR, ../../core/xoslib/static/, ENV_VAR ] }
+          read_only: false
+      requirements:
+          - xos:
+              node: xos
+              relationship: tosca.relationships.UsedByXOS
\ No newline at end of file
diff --git a/opencloud/Makefile b/opencloud/Makefile
new file mode 100644
index 0000000..03168ed
--- /dev/null
+++ b/opencloud/Makefile
@@ -0,0 +1,36 @@
+xos:
+	sudo docker-compose up -d
+	bash ./wait_for_xos.sh
+	sudo docker-compose run xos python /opt/xos/tosca/run.py none /opt/xos/configurations/common/fixtures.yaml
+	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/opencloud/opencloud.yaml
+
+containers:
+	cd ../../../containers/xos; make devel
+	cd ../../../containers/synchronizer; make
+
+stop:
+	sudo docker-compose stop
+
+showlogs:
+	sudo docker-compose logs
+
+rm: stop
+	sudo docker-compose rm
+
+ps:
+	sudo docker-compose ps
+
+enter-xos:
+	sudo docker exec -it devel_xos_1 bash
+
+enter-synchronizer:
+	sudo docker exec -it devel_xos_synchronizer_openstack_1 bash
+
+upgrade_pkgs:
+	sudo pip install httpie --upgrade
+
+rebuild_xos:
+	make -C ../../../containers/xos devel
+
+rebuild_synchronizer:
+	make -C ../../../containers/synchronizer
diff --git a/opencloud/README.md b/opencloud/README.md
new file mode 100644
index 0000000..97449ed
--- /dev/null
+++ b/opencloud/README.md
@@ -0,0 +1,24 @@
+# XOS OpenCloud Portal
+
+This configuration can be used to bring up XOS on the OpenCloud portal.  It launches
+XOS in a set of Docker containers (development GUI, database, OpenStack and HPC
+synchronizers, and an HPC watcher) and configures XOS with the `opencloud.yaml` TOSCA
+file in this directory.  *docker-compose* is used to manage the containers.
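+
+A minimal bring-up sketch, assuming the default `xos` target in the Makefile in this
+directory and a working local docker-compose install:
+
+```shell
+make              # docker-compose up -d, wait for XOS, then load fixtures.yaml and opencloud.yaml via TOSCA
+make enter-xos    # optional: open a shell inside the running XOS container
+```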
+
+## Docker Helpers
+
+Stop the containers: `make stop`
+
+Restart the containers: `make stop; make`
+
+Delete the containers and relaunch them: `make rm; make`
+
+Build the containers from scratch using the local XOS source tree: `make containers`
+
+View logs: `make showlogs`
+
+See what containers are running: `make ps`
+
+Open a shell on the XOS container: `make enter-xos`
+
+Open a shell on the Synchronizer container: `make enter-synchronizer`
diff --git a/opencloud/cdn-content.yaml b/opencloud/cdn-content.yaml
new file mode 100644
index 0000000..ebf6b82
--- /dev/null
+++ b/opencloud/cdn-content.yaml
@@ -0,0 +1,223 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Template for deploying a single server with predefined properties.
+
+imports:
+   - custom_types/xos.yaml
+   - custom_types/cdn.yaml
+
+topology_template:
+  node_templates:
+    HyperCache:
+      type: tosca.nodes.CDNService
+      properties:
+          # HyperCache service must already exist before running this recipe
+          no-create: true
+          no-delete: true
+          no-update: true
+
+    # Setup the CDN Service Provider
+
+    main_service_provider:
+        type: tosca.nodes.ServiceProvider
+        requirements:
+           - hpc_service:
+                 node: HyperCache
+                 relationship: tosca.relationships.MemberOfService
+
+    # Wall Street Journal Content Provider
+
+    wsj_content:
+        type: tosca.nodes.ContentProvider
+        requirements:
+            - service_provider:
+                  node: main_service_provider
+                  relationship: tosca.relationships.MemberOfServiceProvider
+
+    www.wsj.com:
+        type: tosca.nodes.CDNPrefix
+        requirements:
+             - content_provider:
+                   node: wsj_content
+                   relationship: tosca.relationships.MemberOfContentProvider
+             - default_origin_server:
+                   node: http_www.wsj.com
+                   relationship: tosca.relationships.DefaultOriginServer
+
+    si.wsj.net:
+        type: tosca.nodes.CDNPrefix
+        requirements:
+             - content_provider:
+                   node: wsj_content
+                   relationship: tosca.relationships.MemberOfContentProvider
+             - default_origin_server:
+                   node: http_si.wsj.net
+                   relationship: tosca.relationships.DefaultOriginServer
+
+    s.wsj.net:
+        type: tosca.nodes.CDNPrefix
+        requirements:
+             - content_provider:
+                   node: wsj_content
+                   relationship: tosca.relationships.MemberOfContentProvider
+             - default_origin_server:
+                   node: http_s.wsj.net
+                   relationship: tosca.relationships.DefaultOriginServer
+
+    ore.wsj.net:
+        type: tosca.nodes.CDNPrefix
+        requirements:
+             - content_provider:
+                   node: wsj_content
+                   relationship: tosca.relationships.MemberOfContentProvider
+             - default_origin_server:
+                   node: http_ore.wsj.net
+                   relationship: tosca.relationships.DefaultOriginServer
+
+    http_www.wsj.com:
+        type: tosca.nodes.OriginServer
+        requirements:
+             - content_provider:
+                   node: wsj_content
+                   relationship: tosca.relationships.MemberOfContentProvider
+
+    http_si.wsj.net:
+        type: tosca.nodes.OriginServer
+        requirements:
+             - content_provider:
+                   node: wsj_content
+                   relationship: tosca.relationships.MemberOfContentProvider
+
+    http_s.wsj.net:
+        type: tosca.nodes.OriginServer
+        requirements:
+             - content_provider:
+                   node: wsj_content
+                   relationship: tosca.relationships.MemberOfContentProvider
+
+    http_ore.wsj.net:
+        type: tosca.nodes.OriginServer
+        requirements:
+             - content_provider:
+                   node: wsj_content
+                   relationship: tosca.relationships.MemberOfContentProvider
+
+    # ON.Lab content provider
+
+    on_lab_content:
+        type: tosca.nodes.ContentProvider
+        requirements:
+            - service_provider:
+                  node: main_service_provider
+                  relationship: tosca.relationships.MemberOfServiceProvider
+
+    # Create CDN prefix onlab.vicci.org
+    onlab.vicci.org:

+        type: tosca.nodes.CDNPrefix

+        requirements:

+             - content_provider:

+                   node: on_lab_content

+                   relationship: tosca.relationships.MemberOfContentProvider
+
+    http_onos-videos.s3.amazonaws.com:
+        type: tosca.nodes.OriginServer

+        requirements:

+             - content_provider:

+                   node: on_lab_content

+                   relationship: tosca.relationships.MemberOfContentProvider
+
+    # Create origin server s3-us-west-1.amazonaws.com
+    http_s3-us-west-1.amazonaws.com:
+        type: tosca.nodes.OriginServer
+        requirements:
+             - content_provider:
+                   node: on_lab_content
+                   relationship: tosca.relationships.MemberOfContentProvider
+
+    # Create origin server s3.amazonaws.com
+    http_s3.amazonaws.com:
+        type: tosca.nodes.OriginServer
+        requirements:
+             - content_provider:
+                   node: on_lab_content
+                   relationship: tosca.relationships.MemberOfContentProvider
+
+    # Test Content Provider
+
+    testcp2:
+        type: tosca.nodes.ContentProvider
+        requirements:
+            - service_provider:
+                  node: main_service_provider
+                  relationship: tosca.relationships.MemberOfServiceProvider
+
+    http_www.cs.arizona.edu:
+        type: tosca.nodes.OriginServer
+        requirements:
+             - content_provider:
+                   node: testcp2
+                   relationship: tosca.relationships.MemberOfContentProvider
+
+    test-cdn.opencloud.us:
+        type: tosca.nodes.CDNPrefix
+        requirements:
+             - content_provider:
+                   node: testcp2
+                   relationship: tosca.relationships.MemberOfContentProvider
+
+             - default_origin_server:
+                   node: http_www.cs.arizona.edu
+                   relationship: tosca.relationships.DefaultOriginServer
+
+    # Health Checks
+
+    healthcheck_dns_onlab.vicci.org:
+        type: tosca.nodes.HpcHealthCheck
+        requirements:
+           - hpc_service:
+                 node: HyperCache
+                 relationship: tosca.relationships.MemberOfService
+        properties:
+           kind: dns
+           resource_name: onlab.vicci.org
+
+    healthcheck_dns_test-cdn.opencloud.us:
+        type: tosca.nodes.HpcHealthCheck
+        requirements:
+           - hpc_service:
+                 node: HyperCache
+                 relationship: tosca.relationships.MemberOfService
+        properties:
+           kind: dns
+           resource_name: test-cdn.opencloud.us
+
+    healthcheck_http_test-cdn-index:
+        type: tosca.nodes.HpcHealthCheck
+        requirements:
+           - hpc_service:
+                 node: HyperCache
+                 relationship: tosca.relationships.MemberOfService
+        properties:
+           kind: http
+           resource_name: test-cdn.opencloud.us:/
+           result_contains: Lowenthal
+
+    healthcheck_http_onlab_onos_image:
+        type: tosca.nodes.HpcHealthCheck
+        requirements:
+           - hpc_service:
+                 node: HyperCache
+                 relationship: tosca.relationships.MemberOfService
+        properties:
+           kind: http
+           resource_name: onlab.vicci.org:/onos/vm/onos-tutorial-1.1.0r220-ovf.zip
+
+    healthcheck_http_onlab_mininet_image:
+        type: tosca.nodes.HpcHealthCheck
+        requirements:
+           - hpc_service:
+                 node: HyperCache
+                 relationship: tosca.relationships.MemberOfService
+        properties:
+           kind: http
+           resource_name: onlab.vicci.org:/mininet-vm/mininet-2.1.0-130919-ubuntu-13.04-server-amd64-ovf.zip
diff --git a/opencloud/cdn-opencloud.yaml b/opencloud/cdn-opencloud.yaml
new file mode 100644
index 0000000..f66e0f2
--- /dev/null
+++ b/opencloud/cdn-opencloud.yaml
@@ -0,0 +1,77 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Template for deploying a single server with predefined properties.
+
+imports:
+   - custom_types/xos.yaml
+   - custom_types/cdn.yaml
+
+topology_template:
+  node_templates:
+    HyperCache:
+      type: tosca.nodes.CDNService
+      description: Content Delivery Network
+      properties:
+          view_url: /admin/hpc/hpcservice/$id$/
+          icon_url: /static/primarycons_blue/network.png
+
+    onlab:
+      type: tosca.nodes.Site
+      properties:
+          # Assume the onlab site exists, and don't touch it
+          no-create: true
+          no-update: true
+          no-delete: true
+
+    onlab_cmi:
+      description: CMI Slice
+      type: tosca.nodes.Slice
+      properties:
+          exposed_ports: tcp 8003, tcp 8004, tcp 8140
+      requirements:
+          - cdn_service:
+              node: HyperCache
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: onlab
+              relationship: tosca.relationships.MemberOfSite
+
+    onlab_hpc:
+      description: HyperCache Slice
+      type: tosca.nodes.Slice
+      properties:
+          exposed_ports: tcp 2120:2128, tcp 3200:3209, tcp 8006, tcp 8009, tcp 8015, tcp 80
+      requirements:
+          - cdn_service:
+              node: HyperCache
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: onlab
+              relationship: tosca.relationships.MemberOfSite
+
+    onlab_dnsredir:
+      description: HyperCache Slice
+      type: tosca.nodes.Slice
+      properties:
+          exposed_ports: udp 53541, tcp 8016
+      requirements:
+          - cdn_service:
+              node: HyperCache
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: onlab
+              relationship: tosca.relationships.MemberOfSite
+
+    onlab_dnsdemux:
+      description: HyperCache Slice
+      type: tosca.nodes.Slice
+      properties:
+          exposed_ports: udp 53, tcp 8017
+      requirements:
+          - cdn_service:
+              node: HyperCache
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: onlab
+              relationship: tosca.relationships.MemberOfSite
+
diff --git a/opencloud/cdn-syndicate-content.yaml b/opencloud/cdn-syndicate-content.yaml
new file mode 100644
index 0000000..06dc623
--- /dev/null
+++ b/opencloud/cdn-syndicate-content.yaml
@@ -0,0 +1,52 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Template for deploying a single server with predefined properties.
+
+imports:
+   - custom_types/xos.yaml
+   - custom_types/cdn.yaml
+
+topology_template:
+  node_templates:
+    HyperCache:
+      type: tosca.nodes.CDNService
+      properties:
+          # HyperCache service must already exist before running this recipe
+          no-create: true
+          no-delete: true
+          no-update: true
+
+    # Setup the Syndicate Devel Service Provider
+
+    main_service_provider:
+        type: tosca.nodes.ServiceProvider
+        requirements:
+           - hpc_service:
+                 node: HyperCache
+                 relationship: tosca.relationships.MemberOfService
+
+    syndicate_devel:
+        type: tosca.nodes.ContentProvider
+        requirements:
+            - service_provider:
+                  node: main_service_provider
+                  relationship: tosca.relationships.MemberOfServiceProvider
+
+#    http_node2.cs.arizona.edu:
+#        type: tosca.nodes.OriginServer
+#        requirements:
+#             - content_provider:
+#                   node: syndicate_devel
+#                   relationship: tosca.relationships.MemberOfContentProvider
+
+    syndicate-devel.opencloud.us:
+        type: tosca.nodes.CDNPrefix
+        requirements:
+             - content_provider:
+                   node: syndicate_devel
+                   relationship: tosca.relationships.MemberOfContentProvider
+
+#             - default_origin_server:
+#                   node: http_node2.cs.arizona.edu
+#                   relationship: tosca.relationships.DefaultOriginServer
+
diff --git a/opencloud/docker-compose.yml b/opencloud/docker-compose.yml
new file mode 100644
index 0000000..8e4445a
--- /dev/null
+++ b/opencloud/docker-compose.yml
@@ -0,0 +1,73 @@
+xos_db:
+    image: xosproject/xos-postgres
+    expose:
+        - "5432"
+
+xos_synchronizer_openstack:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "update-ca-certificates; sleep 120; python /opt/xos/synchronizers/openstack/xos-synchronizer.py"
+    #command: sleep 86400
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: openstack
+    links:
+        - xos_db
+    volumes:
+        - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+        - /usr/local/share/ca-certificates:/usr/local/share/ca-certificates:ro
+        - ./files/xos_opencloud_config:/opt/xos/xos_configuration/xos_opencloud_config:ro
+        - ./images:/opt/xos/images:ro
+    log_driver: "json-file"
+    log_opt:
+            max-size: "100k"
+            max-file: "5"
+
+xos_synchronizer_hpc:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/hpc/hpc-synchronizer.py -C /opt/xos/synchronizers/hpc/hpc_synchronizer_config"
+    #command: sleep 86400
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: hpc
+    links:
+        - xos_db
+    volumes:
+        - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+        - /usr/local/share/ca-certificates:/usr/local/share/ca-certificates:ro
+        - ./files/xos_opencloud_config:/opt/xos/xos_configuration/xos_opencloud_config:ro
+    log_driver: "json-file"
+    log_opt:
+            max-size: "100k"
+            max-file: "5"
+
+xos_watcher_hpc:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/hpc/hpc_watcher.py"
+    #command: sleep 86400
+    labels:
+        org.xosproject.kind: watcher
+        org.xosproject.target: hpc
+    links:
+        - xos_db
+    volumes:
+        - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+        - /usr/local/share/ca-certificates:/usr/local/share/ca-certificates:ro
+        - ./files/xos_opencloud_config:/opt/xos/xos_configuration/xos_opencloud_config:ro
+
+# FUTURE
+#xos_swarm_synchronizer:
+#    image: xosproject/xos-swarm-synchronizer
+#    labels:
+#        org.xosproject.kind: synchronizer
+#        org.xosproject.target: swarm
+
+xos:
+    image: xosproject/xos
+    command: python /opt/xos/manage.py runserver 0.0.0.0:8000 --insecure --makemigrations
+    ports:
+        - "80:8000"
+    links:
+        - xos_db
+    volumes:
+      - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
+      - ./files/xos_opencloud_config:/opt/xos/xos_configuration/xos_opencloud_config:ro
diff --git a/opencloud/files/xos_opencloud_config b/opencloud/files/xos_opencloud_config
new file mode 100644
index 0000000..62291b6
--- /dev/null
+++ b/opencloud/files/xos_opencloud_config
@@ -0,0 +1,3 @@
+[server]
+restapi_hostname=portal.opencloud.us
+restapi_port=80
diff --git a/opencloud/opencloud.yaml b/opencloud/opencloud.yaml
new file mode 100644
index 0000000..c71369a
--- /dev/null
+++ b/opencloud/opencloud.yaml
@@ -0,0 +1,1661 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Adds OpenCloud Sites, Deployments, and Controllers.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+
+# Images
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+      properties:
+         disk_format: QCOW2
+         container_format: BARE
+
+# network template
+    public-shared-ipv4:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          visibility: private
+          translation: NAT
+          shared_network_name: nat-net
+
+# deployments
+    backbone:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+      requirements:
+          - image:
+              node: trusty-server-multi-nic
+              relationship: tosca.relationships.SupportsImage
+
+    campus:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+      requirements:
+          - image:
+              node: trusty-server-multi-nic
+              relationship: tosca.relationships.SupportsImage
+
+    ec2:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+      requirements:
+          - image:
+              node: trusty-server-multi-nic
+              relationship: tosca.relationships.SupportsImage
+
+# controllers
+    backbone-atl:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: backbone
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    backbone-chi:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: backbone
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    backbone-hou:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: backbone
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    backbone-kan:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: backbone
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    backbone-lax:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: backbone
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    backbone-nyc:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: backbone
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    backbone-slc:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: backbone
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    backbone-sea:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: backbone
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    backbone-sng:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: backbone
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    backbone-wdc:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: backbone
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    campus-arizona:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: campus
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    campus-princeton:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: campus
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    campus-stanford:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: campus
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    campus-washington:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: campus
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    campus-gatech:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: campus
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    campus-internet2:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: campus
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    campus-singapore:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: campus
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+    campus-onlab:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: campus
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Juno
+          auth_url: http://localhost:35357/v2.0/
+          admin_user: admin
+          admin_password: admin
+          admin_tenant: admin
+          domain: Default
+
+# Sites
+
+    i2-atl:
+      type: tosca.nodes.Site
+      properties:
+          display_name: I2-atl
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: backbone
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: backbone-atl
+                       relationship: tosca.relationships.UsesController
+
+    i2-chi:
+      type: tosca.nodes.Site
+      properties:
+          display_name: I2-chi
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: backbone
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: backbone-chi
+                       relationship: tosca.relationships.UsesController
+
+    i2-hou:
+      type: tosca.nodes.Site
+      properties:
+          display_name: I2-hou
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: backbone
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: backbone-hou
+                       relationship: tosca.relationships.UsesController
+   
+    i2-kas:
+      type: tosca.nodes.Site
+      properties:
+          display_name: I2-kas
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: backbone
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: backbone-kan
+                       relationship: tosca.relationships.UsesController
+
+    i2-lax:
+      type: tosca.nodes.Site
+      properties:
+          display_name: I2-lax
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: backbone
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: backbone-lax
+                       relationship: tosca.relationships.UsesController
+
+    i2-nyc:
+      type: tosca.nodes.Site
+      properties:
+          display_name: I2-nyc
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: backbone
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: backbone-nyc
+                       relationship: tosca.relationships.UsesController
+
+    i2-slc:
+      type: tosca.nodes.Site
+      properties:
+          display_name: I2-slc
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: backbone
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: backbone-slc
+                       relationship: tosca.relationships.UsesController
+
+    i2-sea:
+      type: tosca.nodes.Site
+      properties:
+          display_name: I2-sea
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: backbone
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: backbone-sea
+                       relationship: tosca.relationships.UsesController
+
+    i2-sng:
+      type: tosca.nodes.Site
+      properties:
+          display_name: I2-sng
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: backbone
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: backbone-sng
+                       relationship: tosca.relationships.UsesController
+
+    i2-wdc:
+      type: tosca.nodes.Site
+      properties:
+          display_name: I2-wdc
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: backbone
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: backbone-wdc
+                       relationship: tosca.relationships.UsesController
+
+    princeton:
+      type: tosca.nodes.Site
+      properties:
+          display_name: Princeton
+          site_url: http://opencloud.us/
+          hosts_nodes: true
+      requirements:
+          - deployment:
+               node: campus
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: campus-princeton
+                       relationship: tosca.relationships.UsesController
+
+    stanford:
+      type: tosca.nodes.Site
+      properties:
+          display_name: Stanford
+          site_url: http://opencloud.us/
+          hosts_nodes: true 
+      requirements:
+          - deployment:
+               node: campus
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: campus-stanford
+                       relationship: tosca.relationships.UsesController
+
+    washington:
+      type: tosca.nodes.Site
+      properties:
+          display_name: Washington
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: campus
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: campus-washington
+                       relationship: tosca.relationships.UsesController
+
+    gtech:
+      type: tosca.nodes.Site
+      properties:
+          display_name: GTech
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: campus
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: campus-gatech
+                       relationship: tosca.relationships.UsesController
+
+    arizona:
+      type: tosca.nodes.Site
+      properties:
+          display_name: Arizona
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: campus
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: campus-arizona
+                       relationship: tosca.relationships.UsesController
+
+    internet2:
+      type: tosca.nodes.Site
+      properties:
+          display_name: Internet2
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: campus
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: campus-internet2
+                       relationship: tosca.relationships.UsesController
+
+    singapore:
+      type: tosca.nodes.Site
+      properties:
+          display_name: Singapore
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: campus
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: campus-singapore
+                       relationship: tosca.relationships.UsesController
+
+    onlab:
+      type: tosca.nodes.Site
+      properties:
+          display_name: ON.Lab
+          site_url: http://opencloud.us/
+          hosts_nodes: false 
+      requirements:
+          - deployment:
+               node: campus
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: campus-onlab
+                       relationship: tosca.relationships.UsesController
+
+# Users
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: princeton
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: XOS
+          lastname: admin
+          password: letmein
+
+    scott@onlab.us:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: onlab
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: Scott
+          lastname: Baker
+          password: letmein
+
+    llp@onlab.us:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: onlab
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: Larry
+          lastname: Peterson
+          password: letmein
+
+    luca@onlab.us:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: onlab
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: Luca
+          lastname: Prete
+          password: letmein
+
+    sapanb@cs.princeton.edu:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: onlab
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: Sapan
+          lastname: Bhatia
+          password: letmein
+
+    tmack@cs.princeton.edu:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: princeton
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: Tony
+          lastname: Mack
+          password: letmein
+
+    acb@cs.princeton.edu:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: princeton
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: Andy
+          lastname: Bavier
+          password: letmein
+
+    mhw@cs.princeton.edu:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: princeton
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: Mike
+          lastname: Wawrzoniak
+          password: letmein
+
+    jcnelson@cs.princeton.edu:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: princeton
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: Jude
+          lastname: Nelson
+          password: letmein
+
+    rnobrega@internet2.edu:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: internet2
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: Ryan
+          lastname: Nobrega
+          password: letmein
+
+    matt@internet2.edu:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: internet2
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: Matthew
+          lastname: Zekauskas
+          password: letmein
+
+    jhh@cs.arizona.edu:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: arizona
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: John
+          lastname: Hartman
+          password: letmein
+
+
+# Slices
+    princeton_publicdata:
+      type: tosca.nodes.Slice
+      requirements:
+          - site:
+              node: princeton
+              relationship: tosca.relationships.MemberOfSite
+
+# Services
+    syndicate:
+      type: tosca.nodes.Service
+      capabilities:
+          scalable:
+              properties:
+                  max_instances: 25
+                  min_instances: 1
+                  default_instances: 1
+# Nodes
+    node37.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node39.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node41.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node43.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node45.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node49.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node51.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node52.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node54.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node55.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node57.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node59.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node65.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node66.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node67.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node68.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node69.princeton.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: princeton
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node2.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node3.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node5.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node6.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node7.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node8.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node9.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node10.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node11.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node12.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node13.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node14.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node15.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node16.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node17.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node18.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node19.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node20.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node21.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node22.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node23.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node24.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node25.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node26.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node27.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node28.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node29.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node30.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node31.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node32.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node33.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node34.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node35.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node37.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node38.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node39.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node40.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node41.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node42.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node43.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node44.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node45.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node46.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node47.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node48.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node49.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node50.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node51.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node52.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node54.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node55.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node56.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node57.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node58.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node59.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node60.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node61.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node62.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node63.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node64.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node67.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node68.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node69.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
+
+
+    node70.stanford.vicci.org:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: campus
+            relationship: tosca.relationships.MemberOfDeployment
diff --git a/opencloud/wait_for_xos.sh b/opencloud/wait_for_xos.sh
new file mode 100644
index 0000000..4f486af
--- /dev/null
+++ b/opencloud/wait_for_xos.sh
@@ -0,0 +1,12 @@
+#! /bin/bash
+echo "Waiting for XOS to come up"
+until http 0.0.0.0:80 &> /dev/null
+do
+    sleep 1
+    RUNNING_CONTAINER=`sudo docker ps|grep "xos"|awk '{print $NF}'`
+    if [[ $RUNNING_CONTAINER == "" ]]; then
+        echo "Container may have failed; check with \"make showlogs\""
+        exit 1
+    fi
+done
+echo "XOS is ready"
diff --git a/test-standalone/Makefile b/test-standalone/Makefile
new file mode 100644
index 0000000..626495a
--- /dev/null
+++ b/test-standalone/Makefile
@@ -0,0 +1,137 @@
+include ../common/Makedefs
+
+MYIP:=$(shell hostname -i)
+CONFIG_DIR:=$(shell pwd)
+DOCKER_COMPOSE_YML=./onboarding-docker-compose/docker-compose.yml
+BOOTSTRAP_YML=./docker-compose-bootstrap.yml
+DOCKER_PROJECT=teststandalone
+BOOTSTRAP_PROJECT=teststandalonebs
+XOS_BOOTSTRAP_PORT=9998
+XOS_UI_PORT=9999
+DB_NAME=$(BOOTSTRAP_PROJECT)_xos_db_1
+
+define TRUNCATE_FN
+	CREATE OR REPLACE FUNCTION truncate_tables(username IN VARCHAR) RETURNS void AS $$$$
+	DECLARE
+			statements CURSOR FOR
+					SELECT tablename FROM pg_tables
+					WHERE tableowner = username AND schemaname = 'public';
+	BEGIN
+			FOR stmt IN statements LOOP
+					EXECUTE 'TRUNCATE TABLE ' || quote_ident(stmt.tablename) || ' CASCADE;';
+			END LOOP;
+	END;
+	$$$$ LANGUAGE plpgsql;
+endef
+export TRUNCATE_FN
+
+prepare: xos
+	sudo docker exec -i teststandalone_xos_ui_1 bash -c "cd /opt/xos/tests/api; npm install --production"
+	sudo docker exec teststandalone_xos_ui_1 pip install dredd_hooks
+
+xos: prereqs dirs download_services bootstrap onboarding
+
+prereqs:
+	sudo make -f ../common/Makefile.prereqs
+
+dirs:
+	# if this directory doesn't exist, then docker-compose will create it with root permission
+	mkdir -p key_import
+	mkdir -p onboarding-docker-compose
+
+bootstrap:
+	echo "[BOOTSTRAP]"
+	sudo rm -f onboarding-docker-compose/docker-compose.yml
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) up -d
+	bash ../common/wait_for_xos_port.sh 9998
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run -e CONFIG_DIR=$(CONFIG_DIR) xos_bootstrap_ui python /opt/xos/tosca/run.py none - < xos.yaml
+
+download_services:
+	make -f ../common/Makefile.services
+
+update_services:
+	make -f ../common/Makefile.services update
+
+onboarding:
+	echo "[ONBOARDING]"
+	bash ../common/wait_for_onboarding_ready.sh 9998 xos
+	sudo bash -c "echo somekey > key_import/vsg_rsa"
+	sudo bash -c "echo somekey > key_import/vsg_rsa.pub"
+	sudo bash -c "echo somekey > key_import/volt_rsa"
+	sudo bash -c "echo somekey > key_import/volt_rsa.pub"
+	sudo bash -c "echo somekey > key_import/onos_rsa"
+	sudo bash -c "echo somekey > key_import/onos_rsa.pub"
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run xos_bootstrap_ui python /opt/xos/tosca/run.py None - < ../common/disable-onboarding.yaml
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run xos_bootstrap_ui python /opt/xos/tosca/run.py None - < $(SERVICE_DIR)/vrouter/xos/vrouter-onboard.yaml
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run xos_bootstrap_ui python /opt/xos/tosca/run.py None - < $(SERVICE_DIR)/olt/xos/volt-onboard.yaml
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run xos_bootstrap_ui python /opt/xos/tosca/run.py None - < $(SERVICE_DIR)/vsg/xos/vsg-onboard.yaml
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run xos_bootstrap_ui python /opt/xos/tosca/run.py None - < $(SERVICE_DIR)/vtr/xos/vtr-onboard.yaml
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run xos_bootstrap_ui python /opt/xos/tosca/run.py None - < $(SERVICE_DIR)/onos-service/xos/onos-onboard.yaml
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) run xos_bootstrap_ui python /opt/xos/tosca/run.py None - < ../common/enable-onboarding.yaml
+	bash ../common/wait_for_onboarding_ready.sh 9998 services/vrouter
+	bash ../common/wait_for_onboarding_ready.sh 9998 services/volt
+	bash ../common/wait_for_onboarding_ready.sh 9998 services/vsg
+	bash ../common/wait_for_onboarding_ready.sh 9998 services/vtr
+	bash ../common/wait_for_onboarding_ready.sh 9998 services/onos
+	bash ../common/wait_for_onboarding_ready.sh 9998 xos
+	bash ../common/wait_for_xos_port.sh 9999
+
+restore-initial-db-status:
+	sudo docker exec $(DB_NAME) psql -U postgres -d xos -c "$$TRUNCATE_FN" >/dev/null 2>&1
+	sudo docker exec $(DB_NAME) psql -U postgres -d xos -c "SELECT truncate_tables('postgres');" >/dev/null 2>&1
+	sudo docker exec $(DB_NAME) psql -U postgres -d xos -c "SELECT setval('core_tenant_id_seq', 1)" >/dev/null 2>&1
+	sudo docker exec $(DB_NAME) psql -U postgres -d xos -c "SELECT setval('core_deployment_id_seq', 1)" >/dev/null 2>&1
+	sudo docker exec $(DB_NAME) psql -U postgres -d xos -c "SELECT setval('core_flavor_id_seq', 1)" >/dev/null 2>&1
+	sudo docker exec $(DB_NAME) psql -U postgres -d xos -c "SELECT setval('core_service_id_seq', 1)" >/dev/null 2>&1
+	sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) run xos_ui python /opt/xos/manage.py --noobserver --nomodelpolicy loaddata /opt/xos/core/fixtures/core_initial_data.json
+	sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) run xos_ui python /opt/xos/tosca/run.py none - < ../common/fixtures.yaml
+	sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) run xos_ui python /opt/xos/tosca/run.py none - < ../common/mydeployment.yaml
+	sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) run xos_ui python /opt/xos/tosca/run.py padmin@vicci.org - < ../frontend/sample.yaml
+
+	# sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord-pod/setup.yaml
+	# sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord-pod/nodes.yaml
+	# sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord-pod/images.yaml
+	sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) run xos_ui python /opt/xos/tosca/run.py padmin@vicci.org - < ../cord-pod/mgmt-net.yaml
+	sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) run xos_ui python /opt/xos/tosca/run.py padmin@vicci.org - < services.yaml
+	sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) run xos_ui python /opt/xos/tosca/run.py padmin@vicci.org - < ../cord-pod/cord-volt-devices.yaml
+
+
+test: restore-initial-db-status
+	# RUN TESTS
+	sudo docker cp $(XOS_DIR)/apiary.apib teststandalone_xos_ui_1:/opt/xos/tests/api/apiary.apib
+	sudo docker exec -i teststandalone_xos_ui_1 bash -c "cd /opt/xos/tests/api; npm test"
+
+test-tosca: restore-initial-db-status
+	sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) run xos_ui bash -c "cd /opt/xos/tosca/tests; python ./alltests.py"
+
+test-ui: restore-initial-db-status
+	sudo docker exec -u root -i teststandalone_xos_ui_1 bash -c "cd /opt/xos/tests/ui-e2e; python xos-e2e-test.py"
+
+base-container: 
+	make -f ../common/Makefile.containers xos_base
+
+devel-container: base-container
+	make -f ../common/Makefile.containers xos_devel synchronizer onboarding_synchronizer
+
+containers: devel-container
+	make -f ../common/Makefile.containers xos_test
+
+stop:
+	test ! -s $(DOCKER_COMPOSE_YML) || sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) stop
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) stop
+
+showlogs:
+	sudo docker-compose logs
+
+rm: stop
+	test ! -s $(DOCKER_COMPOSE_YML) || sudo docker-compose -p $(DOCKER_PROJECT) -f $(DOCKER_COMPOSE_YML) rm -f
+	sudo docker-compose -p $(BOOTSTRAP_PROJECT) -f $(BOOTSTRAP_YML) rm -f
+
+docker-clean:
+	sudo docker rm -f $(shell sudo docker ps -aq)
+
+enter-xos:
+	sudo docker exec -ti teststandalone_xos_ui_1 bash
+
+enter-xos-db:
+	sudo docker exec -ti $(DB_NAME) bash
diff --git a/test-standalone/README.md b/test-standalone/README.md
new file mode 100644
index 0000000..32e7c95
--- /dev/null
+++ b/test-standalone/README.md
@@ -0,0 +1,11 @@
+# API Test Configuration
+
+This configuration is intended for testing the API.
+To use it:
+
+- `make containers`
+- `make`
+
+Then run `make test` whenever needed (the `xos/api` folder is shared with the container).
+
+Note that these tests are automatically executed by [Jenkins](https://jenkins.opencord.org/)
\ No newline at end of file
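A minimal sketch of the workflow this README describes, using only the targets defined in the test-standalone Makefile above; the container and project names assume the default `teststandalone` Docker project:

```bash
# Build the XOS base, devel, synchronizer, and test images.
make containers

# Default target (prepare): bootstrap XOS, onboard the services, and install
# the npm/dredd test dependencies inside the teststandalone_xos_ui_1 container.
make

# Reset the database to its initial state and run the API test suite.
make test

# Optionally run the TOSCA suite, then tear everything down.
make test-tosca
make rm
```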
diff --git a/test-standalone/docker-compose-bootstrap.yml b/test-standalone/docker-compose-bootstrap.yml
new file mode 100644
index 0000000..857db96
--- /dev/null
+++ b/test-standalone/docker-compose-bootstrap.yml
@@ -0,0 +1,35 @@
+xos_db:
+    image: xosproject/xos-postgres
+    expose:
+        - "5432"
+
+xos_bootstrap_ui:
+    image: xosproject/xos
+    command: python /opt/xos/manage.py runserver 0.0.0.0:9998 --insecure --makemigrations
+    ports:
+        - "9998:9998"
+    links:
+        - xos_db
+    volumes:
+      - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config
+      - ../vtn/files/xos_vtn_config:/opt/xos/xos_configuration/xos_vtn_config:ro
+      - ../../xos_services:/opt/xos_services
+
+xos_synchronizer_onboarding:
+    image: xosproject/xos-synchronizer-onboarding
+    command: bash -c "cd /opt/xos/synchronizers/onboarding; ./run.sh"
+#    command: sleep 86400
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: onboarding
+    links:
+        - xos_db
+    volumes:
+        - /var/run/docker.sock:/var/run/docker.sock
+        - ./key_import:/opt/xos/key_import:ro
+        - ./onboarding-docker-compose:/opt/xos/synchronizers/onboarding/docker-compose
+        - ../../xos_services:/opt/xos_services
+    log_driver: "json-file"
+    log_opt:
+        max-size: "100k"
+        max-file: "5"
diff --git a/test-standalone/services.yaml b/test-standalone/services.yaml
new file mode 100644
index 0000000..40e0127
--- /dev/null
+++ b/test-standalone/services.yaml
@@ -0,0 +1,258 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Just enough Tosca to get the vSG slice running on the CORD POD
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    # CORD Services
+    service#vtr:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /admin/vtr/vtrservice/$id$/
+          kind: vTR
+          replaces: service_vtr
+
+    service#volt:
+      type: tosca.nodes.VOLTService
+      requirements:
+          - vsg_tenant:
+              node: service#vsg
+              relationship: tosca.relationships.TenantOfService
+      properties:
+          view_url: /admin/cord/voltservice/$id$/
+          kind: vOLT
+          replaces: service_volt
+
+    addresses_vsg:
+      type: tosca.nodes.AddressPool
+      properties:
+          addresses: 10.168.0.0/24
+          gateway_ip: 10.168.0.1
+          gateway_mac: 02:42:0a:a8:00:01
+
+    addresses_exampleservice-public:
+      type: tosca.nodes.AddressPool
+      properties:
+          addresses: 10.168.1.0/24
+          gateway_ip: 10.168.1.1
+          gateway_mac: 02:42:0a:a8:00:01
+
+    service#vsg:
+      type: tosca.nodes.VSGService
+      requirements:
+          - vrouter_tenant:
+              node: service#vrouter
+              relationship: tosca.relationships.TenantOfService
+      properties:
+          view_url: /admin/cord/vsgservice/$id$/
+          # backend_network_label: hpc_client
+          private_key_fn: /opt/xos/synchronizers/vcpe/vcpe_private_key
+#          node_label: label_vsg
+          replaces: service_vsg
+
+    service#vrouter:
+      type: tosca.nodes.VRouterService
+      properties:
+          view_url: /admin/vrouter/vrouterservice/$id$/
+          replaces: service_vrouter
+      requirements:
+          - addresses_vsg:
+              node: addresses_vsg
+              relationship: tosca.relationships.ProvidesAddresses
+          - addresses_service1:
+              node: addresses_exampleservice-public
+              relationship: tosca.relationships.ProvidesAddresses
+
+#     service#fabric:
+#       type: tosca.nodes.FabricService
+#       properties:
+#           view_url: /admin/fabric/fabricservice/$id$/
+#           replaces: service_fabric
+
+#     service#ONOS_Fabric:
+#       type: tosca.nodes.ONOSService
+#       requirements:
+#       properties:
+#           kind: onos
+#           view_url: /admin/onos/onosservice/$id$/
+#           no_container: true
+#           rest_hostname: onos-fabric
+#           replaces: service_ONOS_Fabric
+
+#     service#ONOS_CORD:
+#       type: tosca.nodes.ONOSService
+#       properties:
+#           no-delete: true
+#           no-create: false
+#           no-update: true
+
+#     # vOLT_ONOS_app:
+#     #   type: tosca.nodes.ONOSvOLTApp
+#     #   requirements:
+#     #       - onos_tenant:
+#     #           node: service#ONOS_CORD
+#     #           relationship: tosca.relationships.TenantOfService
+#     #       - volt_service:
+#     #           node: service#volt
+#     #           relationship: tosca.relationships.UsedByService
+#     #   properties:
+#     #       install_dependencies: onos-ext-notifier-1.0-SNAPSHOT.oar, onos-ext-volt-event-publisher-1.0-SNAPSHOT.oar
+#     #       dependencies: org.onosproject.openflow-base, org.onosproject.olt, org.ciena.onos.ext_notifier, org.ciena.onos.volt_event_publisher
+#     #       autogenerate: volt-network-cfg
+
+#     # vRouter_ONOS_app:
+#     #   type: tosca.nodes.ONOSvRouterApp
+#     #   requirements:
+#     #       - onos_tenant:
+#     #           node: service#ONOS_Fabric
+#     #           relationship: tosca.relationships.TenantOfService
+#     #       - vrouter_service:
+#     #           node: service#vrouter
+#     #           relationship: tosca.relationships.UsedByService
+#     #   properties:
+#     #       dependencies: org.onosproject.vrouter
+#     #       autogenerate: vrouter-network-cfg
+
+#     Private:
+#       type: tosca.nodes.NetworkTemplate
+
+#     management:
+#       type: tosca.nodes.network.Network.XOS
+#       properties:
+#           no-create: true
+#           no-delete: true
+#           no-update: true
+
+#     image#vsg-1.0:
+#       type: tosca.nodes.Image
+
+#     mysite:
+#       type: tosca.nodes.Site
+
+#     label_vsg:
+#       type: tosca.nodes.NodeLabel
+
+#     # Networks required by the CORD setup
+#     mysite_vsg-access:
+#       type: tosca.nodes.network.Network
+#       properties:
+#           ip_version: 4
+#       requirements:
+#           - network_template:
+#               node: Private
+#               relationship: tosca.relationships.UsesNetworkTemplate
+#           - owner:
+#               node: mysite_vsg
+#               relationship: tosca.relationships.MemberOfSlice
+#           - connection:
+#               node: mysite_vsg
+#               relationship: tosca.relationships.ConnectsToSlice
+
+#     # CORD Slices
+#     mysite_vsg:
+#       description: vSG Controller Slice
+#       type: tosca.nodes.Slice
+#       properties:
+#           network: noauto
+#       requirements:
+#           - vsg_service:
+#               node: service#vsg
+#               relationship: tosca.relationships.MemberOfService
+#           - site:
+#               node: mysite
+#               relationship: tosca.relationships.MemberOfSite
+#           - management:
+#               node: management
+#               relationship: tosca.relationships.ConnectsToNetwork
+#           - image:
+#               node: image#vsg-1.0
+#               relationship: tosca.relationships.DefaultImage
+
+#     # Let's add a user who can be administrator of the household
+#     johndoe@myhouse.com:
+#       type: tosca.nodes.User
+#       properties:
+#           password: letmein
+#           firstname: john
+#           lastname: doe
+#       requirements:
+#           - site:
+#               node: mysite
+#               relationship: tosca.relationships.MemberOfSite
+#           - dependency:
+#                 node: mysite_vsg
+#                 relationship: tosca.relationships.DependsOn
+
+#     # A subscriber
+#     My House:
+#        type: tosca.nodes.CORDSubscriber
+#        properties:
+#            service_specific_id: 123
+#            firewall_enable: false
+#            cdn_enable: false
+#            url_filter_enable: false
+#            url_filter_level: R
+#        requirements:
+#           - house_admin:
+#               node: johndoe@myhouse.com
+#               relationship: tosca.relationships.AdminPrivilege
+
+#     Mom's PC:
+#        type: tosca.nodes.CORDUser
+#        properties:
+#            mac: 01:02:03:04:05:06
+#            level: PG_13
+#        requirements:
+#            - household:
+#                node: My House
+#                relationship: tosca.relationships.SubscriberDevice
+
+#     Dad's PC:
+#        type: tosca.nodes.CORDUser
+#        properties:
+#            mac: 90:E2:BA:82:F9:75
+#            level: PG_13
+#        requirements:
+#            - household:
+#                node: My House
+#                relationship: tosca.relationships.SubscriberDevice
+
+#     Jack's Laptop:
+#        type: tosca.nodes.CORDUser
+#        properties:
+#            mac: 68:5B:35:9D:91:D5
+#            level: PG_13
+#        requirements:
+#            - household:
+#                node: My House
+#                relationship: tosca.relationships.SubscriberDevice
+
+#     Jill's Laptop:
+#        type: tosca.nodes.CORDUser
+#        properties:
+#            mac: 34:36:3B:C9:B6:A6
+#            level: PG_13
+#        requirements:
+#            - household:
+#                node: My House
+#                relationship: tosca.relationships.SubscriberDevice
+
+#     My Volt:
+#         type: tosca.nodes.VOLTTenant
+#         properties:
+#             service_specific_id: 123
+#             s_tag: 222
+#             c_tag: 111
+#         requirements:
+#             - provider_service:
+#                 node: service#volt
+#                 relationship: tosca.relationships.MemberOfService
+#             - subscriber:
+#                 node: My House
+#                 relationship: tosca.relationships.BelongsToSubscriber
+#             - dependency:
+#                 node: mysite_vsg
+#                 relationship: tosca.relationships.DependsOn
diff --git a/test-standalone/xos.yaml b/test-standalone/xos.yaml
new file mode 100644
index 0000000..85bdd02
--- /dev/null
+++ b/test-standalone/xos.yaml
@@ -0,0 +1,28 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Onboard the exampleservice
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    xos:
+      type: tosca.nodes.XOS
+      properties:
+        ui_port: 9999
+        bootstrap_ui_port: 9998
+        docker_project_name: teststandalone
+        db_container_name: teststandalonebs_xos_db_1
+        frontend_only: true
+        source_ui_image: xosproject/xos-test
+
+    /opt/xos/xos_configuration/xos_common_config:
+      type: tosca.nodes.XOSVolume
+      properties:
+          host_path: { path_join: [ SELF, CONFIG_DIR, ../common/xos_common_config, ENV_VAR ] }
+          read_only: false
+      requirements:
+          - xos:
+             node: xos
+             relationship: tosca.relationships.UsedByXOS