Initial cut at CORD POD configuration
diff --git a/xos/configurations/cord-pod/Makefile b/xos/configurations/cord-pod/Makefile
new file mode 100644
index 0000000..af65aca
--- /dev/null
+++ b/xos/configurations/cord-pod/Makefile
@@ -0,0 +1,22 @@
+.PHONY: xos
+xos: nodes.yaml images.yaml vtn_network_cfg_json
+ sudo docker-compose up -d
+	../common/wait_for_xos_port.sh 80
+ sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /root/setup/setup.yaml
+ sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /root/setup/nodes.yaml
+ sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /root/setup/images.yaml
+ sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/tosca/samples/vtn.yaml
+
+nodes.yaml:
+ export SETUPDIR=.; bash ../common/make-nodes-yaml.sh
+
+images.yaml:
+ export SETUPDIR=.; bash ../common/make-images-yaml.sh
+
+vtn_network_cfg_json:
+ export SETUPDIR=.; bash ./make-vtn-networkconfig-json.sh
+
+.PHONY: local_containers
+local_containers:
+ cd ../../../containers/xos; make devel
+ cd ../../../containers/synchronizer; make
diff --git a/xos/configurations/cord-pod/NOTES.txt b/xos/configurations/cord-pod/NOTES.txt
new file mode 100644
index 0000000..1dd6b5a
--- /dev/null
+++ b/xos/configurations/cord-pod/NOTES.txt
@@ -0,0 +1,6 @@
+Notes on setup
+
+Requirements:
+* admin-openrc.sh: Admin credentials for your OpenStack cloud
+* id_rsa[.pub]: Keypair for use by the various services
+* node_key: Private key that allows root login to the compute nodes
diff --git a/xos/configurations/cord-pod/README.md b/xos/configurations/cord-pod/README.md
new file mode 100644
index 0000000..0fcdb13
--- /dev/null
+++ b/xos/configurations/cord-pod/README.md
@@ -0,0 +1,77 @@
+# XOS Docker Images
+
+## Introduction
+
+ XOS comprises three core services:
+
+ * A database backend (postgres)
+ * A webserver front end (django)
+ * A synchronizer daemon that interacts with the openstack backend.
+
+We have created separate dockerfiles for each of these services, making it
+easier to build the services independently and also deploy and run them in
+isolated environments.
+
+#### Database Container
+
+To build the database container:
+
+```
+$ cd postgresql; make build
+```
+
+#### XOS Container
+
+To build the XOS webserver container:
+
+```
+$ cd xos; make build
+```
+
+#### Synchronizer Container
+
+The Synchronizer shares many of the same dependencies as the XOS container. The
+synchronizer container takes advantage of this by building itself on top of the
+XOS image. This means you must build the XOS image before building the
+synchronizer image. Assuming you have already built the XOS container,
+executing the following will build the Synchronizer container:
+
+```
+$ cd synchronizer; make build
+```
+
+#### Solution Compose File
+
+[Docker Compose](https://docs.docker.com/compose/) is a tool for defining and
+running multi-container Docker applications. With Compose, you use a Compose
+file to configure your application’s services. Then, using a single command, you
+create, start, scale, and manage all the services from your configuration.
+
+Included is a compose file in *YAML* format with content defined by the [Docker
+Compose Format](https://docs.docker.com/compose/compose-file/). With the compose
+file a complete XOS solution based on Docker containers can be instantiated
+using a single command. To start the instance you can use the command:
+
+```
+$ docker-compose up -d
+```
+
+You should now be able to access the login page by visiting
+`http://localhost` and log in using the default `padmin@vicci.org` account
+with password `letmein`.
+
+#### Configuring XOS for OpenStack
+
+If you have your own OpenStack cluster, and you would like to configure XOS to
+control it, copy the `admin-openrc.sh` credentials file for your cluster to
+this directory. Make sure that OpenStack commands work from the local machine
+using the credentials, e.g., `source ./admin-openrc.sh; nova list`. Then run:
+
+```
+$ make
+```
+
+XOS will be launched (the Makefile will run the `docker-compose up -d` command
+for you) and configured with the nodes and images available in your
+OpenStack cloud. You can then log in to XOS as described above and start creating
+slices and instances.
diff --git a/xos/configurations/cord-pod/admin-openrc.sh b/xos/configurations/cord-pod/admin-openrc.sh
new file mode 100644
index 0000000..f27fdac
--- /dev/null
+++ b/xos/configurations/cord-pod/admin-openrc.sh
@@ -0,0 +1,6 @@
+# Replace with the OpenStack admin credentials for your cluster
+export OS_TENANT_NAME=admin
+export OS_USERNAME=admin
+export OS_PASSWORD=admin
+export OS_AUTH_URL=http://localhost:35357/v2.0
+
diff --git a/xos/configurations/cord-pod/docker-compose.yml b/xos/configurations/cord-pod/docker-compose.yml
new file mode 100644
index 0000000..37ce4d0
--- /dev/null
+++ b/xos/configurations/cord-pod/docker-compose.yml
@@ -0,0 +1,47 @@
+xos_db:
+ image: xosproject/xos-postgres
+ expose:
+ - "5432"
+
+xos_synchronizer_openstack:
+ command: bash -c "sleep 120; python /opt/xos/synchronizers/openstack/xos-synchronizer.py"
+ image: xosproject/xos-synchronizer-openstack
+ labels:
+ org.xosproject.kind: synchronizer
+ org.xosproject.target: openstack
+ links:
+ - xos_db
+ volumes:
+ - .:/root/setup:ro
+ - ../vtn/files/xos_vtn_config:/opt/xos/xos_configuration/xos_vtn_config:ro
+
+xos_synchronizer_onos:
+ image: xosproject/xos-synchronizer-openstack
+ command: bash -c "python /opt/xos/synchronizers/onos/onos-synchronizer.py -C /opt/xos/synchronizers/onos/onos_synchronizer_config"
+ labels:
+ org.xosproject.kind: synchronizer
+ org.xosproject.target: onos
+ links:
+ - xos_db
+ volumes:
+ - .:/root/setup:ro
+ - ./id_rsa:/opt/xos/synchronizers/onos/onos_key:ro # private key
+
+# FUTURE
+#xos_swarm_synchronizer:
+# image: xosproject/xos-swarm-synchronizer
+# labels:
+# org.xosproject.kind: synchronizer
+# org.xosproject.target: swarm
+
+xos:
+ command: python /opt/xos/manage.py runserver 0.0.0.0:80 --insecure --makemigrations
+ image: xosproject/xos
+ links:
+ - xos_db
+ ports:
+ - "80:80"
+ volumes:
+ - .:/root/setup:ro
+ - ../vtn/files/xos_vtn_config:/opt/xos/xos_configuration/xos_vtn_config:ro
+ - ./id_rsa.pub:/opt/xos/synchronizers/onos/onos_key.pub:ro
diff --git a/xos/configurations/cord-pod/make-vtn-networkconfig-json.sh b/xos/configurations/cord-pod/make-vtn-networkconfig-json.sh
new file mode 100755
index 0000000..cf271a1
--- /dev/null
+++ b/xos/configurations/cord-pod/make-vtn-networkconfig-json.sh
@@ -0,0 +1,71 @@
+FN=$SETUPDIR/vtn-network-cfg.json
+
+echo "Writing to $FN"
+
+rm -f $FN
+
+cat >> $FN <<EOF
+{
+ "apps" : {
+ "org.onosproject.cordvtn" : {
+ "cordvtn" : {
+ "gatewayMac" : "00:00:00:00:00:01",
+ "nodes" : [
+EOF
+
+NODES=$( sudo bash -c "source $SETUPDIR/admin-openrc.sh ; nova hypervisor-list" |grep -v ID|grep -v +|awk '{print $4}' )
+
+# also configure ONOS to manage the nm node
+NM="neutron-gateway"
+NODES="$NODES $NM"
+
+NODECOUNT=0
+for NODE in $NODES; do
+ ((NODECOUNT++))
+done
+
+I=0
+for NODE in $NODES; do
+ echo $NODE
+ NODEIP=`getent hosts $NODE | awk '{ print $1 }'`
+
+ PHYPORT=eth0
+ LOCALIP=$NODEIP
+
+ ((I++))
+ cat >> $FN <<EOF
+ {
+ "hostname": "$NODE",
+ "ovsdbIp": "$NODEIP",
+ "ovsdbPort": "6641",
+ "bridgeId": "of:000000000000000$I",
+ "phyPortName": "$PHYPORT",
+ "localIp": "$LOCALIP"
+EOF
+ if [[ "$I" -lt "$NODECOUNT" ]]; then
+ echo " }," >> $FN
+ else
+ echo " }" >> $FN
+ fi
+done
+
+# get the openstack admin password and username
+source $SETUPDIR/admin-openrc.sh
+NEUTRON_URL=`keystone endpoint-get --service network|grep publicURL|awk '{print $4}'`
+
+cat >> $FN <<EOF
+ ]
+ }
+ },
+ "org.onosproject.openstackswitching" : {
+ "openstackswitching" : {
+ "do_not_push_flows" : "true",
+             "neutron_server" : "$NEUTRON_URL/v2.0/",
+ "keystone_server" : "$OS_AUTH_URL",
+ "user_name" : "$OS_USERNAME",
+ "password" : "$OS_PASSWORD"
+ }
+ }
+ }
+}
+EOF
diff --git a/xos/configurations/cord-pod/setup.yaml b/xos/configurations/cord-pod/setup.yaml
new file mode 100644
index 0000000..c13f0eb
--- /dev/null
+++ b/xos/configurations/cord-pod/setup.yaml
@@ -0,0 +1,61 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >
+ * Adds OpenCloud Sites, Deployments, and Controllers.
+
+imports:
+ - custom_types/xos.yaml
+
+topology_template:
+ node_templates:
+
+ MyDeployment:
+ type: tosca.nodes.Deployment
+ properties:
+ flavors: m1.large, m1.medium, m1.small
+
+ MyOpenStack:
+ type: tosca.nodes.Controller
+ requirements:
+ - deployment:
+ node: MyDeployment
+ relationship: tosca.relationships.ControllerDeployment
+ properties:
+ backend_type: OpenStack
+ version: Kilo
+ auth_url: { get_script_env: [ SELF, adminrc, OS_AUTH_URL, LOCAL_FILE] }
+ admin_user: { get_script_env: [ SELF, adminrc, OS_USERNAME, LOCAL_FILE] }
+ admin_password: { get_script_env: [ SELF, adminrc, OS_PASSWORD, LOCAL_FILE] }
+ admin_tenant: { get_script_env: [ SELF, adminrc, OS_TENANT_NAME, LOCAL_FILE] }
+ domain: Default
+ artifacts:
+ adminrc: /root/setup/admin-openrc.sh
+
+ mysite:
+ type: tosca.nodes.Site
+ properties:
+ display_name: MySite
+ site_url: http://xosproject.org/
+      requirements:
+          - deployment:
+               node: MyDeployment
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: MyOpenStack
+                       relationship: tosca.relationships.UsesController
+
+ # This user already exists in XOS with this password
+ # It's an example of how to create new users
+ padmin@vicci.org:
+ type: tosca.nodes.User
+ requirements:
+ - site:
+ node: mysite
+ relationship: tosca.relationships.MemberOfSite
+ properties:
+ is_admin: true
+ is_active: true
+ firstname: XOS
+ lastname: admin
+ password: letmein
diff --git a/xos/configurations/cord-pod/vtn-setup.yml b/xos/configurations/cord-pod/vtn-setup.yml
new file mode 100644
index 0000000..e8bb962
--- /dev/null
+++ b/xos/configurations/cord-pod/vtn-setup.yml
@@ -0,0 +1,60 @@
+---
+- hosts: neutron-api
+ sudo: yes
+ vars:
+ vtn_host: node2.juju2.xos-pg0.clemson.cloudlab.us
+ tasks:
+
+ # Most of this should happen in the neutron-api charm
+ # Make a local copy and deploy from there for starters
+ # * Use latest copy of neutron-api charm
+ # * Add an "onos-vtn" core plugin
+  # * Do the rest of these steps when the "onos-vtn" plugin is selected
+ # * Can we add a "vtn-host" argument to the charm?
+ - apt: name={{ item }} state=installed
+ with_items:
+ - python-pip
+
+ - pip: name={{ item }} state=latest
+ with_items:
+ - setuptools
+ - pip
+ - testrepository
+ - git: repo=https://github.com/openstack/networking-onos.git
+ dest=/srv/networking-onos
+ - shell: cd /srv/networking-onos; python setup.py install
+
+ # Edit /usr/local/etc/neutron/plugins/ml2/conf_onos.ini
+ - ini_file: dest=/usr/local/etc/neutron/plugins/ml2/conf_onos.ini
+ section=onos option=url_path value=http://{{ vtn_host }}:8181/onos/openstackswitching
+ - ini_file: dest=/usr/local/etc/neutron/plugins/ml2/conf_onos.ini
+ section=onos option=username value=karaf
+ - ini_file: dest=/usr/local/etc/neutron/plugins/ml2/conf_onos.ini
+ section=onos option=password value=karaf
+
+ # Edit /etc/neutron/neutron.conf
+# - ini_file: dest=/etc/neutron/neutron.conf
+# section=DEFAULT option=core_plugin value=neutron.plugins.ml2.plugin.Ml2Plugin
+
+ # Edit /etc/neutron/plugins/ml2/ml2_conf.ini
+ # DOING IT THIS WAY WILL CONFLICT WITH JUJU!
+ - ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini
+ section=ml2 option=tenant_network_types value=vxlan
+ - ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini
+ section=ml2 option=type_drivers value=vxlan
+ - ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini
+ section=ml2 option=mechanism_drivers value=onos_ml2
+
+ # Already present
+ #- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini
+ # section=ml2_type_vxlan option=vni_ranges value=1001:2000
+
+ - service: name=neutron-server state=stopped enabled=no
+ # Run neutron-server with extra config file
+ # DOING IT THIS WAY WILL CONFLICT WITH JUJU!
+ - copy: src=files/neutron-supervisor.conf dest=/etc/supervisor/conf.d/
+ - shell: supervisorctl reload
+
+# - shell: ../../scripts/destroy-all-networks.sh
+ - shell: cd ../cord/dataplane; bash ./generate-bm.sh > hosts-bm
+ - shell: cd ../cord/dataplane; ansible-playbook -i hosts-bm dataplane-vtn.yaml