Merge pull request #4 from open-cloud/roles

Roles
diff --git a/ansible.cfg b/ansible.cfg
index d1c3f7f..7ee9c04 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -3,4 +3,6 @@
 
 [defaults]
 callback_whitelist = profile_tasks
-deprecation_warnings = False
+host_key_checking = false
+forks=20
+
diff --git a/bootstrap.sh b/bootstrap.sh
deleted file mode 100755
index b8e7f4f..0000000
--- a/bootstrap.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-sudo apt-get update
-sudo apt-get install -y software-properties-common git mosh dnsutils
-sudo add-apt-repository -y ppa:ansible/ansible
-sudo apt-get update
-sudo apt-get install -y ansible
-[ -e ~/.ssh/id_rsa ] || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
-cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
diff --git a/cord-single-playbook.yml b/cord-single-playbook.yml
new file mode 100644
index 0000000..499721d
--- /dev/null
+++ b/cord-single-playbook.yml
@@ -0,0 +1,47 @@
+---
+# CORD playbook, for installing a single-node CORD test environment on CloudLab
+
+- name: Include vars
+  hosts: all
+  tasks:
+  - include_vars: vars/cord_defaults.yml
+  - include_vars: vars/cord.yml
+  - include_vars: vars/cord_keystone.yml
+
+- name: DNS Server and apt-cacher-ng Setup
+  hosts: head
+  become: yes
+  roles:
+    - dns-nsd
+    - dns-unbound
+    - apt-cacher-ng
+
+- name: Configure all hosts to use DNS server
+  hosts: all
+  become: yes
+  roles:
+    - dns-configure
+
+- name: Prep systems
+  hosts: all
+  become: yes
+  roles:
+    - common-prep
+    - cloudlab-prep
+
+- name: Configure head node, create VMs
+  hosts: head
+  roles:
+    - { role: head-prep, become: yes }
+    - { role: config-virt, become: yes }
+    - create-vms
+
+- name: Set up VMs, Juju, simulate fabric
+  hosts: head
+  roles:
+    - xos-vm-install
+    - onos-vm-install
+    - juju-setup
+    - docker-compose
+    - simulate-fabric
+
diff --git a/docs/install_opencloud_site.md b/docs/install_opencloud_site.md
index 26c50f1..daaa23b 100644
--- a/docs/install_opencloud_site.md
+++ b/docs/install_opencloud_site.md
@@ -1,16 +1,16 @@
-**Introduction**
+## Introduction
 
-The following steps areis required in order to bring up a new opencloud sites.
+The following steps are required in order to bring up a new OpenCloud site.
 
 1. Allocate servers
 
-2. (Re)IInstall Uubuntu
+2. Install Ubuntu
 
-3. Install openstack controller & compute nodes
+3. Install OpenStack controller & compute nodes
 
-4. Add site’s openstack controller to xos
+4. Add the site’s OpenStack controller to XOS
 
-**Allocate Servers**
+## Allocate Servers
 
 It may happen that, for various reasons, a few servers are offline. Allocating servers involves finding those nodes and bringing them back online. In most cases just rebooting the nodes will bring them back online. Sometimes they may be offline for hardware malfunctions or maintenance; in that case someone would need to provide help locally, from the facility.
 
@@ -18,7 +18,7 @@
 
 Note: For example, for the Stanford cluster, the ipmi-cmd.sh script is installed on node4.stanford.vicci.org. You should be able to reboot nodes from there.
 
-**Install Ubuntu**
+## Install Ubuntu
 
 OpenCloud nodes are expected to run Ubuntu 14.x.
 
@@ -42,6 +42,14 @@
 
 After reboot, the machine should go through the Ubuntu installation automatically. At the end of the process, those registered as administrators should be notified of the successful installation. If you’re not an official opencloud.us administrator, just try to log into the machines again 20-30 minutes after the reboot.
 
+3. Update Ubuntu
+
+```
+sudo apt-get update
+sudo apt-get dist-upgrade
+```
+
+
 **Install OpenStack**
 
 Ansible is software that enables easy centralized configuration and management of a set of machines.
diff --git a/enable-virt-dell.yml b/enable-virt-dell.yml
deleted file mode 100644
index 2e84c05..0000000
--- a/enable-virt-dell.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-# Enable virtualization on Dell R410s
----
-- hosts: onlab-compute
-  sudo: yes
-  tasks:
-  - name: Update files
-    copy: src=files/etc/apt/sources.list.d/linux.dell.com.sources.list
-      dest=/etc/apt/sources.list.d/linux.dell.com.sources.list
-
-  - shell: gpg --keyserver pool.sks-keyservers.net --recv-key 1285491434D8786F
-
-  - shell: gpg -a --export 1285491434D8786F | sudo apt-key add -
-
-  - name: Update apt cache
-    apt: upgrade=yes update_cache=yes
-
-  - name: Install packages
-    apt: name=dtk-scripts state=present
-
-  - name: Enable virtualization in BIOS
-    shell: /opt/dell/toolkit/bin/syscfg --virtualization=enable
diff --git a/filter_plugins/format_string.py b/filter_plugins/format_string.py
new file mode 100644
index 0000000..8210d89
--- /dev/null
+++ b/filter_plugins/format_string.py
@@ -0,0 +1,16 @@
+from jinja2.utils import soft_unicode
+
+def format_string(string, pattern):
+    """
+    formats the pattern with the string piped into it --
+    basically the reverse argument order of the standard "format()"
+    """
+    return soft_unicode(pattern) % (string)
+
+class FilterModule(object):
+
+    def filters(self):
+        return {
+            'format_string': format_string,
+        }
+
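A quick sketch of how this filter might be used (hostname suffix is hypothetical; the juju-setup role applies it the same way to build FQDNs):

```
- debug:
    msg: "{{ groups['head'] | map('format_string', '%s.example.org') | list }}"
```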
diff --git a/filter_plugins/genmac.py b/filter_plugins/genmac.py
new file mode 100644
index 0000000..1338703
--- /dev/null
+++ b/filter_plugins/genmac.py
@@ -0,0 +1,29 @@
+import hashlib
+import netaddr
+
+def genmac(value, prefix='', length=12):
+    '''
+    deterministically generates a "random" MAC with a configurable prefix
+    '''
+
+    # from: http://serverfault.com/questions/40712/what-range-of-mac-addresses-can-i-safely-use-for-my-virtual-machines
+    # use the given prefix, or fall back to a default (mac_prefix was otherwise undefined when a prefix was passed)
+    mac_prefix = prefix if prefix else "0ac04d" # random "cord"-esque
+
+    # deterministically generate a value
+    h = hashlib.new('sha1')
+    h.update(value)
+
+    # build/trim MAC
+    mac_string = (mac_prefix + h.hexdigest())[0:length]
+
+    return netaddr.EUI(mac_string)
+
+class FilterModule(object):
+    ''' MAC generation filter '''
+    filter_map = {
+        'genmac': genmac,
+    }
+
+    def filters(self):
+         return self.filter_map
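A usage sketch (VM name is hypothetical); because the value seeds the SHA-1 hash, the same input always yields the same MAC:

```
- debug:
    msg: "{{ 'onos-cord-1' | genmac }}"  # deterministic MAC derived from the name
```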
diff --git a/group_vars/arizona.yml b/group_vars/arizona.yml
new file mode 100644
index 0000000..d945d36
--- /dev/null
+++ b/group_vars/arizona.yml
@@ -0,0 +1,5 @@
+---
+# file: group_vars/arizona.yml
+
+mgmt_net_prefix: 192.168.102
+cloudlab: false
diff --git a/group_vars/cloudlab.yml b/group_vars/cloudlab.yml
new file mode 100644
index 0000000..340ac15
--- /dev/null
+++ b/group_vars/cloudlab.yml
@@ -0,0 +1,6 @@
+---
+# file: group_vars/cloudlab.yml
+
+mgmt_net_prefix: 192.168.100
+cloudlab: true
+
diff --git a/group_vars/cord-test.yml b/group_vars/cord-test.yml
new file mode 100644
index 0000000..9836d41
--- /dev/null
+++ b/group_vars/cord-test.yml
@@ -0,0 +1,4 @@
+---
+# file: group_vars/cord-test.yml
+
+
diff --git a/group_vars/cord.yml b/group_vars/cord.yml
new file mode 100644
index 0000000..3b1cf22
--- /dev/null
+++ b/group_vars/cord.yml
@@ -0,0 +1,3 @@
+---
+# file: group_vars/cord.yml
+
diff --git a/group_vars/princeton.yml b/group_vars/princeton.yml
new file mode 100644
index 0000000..7c1e6ba
--- /dev/null
+++ b/group_vars/princeton.yml
@@ -0,0 +1,6 @@
+---
+# file: group_vars/princeton.yml
+
+mgmt_net_prefix: 192.168.100
+cloudlab: false
+
diff --git a/group_vars/singapore.yml b/group_vars/singapore.yml
new file mode 100644
index 0000000..85ba602
--- /dev/null
+++ b/group_vars/singapore.yml
@@ -0,0 +1,5 @@
+---
+# file: group_vars/singapore.yml
+
+mgmt_net_prefix: 192.168.103
+cloudlab: false
diff --git a/group_vars/stanford.yml b/group_vars/stanford.yml
new file mode 100644
index 0000000..fee7f1c
--- /dev/null
+++ b/group_vars/stanford.yml
@@ -0,0 +1,6 @@
+---
+# file: group_vars/stanford.yml
+
+mgmt_net_prefix: 192.168.101
+cloudlab: false
+
diff --git a/inventory/arizona b/inventory/arizona
new file mode 100644
index 0000000..161f143
--- /dev/null
+++ b/inventory/arizona
@@ -0,0 +1,12 @@
+[all:vars]
+ansible_ssh_user=ubuntu
+
+[head]
+node1.cs.arizona.edu
+
+[compute]
+node2.cs.arizona.edu
+node3.cs.arizona.edu
+node4.cs.arizona.edu
+node5.cs.arizona.edu
+node6.cs.arizona.edu
diff --git a/inventory/aztest b/inventory/aztest
new file mode 100644
index 0000000..e8ff1fa
--- /dev/null
+++ b/inventory/aztest
@@ -0,0 +1,10 @@
+[all:vars]
+ansible_ssh_user=ubuntu
+
+[head]
+node09.opencloud.cs.arizona.edu
+
+[compute]
+node10.opencloud.cs.arizona.edu
+node11.opencloud.cs.arizona.edu
+node12.opencloud.cs.arizona.edu
diff --git a/inventory/cloudlab b/inventory/cloudlab
new file mode 100644
index 0000000..4dc377f
--- /dev/null
+++ b/inventory/cloudlab
@@ -0,0 +1,8 @@
+[all:vars]
+ansible_ssh_user=ubuntu
+
+[head]
+ctl.install.xos-pg0.clemson.cloudlab.us
+
+[compute]
+cp-1.install.xos-pg0.clemson.cloudlab.us
diff --git a/inventory/cord b/inventory/cord
new file mode 100644
index 0000000..1d94507
--- /dev/null
+++ b/inventory/cord
@@ -0,0 +1,25 @@
+[all:vars]
+ansible_ssh_user=cord
+
+[head]
+localhost ansible_connection=local
+
+[compute]
+node1
+node2
+node3
+
+[openstack]
+mysql
+rabbitmq-server
+keystone
+glance
+nova-cloud-controller
+openstack-dashboard
+ceilometer
+nagios
+neutron-api
+
+[openstack:vars]
+ansible_ssh_user=ubuntu
+
diff --git a/inventory/cord-test b/inventory/cord-test
new file mode 100644
index 0000000..29c1dc3
--- /dev/null
+++ b/inventory/cord-test
@@ -0,0 +1,24 @@
+[all:vars]
+ansible_ssh_user=ubuntu
+test_setup=true
+
+[head]
+localhost ansible_connection=local
+
+[compute]
+nova-compute
+
+[openstack]
+mysql
+rabbitmq-server
+keystone
+glance
+nova-cloud-controller
+openstack-dashboard
+ceilometer
+nagios
+neutron-api
+
+[openstack:vars]
+ansible_ssh_user=ubuntu
+
diff --git a/hawaii-hosts b/inventory/hawaii
similarity index 100%
rename from hawaii-hosts
rename to inventory/hawaii
diff --git a/inventory/princeton b/inventory/princeton
new file mode 100644
index 0000000..821d605
--- /dev/null
+++ b/inventory/princeton
@@ -0,0 +1,24 @@
+[all:vars]
+ansible_ssh_user=ubuntu
+
+[head]
+node70.princeton.vicci.org
+
+[compute]
+node37.princeton.vicci.org
+node39.princeton.vicci.org
+node41.princeton.vicci.org
+node43.princeton.vicci.org
+node45.princeton.vicci.org
+node49.princeton.vicci.org
+node51.princeton.vicci.org
+node52.princeton.vicci.org
+node54.princeton.vicci.org
+node55.princeton.vicci.org
+node57.princeton.vicci.org
+node59.princeton.vicci.org
+node65.princeton.vicci.org
+node66.princeton.vicci.org
+node67.princeton.vicci.org
+node68.princeton.vicci.org
+node69.princeton.vicci.org
diff --git a/inventory/singapore b/inventory/singapore
new file mode 100644
index 0000000..eb2164b
--- /dev/null
+++ b/inventory/singapore
@@ -0,0 +1,11 @@
+[all:vars]
+ansible_ssh_user=ubuntu
+
+[head]
+opencloud0.sing.internet2.edu
+
+[compute]
+opencloud1.sing.internet2.edu
+opencloud2.sing.internet2.edu
+opencloud3.sing.internet2.edu
+
diff --git a/inventory/single-localhost b/inventory/single-localhost
new file mode 100644
index 0000000..18e7e0d
--- /dev/null
+++ b/inventory/single-localhost
@@ -0,0 +1,6 @@
+[all:vars]
+ansible_ssh_user=ubuntu
+
+[head]
+localhost
+
diff --git a/inventory/stanford b/inventory/stanford
new file mode 100644
index 0000000..5b3068f
--- /dev/null
+++ b/inventory/stanford
@@ -0,0 +1,70 @@
+[all:vars]
+ansible_ssh_user=ubuntu
+
+[head]
+node1.stanford.vicci.org
+
+[compute]
+node2.stanford.vicci.org
+node3.stanford.vicci.org
+node5.stanford.vicci.org
+node6.stanford.vicci.org
+node7.stanford.vicci.org
+node8.stanford.vicci.org
+node9.stanford.vicci.org
+node10.stanford.vicci.org
+node11.stanford.vicci.org
+node12.stanford.vicci.org
+node13.stanford.vicci.org
+node14.stanford.vicci.org
+node15.stanford.vicci.org
+node16.stanford.vicci.org
+node17.stanford.vicci.org
+node18.stanford.vicci.org
+node19.stanford.vicci.org
+node20.stanford.vicci.org
+node21.stanford.vicci.org
+node22.stanford.vicci.org
+node23.stanford.vicci.org
+node24.stanford.vicci.org
+node25.stanford.vicci.org
+node26.stanford.vicci.org
+node27.stanford.vicci.org
+node28.stanford.vicci.org
+node29.stanford.vicci.org
+node30.stanford.vicci.org
+node31.stanford.vicci.org
+node32.stanford.vicci.org
+node33.stanford.vicci.org
+node34.stanford.vicci.org
+node35.stanford.vicci.org
+node37.stanford.vicci.org
+node38.stanford.vicci.org
+node39.stanford.vicci.org
+node40.stanford.vicci.org
+node41.stanford.vicci.org
+node42.stanford.vicci.org
+node43.stanford.vicci.org
+node44.stanford.vicci.org
+node45.stanford.vicci.org
+node46.stanford.vicci.org
+node47.stanford.vicci.org
+node48.stanford.vicci.org
+node49.stanford.vicci.org
+node50.stanford.vicci.org
+node52.stanford.vicci.org
+node54.stanford.vicci.org
+node55.stanford.vicci.org
+node57.stanford.vicci.org
+node58.stanford.vicci.org
+node59.stanford.vicci.org
+node60.stanford.vicci.org
+node61.stanford.vicci.org
+node62.stanford.vicci.org
+node63.stanford.vicci.org
+node64.stanford.vicci.org
+node67.stanford.vicci.org
+node68.stanford.vicci.org
+node69.stanford.vicci.org
+node70.stanford.vicci.org
+
diff --git a/unc-hosts b/inventory/unc
similarity index 100%
rename from unc-hosts
rename to inventory/unc
diff --git a/arizona-compute.yml b/legacy/arizona-compute.yml
similarity index 100%
rename from arizona-compute.yml
rename to legacy/arizona-compute.yml
diff --git a/arizona-hosts b/legacy/arizona-hosts
similarity index 100%
rename from arizona-hosts
rename to legacy/arizona-hosts
diff --git a/arizona-setup.yml b/legacy/arizona-setup.yml
similarity index 100%
rename from arizona-setup.yml
rename to legacy/arizona-setup.yml
diff --git a/cloudlab-hosts b/legacy/cloudlab-hosts
similarity index 100%
rename from cloudlab-hosts
rename to legacy/cloudlab-hosts
diff --git a/cord-hosts b/legacy/cord-hosts
similarity index 100%
rename from cord-hosts
rename to legacy/cord-hosts
diff --git a/cord-post-install.yml b/legacy/cord-post-install.yml
similarity index 100%
rename from cord-post-install.yml
rename to legacy/cord-post-install.yml
diff --git a/cord-test-hosts b/legacy/cord-test-hosts
similarity index 100%
rename from cord-test-hosts
rename to legacy/cord-test-hosts
diff --git a/hawaii-compute.yml b/legacy/hawaii-compute.yml
similarity index 100%
rename from hawaii-compute.yml
rename to legacy/hawaii-compute.yml
diff --git a/hawaii-setup.yml b/legacy/hawaii-setup.yml
similarity index 100%
rename from hawaii-setup.yml
rename to legacy/hawaii-setup.yml
diff --git a/princeton-hosts b/legacy/princeton-hosts
similarity index 100%
rename from princeton-hosts
rename to legacy/princeton-hosts
diff --git a/princeton-setup.yml b/legacy/princeton-setup.yml
similarity index 100%
rename from princeton-setup.yml
rename to legacy/princeton-setup.yml
diff --git a/singapore-compute.yml b/legacy/singapore-compute.yml
similarity index 100%
rename from singapore-compute.yml
rename to legacy/singapore-compute.yml
diff --git a/singapore-hosts b/legacy/singapore-hosts
similarity index 100%
rename from singapore-hosts
rename to legacy/singapore-hosts
diff --git a/singapore-setup.yml b/legacy/singapore-setup.yml
similarity index 100%
rename from singapore-setup.yml
rename to legacy/singapore-setup.yml
diff --git a/stanford-compute.yml b/legacy/stanford-compute.yml
similarity index 100%
rename from stanford-compute.yml
rename to legacy/stanford-compute.yml
diff --git a/stanford-hosts b/legacy/stanford-hosts
similarity index 100%
rename from stanford-hosts
rename to legacy/stanford-hosts
diff --git a/stanford-setup.yml b/legacy/stanford-setup.yml
similarity index 100%
rename from stanford-setup.yml
rename to legacy/stanford-setup.yml
diff --git a/tasks/vm-ips.yml b/legacy/tasks/vm-ips.yml
similarity index 100%
rename from tasks/vm-ips.yml
rename to legacy/tasks/vm-ips.yml
diff --git a/unc-compute.yml b/legacy/unc-compute.yml
similarity index 100%
rename from unc-compute.yml
rename to legacy/unc-compute.yml
diff --git a/unc-setup.yml b/legacy/unc-setup.yml
similarity index 100%
rename from unc-setup.yml
rename to legacy/unc-setup.yml
diff --git a/library/juju_facts.py b/library/juju_facts.py
new file mode 100644
index 0000000..3ba02c8
--- /dev/null
+++ b/library/juju_facts.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+import json
+import subprocess
+
+def dict_keys_dash_to_underscore(dashed):
+    underscored = dict((k.replace('-','_'),v) for k,v in dashed.items())
+    return underscored
+
+juju_status_json = subprocess.check_output("juju status --format=json", shell=True)
+juju_status = json.loads(juju_status_json)
+
+juju_machines = {}
+for index, data in juju_status['machines'].iteritems():
+    data_underscore = dict_keys_dash_to_underscore(data)
+    juju_machines[data_underscore["dns_name"]] = data_underscore
+    juju_machines[data_underscore["dns_name"]]["machine_id"] = index
+
+juju_compute_nodes = {}
+if 'nova-compute' in juju_status['services']:
+    for name, data in juju_status['services']['nova-compute']['units'].iteritems():
+        juju_compute_nodes[data['public-address']] = data
+
+print json.dumps({
+    "changed": True,
+    "ansible_facts" : {
+        "juju_environment": juju_status['environment'],
+        "juju_machines": juju_machines,
+        "juju_services": juju_status['services'],
+        "juju_compute_nodes": juju_compute_nodes,
+    },
+})
+
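This module takes no arguments; a minimal sketch of how it is invoked (the juju-setup and juju-openstack-config roles call it and then consult the registered facts):

```
- name: Obtain Juju facts
  juju_facts:

- debug:
    msg: "Machines known to Juju: {{ juju_machines.keys() }}"
```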
diff --git a/opencloud-multi-playbook.yml b/opencloud-multi-playbook.yml
new file mode 100644
index 0000000..e3d9428
--- /dev/null
+++ b/opencloud-multi-playbook.yml
@@ -0,0 +1,49 @@
+---
+# aztest playbook, for installing an OpenCloud site
+
+- name: Include Configuration
+  hosts: all
+  tasks:
+  - include_vars: vars/opencloud_defaults.yml
+  - include_vars: vars/aztest.yml
+  - include_vars: vars/aztest_keystone.yml
+
+- name: Prep systems, and enable virtualization
+  hosts: all
+  become: yes
+  roles:
+    - common-prep
+    - dell-virt
+
+- name: DNS Server Setup
+  hosts: head
+  become: yes
+  roles:
+    - dns-nsd
+    - dns-unbound
+
+- name: Configure all hosts to use DNS server
+  hosts: all
+  become: yes
+  roles:
+    - dns-configure
+
+- name: Configure head node, create VMs, and start Juju setup
+  hosts: head
+  roles:
+    - { role: head-prep, become: yes }
+    - { role: config-virt, become: yes }
+    - juju-user-prep
+    - juju-setup
+
+- name: Configure compute nodes
+  hosts: compute
+  become: yes
+  roles:
+    - compute-prep
+
+- name: Configure OpenStack using Juju
+  hosts: head
+  roles:
+    - juju-openstack-config
+
diff --git a/roles/apt-cacher-ng/defaults/main.yml b/roles/apt-cacher-ng/defaults/main.yml
new file mode 100644
index 0000000..84d55a4
--- /dev/null
+++ b/roles/apt-cacher-ng/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# apt-cacher-ng/defaults/main.yml
+
+apt_cacher_name: 127.0.0.1
+apt_cacher_port: 3142
+
diff --git a/roles/apt-cacher-ng/handlers/main.yml b/roles/apt-cacher-ng/handlers/main.yml
new file mode 100644
index 0000000..cdc9735
--- /dev/null
+++ b/roles/apt-cacher-ng/handlers/main.yml
@@ -0,0 +1,7 @@
+---
+# apt-cacher-ng/handlers/main.yml
+
+- name: restart apt-cacher-ng
+  service:
+    name=apt-cacher-ng
+    state=restarted
diff --git a/roles/apt-cacher-ng/tasks/main.yml b/roles/apt-cacher-ng/tasks/main.yml
new file mode 100644
index 0000000..a8b9d90
--- /dev/null
+++ b/roles/apt-cacher-ng/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+# roles/apt-cacher-ng/tasks
+
+- name: Install apt-cacher-ng with apt
+  apt:
+    name={{ item }}
+    update_cache=yes
+    cache_valid_time=3600
+  with_items:
+    - apt-cacher-ng
+
+- name: Configure apt-cacher-ng to pass through ssl repos
+  when: apt_ssl_sites is defined
+  lineinfile:
+    'dest=/etc/apt-cacher-ng/acng.conf
+    insertafter="^# PassThroughPattern"
+    regexp="^PassThroughPattern"
+    line="PassThroughPattern: ({{ apt_ssl_sites | join(\"|\")}}):443$"'
+  notify:
+    restart apt-cacher-ng
+
+- name: Configure local system to use apt-cacher-ng
+  template:
+    src=02apt-cacher-ng.j2
+    dest=/etc/apt/apt.conf.d/02apt-cacher-ng
+    mode=0644 owner=root group=root
+
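The `apt_ssl_sites` list consumed above has no default in this role; a hypothetical vars entry (hostnames invented) to let HTTPS repositories pass through the cache:

```
apt_ssl_sites:
  - apt.dockerproject.org
  - mirror.example.org
```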
diff --git a/roles/apt-cacher-ng/templates/02apt-cacher-ng.j2 b/roles/apt-cacher-ng/templates/02apt-cacher-ng.j2
new file mode 100644
index 0000000..46740fb
--- /dev/null
+++ b/roles/apt-cacher-ng/templates/02apt-cacher-ng.j2
@@ -0,0 +1 @@
+Acquire::http { Proxy "http://{{ apt_cacher_name }}:{{ apt_cacher_port }}"; };
diff --git a/roles/cloudlab-prep/defaults/main.yml b/roles/cloudlab-prep/defaults/main.yml
new file mode 100644
index 0000000..1b3f00e
--- /dev/null
+++ b/roles/cloudlab-prep/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# roles/cloudlab-prep/defaults/main.yml
+
+cloudlab_extrafs:
+  - /var/lib/uvtool/libvirt/images
+  #  - /var/lib/nova
diff --git a/roles/cloudlab-prep/tasks/main.yml b/roles/cloudlab-prep/tasks/main.yml
new file mode 100644
index 0000000..5dc9c2c
--- /dev/null
+++ b/roles/cloudlab-prep/tasks/main.yml
@@ -0,0 +1,16 @@
+---
+# roles/cloudlab-prep/tasks/main.yml
+
+- name: create directories to mount extra filesystems
+  file:
+    dest={{ item }}
+    state=directory
+  with_items: "{{ cloudlab_extrafs }}"
+
+
+# FIXME: does not create lost+found, -q makes it fail without error...
+- name: Set up extra disk space
+  command: /usr/testbed/bin/mkextrafs -qf {{ item }}
+    creates={{ item }}/lost+found
+  with_items: "{{ cloudlab_extrafs }}"
+
diff --git a/roles/common-prep/handlers/main.yml b/roles/common-prep/handlers/main.yml
new file mode 100644
index 0000000..537ccb3
--- /dev/null
+++ b/roles/common-prep/handlers/main.yml
@@ -0,0 +1,19 @@
+---
+# file: roles/common-prep/handlers/main.yml
+
+# from https://support.ansible.com/hc/en-us/articles/201958037-Reboot-a-server-and-wait-for-it-to-come-back
+- name: restart host
+  shell: sleep 2 && shutdown -r now "Ansible updates triggered"
+  async: 1
+  poll: 0
+  ignore_errors: true
+
+# wait 1m, then try to contact machine for 10m
+- name: wait for host
+  become: false
+  local_action:
+     wait_for host={{ inventory_hostname }}
+     port=22
+     delay=60 timeout=600
+     state=started
+
diff --git a/roles/common-prep/tasks/main.yml b/roles/common-prep/tasks/main.yml
new file mode 100644
index 0000000..409a179
--- /dev/null
+++ b/roles/common-prep/tasks/main.yml
@@ -0,0 +1,35 @@
+---
+# file: roles/common-prep/tasks/main.yml
+
+- name: Upgrade system to current using apt
+  apt:
+    upgrade=dist
+    update_cache=yes
+    cache_valid_time=3600
+
+- name: Check whether a reboot is required
+  stat: path=/var/run/reboot-required
+  register: reboot_required
+
+- name: reboot if required
+  when: reboot_required.stat.exists
+  debug: msg="System will reboot"
+  notify:
+    - restart host
+    - wait for host
+
+- name: Install standard packages
+  apt:
+    pkg={{ item }}
+    state=present
+    update_cache=yes
+    cache_valid_time=3600
+  with_items:
+   - tmux
+   - vim
+
+- name: Enable vim syntax highlighting
+  lineinfile: dest=/etc/vim/vimrc
+    regexp="^\s*syntax on"
+    line="syntax on"
+
diff --git a/roles/compute-prep/handlers/main.yml b/roles/compute-prep/handlers/main.yml
new file mode 100644
index 0000000..eee1556
--- /dev/null
+++ b/roles/compute-prep/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+# file: roles/compute-prep/handlers/main.yml
+
+- name: run rc.local
+  command: /etc/rc.local
diff --git a/roles/compute-prep/tasks/main.yml b/roles/compute-prep/tasks/main.yml
new file mode 100644
index 0000000..a62305f
--- /dev/null
+++ b/roles/compute-prep/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+# file: roles/compute-prep/tasks/main.yml
+
+- name: Install packages
+  apt:
+    name={{ item }}
+    state=latest
+    update_cache=yes
+    cache_valid_time=3600
+  with_items:
+    - python-yaml
+
+- name: Add head node ubuntu user key
+  authorized_key:
+    user=ubuntu
+    key="{{ hostvars[groups['head'][0]]['sshkey']['stdout'] }}"
+
+- name: Add route via /etc/rc.local
+  template:
+    src=rc.local.j2
+    dest=/etc/rc.local
+    mode=0755
+  notify:
+    - run rc.local
+
+- name: Create /var/lib/nova dir
+  file:
+    path=/var/lib/nova
+    state=directory
+
diff --git a/roles/compute-prep/templates/rc.local.j2 b/roles/compute-prep/templates/rc.local.j2
new file mode 100644
index 0000000..85d9dec
--- /dev/null
+++ b/roles/compute-prep/templates/rc.local.j2
@@ -0,0 +1,19 @@
+#!/bin/sh -e
+#
+# rc.local
+#
+# This script is executed at the end of each multiuser runlevel.
+# Make sure that the script will "exit 0" on success or any other
+# value on error.
+#
+# In order to enable or disable this script just change the execution
+# bits.
+
+{% set head_host = groups['head'][0] -%}
+{% set head_ip = hostvars[head_host]['ansible_default_ipv4']['address'] -%}
+{% set virt_network = virt_nets[0]['ipv4_prefix'] ~ '.0' -%}
+{% set virt_netmask = "255.255.255.0" -%}
+
+route add -net {{ virt_network }} netmask {{ virt_netmask }} gw {{ head_ip }} || true
+
+exit 0
diff --git a/roles/config-virt/defaults/main.yml b/roles/config-virt/defaults/main.yml
new file mode 100644
index 0000000..a134d20
--- /dev/null
+++ b/roles/config-virt/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# roles/config-virt/defaults/main.yml
+
+virt_nets:
+  - name: mgmtbr # templates expect dicts with name/ipv4_prefix (and optional head_vms)
+
diff --git a/roles/config-virt/handlers/main.yml b/roles/config-virt/handlers/main.yml
new file mode 100644
index 0000000..325f21d
--- /dev/null
+++ b/roles/config-virt/handlers/main.yml
@@ -0,0 +1,11 @@
+---
+# roles/config-virt/handlers/main.yml
+
+- name: reload libvirt-bin
+  service:
+    name=libvirt-bin
+    state=restarted
+
+- name: run qemu hook
+  command: /etc/libvirt/hooks/qemu start start
+
diff --git a/roles/config-virt/tasks/main.yml b/roles/config-virt/tasks/main.yml
new file mode 100644
index 0000000..da7b239
--- /dev/null
+++ b/roles/config-virt/tasks/main.yml
@@ -0,0 +1,68 @@
+---
+# roles/config-virt/tasks/main.yml
+
+- name: Get ubuntu image for uvtool
+  command: uvt-simplestreams-libvirt sync --source http://cloud-images.ubuntu.com/daily \
+    release={{ ansible_distribution_release }} arch=amd64
+  async: 1200
+  poll: 0
+  register: uvt_sync
+
+- name: collect libvirt network facts
+  virt_net:
+    command=facts
+
+- name: Tear down libvirt's default network
+  when: ansible_libvirt_networks["default"] is defined
+  virt_net:
+    command={{ item }}
+    name=default
+  with_items:
+    - destroy
+    - undefine
+
+# note, this isn't idempotent, so may need manual fixing if it changes
+- name: define libvirt networks IP/DHCP/DNS settings
+  virt_net:
+    name=xos-{{ item.name }}
+    command=define
+    xml='{{ lookup("template", "virt_net.xml.j2") }}'
+  with_items: '{{ virt_nets }}'
+
+- name: collect libvirt network facts after defining new network
+  virt_net:
+    command=facts
+
+- name: start libvirt networks
+  when: ansible_libvirt_networks["xos-{{ item.name }}"].state != "active"
+  virt_net:
+    name=xos-{{ item.name }}
+    command=create
+  with_items: '{{ virt_nets }}'
+
+- name: have libvirt networks autostart
+  when: ansible_libvirt_networks["xos-{{ item.name }}"].autostart != "yes"
+  virt_net:
+    name=xos-{{ item.name }}
+    autostart=yes
+  with_items: '{{ virt_nets }}'
+
+- name: Have libvirt enable port forwarding to VMs
+  become: yes
+  template:
+    src={{ item }}.j2
+    dest=/etc/libvirt/hooks/{{ item }}
+    mode=0755 owner=root
+  with_items:
+    - daemon
+    - qemu
+  notify:
+    - reload libvirt-bin
+    - run qemu hook
+
+- name: Wait for uvt-kvm image to be available
+  async_status: jid={{ uvt_sync.ansible_job_id }}
+  register: uvt_sync_result
+  until: uvt_sync_result.finished
+  delay: 10
+  retries: 120
diff --git a/roles/config-virt/templates/daemon.j2 b/roles/config-virt/templates/daemon.j2
new file mode 100644
index 0000000..852aef6
--- /dev/null
+++ b/roles/config-virt/templates/daemon.j2
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+SHELL="/bin/bash"
+
+NIC=$( route|grep default|awk '{print $NF}' )
+
+NAME="${1}"
+OP="${2}"
+SUBOP="${3}"
+ARGS="${4}"
+
+add_port_fwd_rule() {
+    DPORT=$1
+    VMIP=$2
+    TOPORT=$3
+
+    iptables -t nat -C PREROUTING -p tcp -i $NIC --dport $DPORT -j DNAT --to-destination $VMIP:$TOPORT
+    if [ "$?" -ne 0 ]
+    then
+        iptables -t nat -A PREROUTING -p tcp -i $NIC --dport $DPORT -j DNAT --to-destination $VMIP:$TOPORT
+    fi
+}
+
+if [ "$OP" = "start" ] || [ "$OP" = "reload" ]
+then
+    iptables -t nat -F
+
+{% for vm in head_vm_list -%}
+{% if vm.forwarded_ports is defined -%}
+{% set vm_net = ( virt_nets | selectattr("head_vms", "defined") | first ) %}
+{% for port in vm.forwarded_ports -%}
+    add_port_fwd_rule {{ port.ext }} "{{ vm_net.ipv4_prefix }}.{{ vm.ipv4_last_octet }}" {{ port.int }}
+{% endfor -%}
+{% endif -%}
+{% endfor -%}
+
+    # Also flush the filter table before rules re-added
+    iptables -F
+fi
+
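This template and the create-vms role both assume a `head_vm_list` variable defined elsewhere (e.g. in the vars files the playbooks include). A hypothetical entry showing the fields they reference (all values invented):

```
head_vm_list:
  - name: xos-1
    service: xos
    ipv4_last_octet: 30
    cpu: 2
    memMB: 4096
    diskGB: 40
    forwarded_ports:
      - { ext: 8888, int: 80 }
```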
diff --git a/roles/config-virt/templates/qemu.j2 b/roles/config-virt/templates/qemu.j2
new file mode 100644
index 0000000..9d20379
--- /dev/null
+++ b/roles/config-virt/templates/qemu.j2
@@ -0,0 +1,46 @@
+#!/bin/sh
+
+SHELL="/bin/bash"
+
+NIC=$( route|grep default|awk '{print $NF}' )
+PORTAL=$( dig +short portal.opencloud.us | tail -1 )
+
+SUBNET=$( ip addr show $NIC|grep "inet "|awk '{print $2}' )
+{% set vm_net = ( virt_nets | selectattr("head_vms", "defined") | first ) %}
+PRIVATENET=$( ip addr show {{ vm_net.name }} |grep "inet "|awk '{print $2}' )
+
+NAME="${1}"
+OP="${2}"
+SUBOP="${3}"
+ARGS="${4}"
+
+add_rule() {
+    CHAIN=$1
+    ARGS=$2
+    iptables -C $CHAIN $ARGS
+    if [ "$?" -ne 0 ]
+    then
+        iptables -I $CHAIN 1 $ARGS
+    fi
+}
+
+add_local_access_rules() {
+    add_rule "FORWARD" "-s $SUBNET -j ACCEPT"
+    # Don't NAT traffic from service VMs destined to the local subnet
+    add_rule "POSTROUTING" "-t nat -s $PRIVATENET -d $SUBNET -j RETURN"
+}
+
+add_portal_access_rules() {
+    add_rule "FORWARD" "-s $PORTAL -j ACCEPT"
+}
+
+add_web_access_rules() {
+    add_rule "FORWARD" "-p tcp --dport 80 -j ACCEPT"
+}
+
+if [ "$OP" = "start" ]
+then
+	add_local_access_rules
+	add_portal_access_rules
+	add_web_access_rules
+fi
diff --git a/roles/config-virt/templates/virt_net.xml.j2 b/roles/config-virt/templates/virt_net.xml.j2
new file mode 100644
index 0000000..ad043e9
--- /dev/null
+++ b/roles/config-virt/templates/virt_net.xml.j2
@@ -0,0 +1,28 @@
+<network>
+  <name>xos-{{ item.name }}</name>
+  <bridge name="{{ item.name }}"/>
+  <forward/>
+  <domain name="{{ site_suffix }}" localonly="no"/>
+  <dns>
+{% if unbound_listen_on_default %}
+{% for host in groups['head'] %}
+  <forwarder addr="{{ hostvars[host].ansible_default_ipv4.address }}"/>
+{% endfor %}
+{% endif %}
+{% if dns_servers is defined %}
+{% for ns in dns_servers %}
+  <forwarder addr="{{ ns }}"/>
+{% endfor %}
+{% endif %}
+  </dns>
+  <ip address="{{ item.ipv4_prefix }}.1" netmask="255.255.255.0">
+    <dhcp>
+      <range start="{{ item.ipv4_prefix }}.2" end="{{ item.ipv4_prefix }}.254"/>
+{% if item.head_vms %}
+{% for vm in head_vm_list %}
+      <host name='{{ vm.name }}' ip='{{ item.ipv4_prefix }}.{{ vm.ipv4_last_octet }}'/>
+{% endfor %}
+{% endif %}
+    </dhcp>
+  </ip>
+</network>
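The template above is rendered once per `virt_nets` entry; a hypothetical entry with the keys it reads (prefix and flag invented):

```
virt_nets:
  - name: mgmtbr
    ipv4_prefix: 192.168.100
    head_vms: true
```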
diff --git a/roles/create-vms/files/docker-install-playbook.yml b/roles/create-vms/files/docker-install-playbook.yml
new file mode 100644
index 0000000..23703af
--- /dev/null
+++ b/roles/create-vms/files/docker-install-playbook.yml
@@ -0,0 +1,60 @@
+---
+# Installs docker with apt, docker-compose with pip, adds user to group
+# Must be run as root
+
+- hosts: docker
+  remote_user: ubuntu
+  become: yes
+
+  tasks:
+
+    # https://docs.docker.com/engine/installation/linux/ubuntulinux/
+    - name: Prereqs and SSL support for apt
+      apt:
+        name={{ item }}
+        update_cache=yes
+        cache_valid_time=3600
+      with_items:
+        - apt-transport-https
+        - ca-certificates
+        - python-pip
+
+    - name: Trust docker apt key
+      apt_key:
+        keyserver=pool.sks-keyservers.net
+        id=58118E89F3A912897C070ADBF76221572C52609D
+
+    - name: Add docker apt repo
+      apt_repository:
+        repo="deb https://apt.dockerproject.org/repo ubuntu-trusty main"
+
+    - name: Install docker
+      apt:
+        update_cache=yes
+        cache_valid_time=3600
+        name=docker-engine
+
+    - name: Install docker-compose from web
+      get_url:
+        url=https://github.com/docker/compose/releases/download/1.7.1/docker-compose-Linux-x86_64
+        checksum=sha256:3166bb74bc648e68c3154bc704fddf6bccf59f03a0c90fc48aefac034535e4ae
+        dest=/usr/local/bin/docker-compose
+        owner=root mode=0755
+
+    # This installs a bunch of prereqs that currently breaks SSL and CA's
+    # https://docs.docker.com/compose/install/#install-using-pip
+    #- name: Install docker-compose from PyPi
+    #  pip:
+    #    name=docker-compose
+
+    - name: Make ubuntu user part of the Docker group
+      user:
+        name="ubuntu"
+        groups="docker"
+        append=yes
+
+    - name: restart Docker daemon to get new group membership
+      service:
+        name=docker
+        state=restarted
+
diff --git a/roles/create-vms/tasks/main.yml b/roles/create-vms/tasks/main.yml
new file mode 100644
index 0000000..7be6c92
--- /dev/null
+++ b/roles/create-vms/tasks/main.yml
@@ -0,0 +1,64 @@
+---
+# file: roles/create-vms/tasks/main.yml
+
+- name: create Virtual Machines with uvt-kvm
+  shell: uvt-kvm create {{ item.name }} release={{ ansible_distribution_release }} \
+    --cpu={{ item.cpu }} --memory={{ item.memMB }} --disk={{ item.diskGB }} --bridge="mgmtbr"
+    creates=/var/lib/uvtool/libvirt/images/{{ item.name }}.qcow
+  with_items: "{{ head_vm_list }}"
+
+- name: Have VMs autostart on reboot
+  become: yes
+  virt:
+    name={{ item.name }}
+    command=autostart
+  with_items: "{{ head_vm_list }}"
+
+- name: wait for VMs to come up
+  wait_for:
+    host={{ item.name }}
+    port=22
+  with_items: "{{ head_vm_list }}"
+
+- name: Create /etc/ansible/hosts file
+  become: yes
+  template:
+    src=ansible_hosts.j2
+    dest=/etc/ansible/hosts
+
+- name: Verify that we can log into every VM
+  command: ansible services -m ping -u ubuntu
+
+- name: Have VMs use the apt-cache
+  command: ansible services -b -u ubuntu -m lineinfile -a "dest=/etc/apt/apt.conf.d/02apt-cacher-ng create=yes mode=0644 owner=root group=root regexp='^Acquire' line='Acquire::http { Proxy \"http://{{ apt_cacher_name }}:{{ apt_cacher_port | default('3142') }}\"; };'"
+
+- name: Update software in all the VMs
+  command: ansible services -m apt -b -u ubuntu -a "upgrade=dist update_cache=yes cache_valid_time=3600"
+
+- name: Create VMs' eth0 interface config file for DNS config via the resolvconf program
+  template:
+    src=eth0.cfg.j2
+    dest={{ ansible_user_dir }}/eth0.cfg
+
+- name: Copy eth0 interface config file to all VMs
+  command: ansible services -b -u ubuntu -m copy -a "src={{ ansible_user_dir }}/eth0.cfg dest=/etc/network/interfaces.d/eth0.cfg owner=root group=root mode=0644"
+
+- name: Restart eth0 interface on all VMs
+  command: ansible services -b -u ubuntu -m shell -a "ifdown eth0 ; ifup eth0"
+
+- name: Verify that we can log into every VM after restarting network interfaces
+  command: ansible services -m ping -u ubuntu
+
+# sshkey is registered in head-prep task
+- name: Enable root ssh login on VMs that require it
+  command: ansible {{ item.name }} -b -u ubuntu -m authorized_key -a "user='root' key='{{ sshkey.stdout }}'"
+  with_items: "{{ head_vm_list | selectattr('root_ssh_login', 'defined') | list }}"
+
+- name: Copy over docker installation playbook
+  copy:
+    src="docker-install-playbook.yml"
+    dest="{{ ansible_user_dir }}/docker-install-playbook.yml"
+
+- name: Install docker in VMs that require it
+  command: ansible-playbook "{{ ansible_user_dir }}/docker-install-playbook.yml"
+
diff --git a/roles/create-vms/templates/ansible_hosts.j2 b/roles/create-vms/templates/ansible_hosts.j2
new file mode 100644
index 0000000..fdf6eae
--- /dev/null
+++ b/roles/create-vms/templates/ansible_hosts.j2
@@ -0,0 +1,13 @@
+[localhost]
+127.0.0.1 hostname={{ ansible_fqdn }}
+
+[services]
+{% for vm in head_vm_list -%}
+{{ vm.name }}
+{% endfor -%}
+
+[docker]
+{% for vm in head_vm_list | selectattr('docker_path', 'defined') -%}
+{{ vm.name }}
+{% endfor -%}
+
diff --git a/roles/create-vms/templates/eth0.cfg.j2 b/roles/create-vms/templates/eth0.cfg.j2
new file mode 100644
index 0000000..94c1062
--- /dev/null
+++ b/roles/create-vms/templates/eth0.cfg.j2
@@ -0,0 +1,12 @@
+# The primary network interface
+auto eth0
+iface eth0 inet dhcp
+{% if unbound_listen_on_default %}
+    dns-nameservers{% for host in groups['head'] %} {{ hostvars[host].ansible_default_ipv4.address }}{% endfor %} 
+{% endif %}
+{% if dns_servers is defined %}
+    dns-nameservers{% for ns in dns_servers %} {{ ns }}{% endfor %} 
+{% endif %}
+{% if dns_search is defined %}
+    dns-search{% for searchdom in dns_search %} {{ searchdom }}{% endfor %}
+{% endif %}
diff --git a/roles/dell-virt/tasks/main.yml b/roles/dell-virt/tasks/main.yml
new file mode 100644
index 0000000..cfc60a6
--- /dev/null
+++ b/roles/dell-virt/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+# file: roles/dell-virt/tasks/main.yml
+
+- name: Trust the Dell apt repository
+  apt_key:
+    keyserver=pool.sks-keyservers.net
+    id=1285491434D8786F
+
+- name: Add Dell apt repo
+  apt_repository:
+    repo="deb http://linux.dell.com/repo/community/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} openmanage"
+
+- name: Install BIOS configuration scripts
+  apt:
+    update_cache=yes
+    cache_valid_time=3600
+    name=dtk-scripts
+
+# KVM virtualization is either "kvm_intel" or "kvm_amd" on amd64
+- name: Check to see if processor KVM virtualization module loaded in kernel
+  shell: lsmod | grep kvm_
+  ignore_errors: true
+  register: virtualization_enabled
+
+- name: Enable virtualization in BIOS
+  command: /opt/dell/toolkit/bin/syscfg --virtualization=enable
+  when: virtualization_enabled|failed
+
diff --git a/roles/dns-configure/defaults/main.yml b/roles/dns-configure/defaults/main.yml
new file mode 100644
index 0000000..defbf98
--- /dev/null
+++ b/roles/dns-configure/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+# roles/dns-configure/defaults
+
+# Define this to set dns servers manually
+#dns_servers:
+#  - 8.8.8.8
+#  - 8.8.4.4
+
+# Set this to search domain suffixes
+# dns_search: {}
+
diff --git a/roles/dns-configure/tasks/main.yml b/roles/dns-configure/tasks/main.yml
new file mode 100644
index 0000000..7aff9f4
--- /dev/null
+++ b/roles/dns-configure/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+# roles/dns-configure/tasks/main.yml
+
+- name: Configure resolv.conf to use nameservers
+  template:
+    src="resolv.conf.j2"
+    dest="/etc/resolv.conf"
+    mode=0644 owner=root group=root
+
diff --git a/roles/dns-configure/templates/resolv.conf.j2 b/roles/dns-configure/templates/resolv.conf.j2
new file mode 100644
index 0000000..27d8ec7
--- /dev/null
+++ b/roles/dns-configure/templates/resolv.conf.j2
@@ -0,0 +1,14 @@
+# resolv.conf (ansible managed)
+{% if unbound_listen_on_default %}
+{% for host in groups['head'] %}
+nameserver {{ hostvars[host].ansible_default_ipv4.address }}
+{% endfor %}
+{% endif %}
+{% if dns_servers is defined %}
+{% for ns in dns_servers %}
+nameserver {{ ns }}
+{% endfor %}
+{% endif %}
+{% if dns_search is defined %}
+search{% for searchdom in dns_search %} {{ searchdom }}{% endfor %}
+{% endif %}
diff --git a/roles/dns-nsd/defaults/main.yml b/roles/dns-nsd/defaults/main.yml
new file mode 100644
index 0000000..5dcbddf
--- /dev/null
+++ b/roles/dns-nsd/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+
+nsd_ip: 127.0.0.1
+nsd_conf: "/var/nsd/etc/nsd.conf"
+nsd_zonesdir: "/var/nsd/zones"
+nsd_group: "nsd"
+
+# default DNS TTL
+dns_ttl: 3600
+
diff --git a/roles/dns-nsd/handlers/main.yml b/roles/dns-nsd/handlers/main.yml
new file mode 100644
index 0000000..559cc55
--- /dev/null
+++ b/roles/dns-nsd/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+#file: roles/dns-nsd/handlers/main.yml
+
+- name: reload-nsd
+  service: name=nsd state=reloaded
+
+- name: restart-nsd
+  service: name=nsd state=restarted
diff --git a/roles/dns-nsd/tasks/main.yml b/roles/dns-nsd/tasks/main.yml
new file mode 100644
index 0000000..0df2533
--- /dev/null
+++ b/roles/dns-nsd/tasks/main.yml
@@ -0,0 +1,42 @@
+---
+#file: roles/dns-nsd/tasks/main.yml
+
+# OS specific vars
+- include_vars: "{{ ansible_os_family }}.yml"
+
+# Debian specific installation
+- include: nsd-Debian.yml
+  when: ansible_os_family == 'Debian'
+
+- name: Ensure that zones directory exists
+  file:
+    name={{ nsd_zonesdir }}
+    state=directory
+    mode=0755 owner=root group={{ nsd_group }}
+
+- name: Create nsd.conf from template
+  template:
+    src=nsd.conf.j2
+    dest={{ nsd_conf }}
+    mode=0644 owner=root group={{ nsd_group }}
+  notify:
+    - restart-nsd
+
+- name: create forward zonefiles from template
+  template:
+    src=zone.forward.j2
+    dest={{ nsd_zonesdir }}/{{ item.name }}.forward
+    mode=0644 owner=root group={{ nsd_group }}
+  with_items: '{{ nsd_zones }}'
+  notify:
+    - reload-nsd
+
+- name: create reverse zonefiles from template
+  template:
+    src=zone.reverse.j2
+    dest={{ nsd_zonesdir }}/{{ item.name }}.reverse
+    mode=0644 owner=root group={{ nsd_group }}
+  with_items: '{{ nsd_zones }}'
+  notify:
+    - reload-nsd
+
diff --git a/roles/dns-nsd/tasks/nsd-Debian.yml b/roles/dns-nsd/tasks/nsd-Debian.yml
new file mode 100644
index 0000000..ca31790
--- /dev/null
+++ b/roles/dns-nsd/tasks/nsd-Debian.yml
@@ -0,0 +1,11 @@
+---
+
+- name: Install nsd
+  apt:
+    name={{ item }}
+    state=present
+    update_cache=yes
+    cache_valid_time=3600
+  with_items:
+    - nsd
+
diff --git a/roles/dns-nsd/templates/nsd.conf.j2 b/roles/dns-nsd/templates/nsd.conf.j2
new file mode 100644
index 0000000..29ba41a
--- /dev/null
+++ b/roles/dns-nsd/templates/nsd.conf.j2
@@ -0,0 +1,28 @@
+# nsd.conf
+# configured by Ansible!
+
+server:
+  hide-version: yes
+## bind to a specific address/port
+  ip-address: {{ nsd_ip }}
+## port number
+  port: {{ nsd_port|default(53) }} 
+  server-count: 1
+  ip4-only: yes
+  zonesdir: {{ nsd_zonesdir }}
+
+remote-control:
+  control-enable: yes
+
+# zones to load
+{% for zone in nsd_zones %}
+zone:
+  name: {{ zone.name }}
+  zonefile: {{ zone.name }}.forward
+
+zone:
+  name: {{ (zone.ipv4_first_octets ~ ".0") | ipaddr('revdns') | regex_replace('^0\.','') }} 
+  zonefile: {{ zone.name }}.reverse
+
+{% endfor %}
+
diff --git a/roles/dns-nsd/templates/zone.forward.j2 b/roles/dns-nsd/templates/zone.forward.j2
new file mode 100644
index 0000000..895d8a3
--- /dev/null
+++ b/roles/dns-nsd/templates/zone.forward.j2
@@ -0,0 +1,40 @@
+;## NSD authoritative only DNS
+;## FORWARD Zone
+
+$ORIGIN {{ item.name }}. ; default zone domain
+$TTL {{ item.ttl | default(dns_ttl) }} ; default time to live
+
+@ IN SOA {{ item.soa }}.{{ item.name }}. admin.{{ item.name }}. (
+         {{ item.serial | default(ansible_date_time.epoch) }}   ; Serial, must be incremented every time you change this file
+         3600        ; Refresh [1hr]
+         600         ; Retry [10m]
+         3600        ; Expire [1hr]
+         60          ; Min TTL [1m]
+         )
+
+; Name Servers
+{% for ns in item.ns %}
+        IN      NS      {{ ns.name ~ '.' ~ item.name }}.
+{% endfor %}
+
+;A and CNAME records
+{% if name_on_public_interface is defined %}
+{{ name_on_public_interface }}    IN    A    {{ ansible_default_ipv4.address }}
+{% endif %}
+{% if item.aliases is defined %}
+{% for alias in item.aliases %}
+{{ alias.name }}    IN    CNAME    {{ alias.dest }}
+{% endfor %}
+{% endif %}
+
+; Set from list of nodes
+{% set nodes = vars[item.nodelist] %}
+{% for node in nodes %}
+{{ node.name }}    IN    A    {{ item.ipv4_first_octets ~ "." ~ node.ipv4_last_octet }}
+{% if node.aliases is defined %}
+{% for alias in node.aliases %}
+{{ alias }}    IN    CNAME    {{ node.name }}
+{% endfor %}
+{% endif %}
+{% endfor %}
+
diff --git a/roles/dns-nsd/templates/zone.reverse.j2 b/roles/dns-nsd/templates/zone.reverse.j2
new file mode 100644
index 0000000..f327d4b
--- /dev/null
+++ b/roles/dns-nsd/templates/zone.reverse.j2
@@ -0,0 +1,21 @@
+;## NSD authoritative only DNS
+;## REVERSE Zone for {{ item.name }}
+
+$ORIGIN {{ item.name }}. ; default zone domain
+$TTL {{ item.ttl | default(dns_ttl) }} ; default time to live
+
+
+{{ (item.ipv4_first_octets ~ ".0") | ipaddr('revdns') | regex_replace('^0\.','') }} IN SOA {{ item.soa }}.{{ item.name }}. admin.{{ item.name }}. (
+         {{ item.serial | default(ansible_date_time.epoch) }}   ; Serial, must be incremented every time you change this file
+         3600        ; Refresh [1hr]
+         600         ; Retry [10m]
+         3600        ; Expire [1hr]
+         60          ; Min TTL [1m]
+         )
+
+{% set nodes = vars[item.nodelist] %}
+
+;PTR records
+{% for node in nodes %}
+{{ (item.ipv4_first_octets ~ "." ~ node.ipv4_last_octet) | ipaddr('revdns') }} IN PTR {{ node.name }}
+{% endfor %}
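Both zone templates iterate over `nsd_zones`, which this role expects from site vars; a hypothetical zone covering the fields referenced here and in unbound.conf.j2 (all values invented):

```
nsd_zones:
  - name: opencloud.example.org
    ipv4_first_octets: 192.168.100
    name_reverse_unbound: "100.168.192.in-addr.arpa"
    soa: ns1
    ns:
      - name: ns1
    nodelist: head_vm_list
    aliases:
      - { name: ns, dest: ns1 }
```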
diff --git a/roles/dns-nsd/vars/Debian.yml b/roles/dns-nsd/vars/Debian.yml
new file mode 100644
index 0000000..eef2bd6
--- /dev/null
+++ b/roles/dns-nsd/vars/Debian.yml
@@ -0,0 +1,6 @@
+---
+
+nsd_conf: "/etc/nsd/nsd.conf"
+nsd_zonesdir: "/var/lib/nsd/zones"
+nsd_group: "nsd"
+
diff --git a/roles/dns-unbound/defaults/main.yml b/roles/dns-unbound/defaults/main.yml
new file mode 100644
index 0000000..d0553b1
--- /dev/null
+++ b/roles/dns-unbound/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+
+unbound_conf: "/var/unbound/etc/unbound.conf"
+unbound_group: "wheel"
+
diff --git a/roles/dns-unbound/handlers/main.yml b/roles/dns-unbound/handlers/main.yml
new file mode 100644
index 0000000..6ad5de0
--- /dev/null
+++ b/roles/dns-unbound/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+#file: roles/dns-unbound/handlers/main.yml
+
+- name: restart-unbound
+  service: name=unbound state=restarted
diff --git a/roles/dns-unbound/tasks/main.yml b/roles/dns-unbound/tasks/main.yml
new file mode 100644
index 0000000..2666538
--- /dev/null
+++ b/roles/dns-unbound/tasks/main.yml
@@ -0,0 +1,19 @@
+---
+#file: roles/dns-unbound/tasks/main.yml
+
+# OS specific vars
+- include_vars: "{{ ansible_os_family }}.yml"
+
+# Debian specific installation
+- include: unbound-Debian.yml
+  when: ansible_os_family == 'Debian'
+
+- name: create unbound.conf from template
+  template:
+    src=unbound.conf.j2
+    dest={{ unbound_conf }}
+    mode=0644 owner=root group={{ unbound_group }}
+    # validate='unbound-checkconf %s' - can't use, checks path, not just config.
+  notify:
+   - restart-unbound
+
diff --git a/roles/dns-unbound/tasks/unbound-Debian.yml b/roles/dns-unbound/tasks/unbound-Debian.yml
new file mode 100644
index 0000000..f878858
--- /dev/null
+++ b/roles/dns-unbound/tasks/unbound-Debian.yml
@@ -0,0 +1,11 @@
+---
+
+- name: Install unbound
+  apt:
+    name={{ item }}
+    state=present
+    update_cache=yes
+    cache_valid_time=3600
+  with_items:
+    - unbound
+
diff --git a/roles/dns-unbound/templates/unbound.conf.j2 b/roles/dns-unbound/templates/unbound.conf.j2
new file mode 100644
index 0000000..ff5ccbd
--- /dev/null
+++ b/roles/dns-unbound/templates/unbound.conf.j2
@@ -0,0 +1,54 @@
+# unbound.conf (configured by Ansible)
+
+server:
+{% if unbound_listen_on_default %}
+  interface: {{ ansible_default_ipv4.address }}  
+{% endif %}
+{% if unbound_interfaces is defined %}
+{% for cidr_ipv4 in unbound_interfaces %}
+  interface: {{ cidr_ipv4 | ipaddr('address') }}
+{% endfor %}
+{% endif %}
+  verbosity: 1
+  port: 53
+  do-ip4: yes
+  do-udp: yes
+  do-tcp: yes
+
+  # allow from localhost
+  access-control: 127.0.0.0/24 allow
+
+{% if unbound_listen_on_default %}
+  # allow from default interfaces
+  access-control: {{ ansible_default_ipv4.address }}/{{ (ansible_default_ipv4.address ~ "/" ~ ansible_default_ipv4.netmask) | ipaddr('prefix') }} allow
+{% endif %}
+
+{% if unbound_interfaces is defined %}
+  # allow from local networks
+{% for cidr_ipv4 in unbound_interfaces %}
+  access-control: {{ cidr_ipv4 }} allow
+{% endfor %}
+{% endif %}
+
+{% if nsd_zones is defined %}
+# allow unbound to query localhost, where nsd is listening
+do-not-query-localhost: no
+
+# allow reverse queries for RFC1918 addresses
+{% for zone in nsd_zones %}
+local-zone: "{{ zone.name_reverse_unbound }}." nodefault
+{% endfor %}
+
+# stub-zones zones that nsd is serving
+{% for zone in nsd_zones %}
+stub-zone:
+  name: "{{ zone.name }}"
+  stub-addr: {{ nsd_ip | default("127.0.0.1") }}
+
+stub-zone:
+  name: "{{ zone.name_reverse_unbound }}."
+  stub-addr: {{ nsd_ip | default("127.0.0.1") }}
+
+{% endfor %}
+{% endif %}
+
diff --git a/roles/dns-unbound/vars/Debian.yml b/roles/dns-unbound/vars/Debian.yml
new file mode 100644
index 0000000..1edb86c
--- /dev/null
+++ b/roles/dns-unbound/vars/Debian.yml
@@ -0,0 +1,5 @@
+---
+
+unbound_conf: "/etc/unbound/unbound.conf"
+unbound_group: "unbound"
+
diff --git a/roles/docker-compose/tasks/main.yml b/roles/docker-compose/tasks/main.yml
new file mode 100644
index 0000000..6385517
--- /dev/null
+++ b/roles/docker-compose/tasks/main.yml
@@ -0,0 +1,33 @@
+---
+# docker-compose/tasks/main.yml
+
+- name: Wait for onos_setup_playbook to complete
+  async_status: jid={{ onos_setup_playbook.ansible_job_id }}
+  register: onos_setup_playbook_result
+  until: onos_setup_playbook_result.finished
+  delay: 10
+  retries: 120
+
+- name: Copy SSL Certs to ONOS so docker-compose can find it
+  command: ansible onos-cord-1 -u ubuntu -m copy \
+    -a "src=/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt dest=~/cord/xos-certs.crt"
+
+- name: Build ONOS image with docker-compose
+  command: ansible onos-cord-1 -u ubuntu -m command \
+    -a "docker-compose build chdir=cord"
+
+- name: Start ONOS
+  command: ansible onos-cord-1 -u ubuntu -m command \
+    -a "docker-compose up -d chdir=cord"
+
+- name: Wait for xos_setup_playbook to complete
+  async_status: jid={{ xos_setup_playbook.ansible_job_id }}
+  register: xos_setup_playbook_result
+  until: xos_setup_playbook_result.finished
+  delay: 10
+  retries: 120
+
+- name: Copy admin-openrc.sh into XOS container
+  command: ansible xos-1 -u ubuntu -m copy \
+    -a "src=~/admin-openrc.sh dest=~/xos/xos/configurations/{{ xos_configuration }}"
+
diff --git a/roles/head-prep/files/ansible.cfg b/roles/head-prep/files/ansible.cfg
new file mode 100644
index 0000000..7fa4a86
--- /dev/null
+++ b/roles/head-prep/files/ansible.cfg
@@ -0,0 +1,4 @@
+[defaults]
+host_key_checking = false
+forks=20
+
diff --git a/roles/head-prep/tasks/main.yml b/roles/head-prep/tasks/main.yml
new file mode 100644
index 0000000..768390e
--- /dev/null
+++ b/roles/head-prep/tasks/main.yml
@@ -0,0 +1,65 @@
+---
+# file: roles/head-prep/tasks/main.yml
+
+- name: Install prerequisites for using PPA repos
+  apt:
+    name={{ item }}
+    update_cache=yes
+    cache_valid_time=3600
+  with_items:
+    - python-pycurl
+    - software-properties-common
+
+- name: Add Ansible/Juju repositories
+  apt_repository:
+    repo={{ item }}
+  with_items:
+     - "ppa:juju/stable"
+     - "ppa:ansible/ansible"
+
+- name: Install packages
+  apt:
+    name={{ item }}
+    state=latest
+    update_cache=yes
+    cache_valid_time=3600
+  with_items:
+    - ansible
+    - uvtool
+    - git
+    - bzr
+    - juju-core
+    - python-novaclient
+    - python-neutronclient
+    - python-keystoneclient
+    - python-glanceclient
+    - virt-top
+
+- name: Prep user account by adding to libvirtd group and generating SSH key
+  user:
+    name={{ ansible_user_id }}
+    generate_ssh_key=yes
+    groups="libvirtd" append=yes
+
+- name: Register public key in variable
+  shell: cat {{ ansible_user_dir }}/.ssh/id_rsa.pub
+  register: sshkey
+
+- name: Add public key to this user account
+  authorized_key:
+    user={{ ansible_user_id }}
+    key="{{ sshkey.stdout }}"
+
+- name: Disable host key checking in ~/.ssh/config
+  lineinfile:
+    dest={{ ansible_user_dir }}/.ssh/config
+    line="StrictHostKeyChecking no"
+    create=yes
+    owner={{ ansible_user_id }} mode=0600
+
+- name: Disable host key checking in ~/.ansible.cfg
+  copy:
+    src=ansible.cfg
+    dest={{ ansible_user_dir }}/.ansible.cfg
+    owner={{ ansible_user_id }} mode=0644
+
diff --git a/roles/juju-openstack-config/defaults/main.yml b/roles/juju-openstack-config/defaults/main.yml
new file mode 100644
index 0000000..4a0158f
--- /dev/null
+++ b/roles/juju-openstack-config/defaults/main.yml
@@ -0,0 +1,20 @@
+---
+# roles/juju-openstack-config/defaults/main.yml
+
+openstack_version: kilo
+
+openstack_cfg_path: /usr/local/src/openstack.cfg
+
+compute_relations:
+  - name: nova-compute
+    relations: [ "glance", "nova-cloud-controller", "neutron-openvswitch", "nagios", "nrpe", ]
+
+  - name: "nova-compute:shared-db"
+    relations: [ "mysql:shared-db", ]
+
+  - name: "nova-compute:amqp"
+    relations: [ "rabbitmq-server:amqp", ]
+
+  - name: ntp
+    relations: [ "nova-compute", ]
+
diff --git a/roles/juju-openstack-config/files/network-setup.sh b/roles/juju-openstack-config/files/network-setup.sh
new file mode 100755
index 0000000..05e4c12
--- /dev/null
+++ b/roles/juju-openstack-config/files/network-setup.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+source ~/admin-openrc.sh
+
+function create-flat-net {
+    NAME=$1
+    neutron net-show $NAME-net > /dev/null 2>&1
+    if [ "$?" -ne 0 ]
+    then
+	neutron net-create --provider:physical_network=$NAME --provider:network_type=flat --shared $NAME-net
+    fi
+}
+
+function create-subnet {
+    NAME=$1
+    CIDR=$2
+    GW=$3
+
+    neutron subnet-show $NAME-net > /dev/null 2>&1
+    if [ "$?" -ne 0 ]
+    then
+	neutron subnet-create $NAME-net --name $NAME-net $CIDR --gateway=$GW --disable-dhcp
+    fi
+}
+
+function create-subnet-no-gateway {
+    NAME=$1
+    CIDR=$2
+
+    neutron subnet-show $NAME-net > /dev/null 2>&1
+    if [ "$?" -ne 0 ]
+    then
+	neutron subnet-create $NAME-net --name $NAME-net $CIDR --no-gateway --disable-dhcp
+    fi
+}
+
+create-flat-net nat
+create-subnet nat 172.16.0.0/16 172.16.0.1
+
+create-flat-net ext
diff --git a/roles/juju-openstack-config/tasks/main.yml b/roles/juju-openstack-config/tasks/main.yml
new file mode 100644
index 0000000..8075013
--- /dev/null
+++ b/roles/juju-openstack-config/tasks/main.yml
@@ -0,0 +1,51 @@
+---
+# roles/juju-openstack-config/tasks/main.yml
+
+- name: add compute nodes
+  command: juju add-machine ssh:ubuntu@{{ item }}
+  with_items: "{{ groups['compute'] | difference( juju_machines.keys() ) }}"
+  register: added_compute_nodes
+
+# run this again, so add-machine items will be in the juju_compute_nodes list
+- name: Obtain Juju Facts after adding compute nodes
+  when: added_compute_nodes
+  juju_facts:
+
+# the crazy [ ] in the with-items is so that jinja compares arrays of strings,
+# rather than strings of characters
+- name: add-unit nova-compute to first compute node
+  command: "juju deploy nova-compute --to {{ juju_machines[item]['machine_id'] }} --config={{ openstack_cfg_path }}"
+  with_items: "{{ [ groups['compute'][0] ] | difference( juju_compute_nodes.keys() ) }}"
+  register: added_first_nova_compute
+
+# run this again, so first nova compute will be in the juju_compute_nodes list
+- name: Obtain Juju Facts nova-compute deploy
+  juju_facts:
+  when: added_first_nova_compute
+
+- name: add-unit nova-compute to other compute nodes
+  command: "juju add-unit nova-compute --to {{ juju_machines[item]['machine_id'] }}"
+  with_items: "{{ groups['compute'] | difference( juju_compute_nodes.keys() )  }}"
+
+- name: Create relations to compute
+  command: "juju add-relation '{{ item.0.name }}' '{{ item.1 }}'"
+  register: compute_relation
+  failed_when: "compute_relation|failed and 'relation already exists' not in compute_relation.stderr"
+  with_subelements:
+    - "{{ compute_relations }}"
+    - relations
+
+# need to ansible-ify these
+- name: Copy credentials file to nova-cloud-controller
+  command: "scp {{ ansible_user_dir }}/admin-openrc.sh ubuntu@nova-cloud-controller:"
+
+- name: Copy network setup script
+  become: yes
+  copy:
+    src=network-setup.sh
+    dest=/usr/local/src/network-setup.sh
+    mode=0644 owner=root
+
+- name: Run network setup script
+  command: ansible nova-cloud-controller-1 -m script -u ubuntu -a "/usr/local/src/network-setup.sh"
+
diff --git a/roles/juju-setup/defaults/main.yml b/roles/juju-setup/defaults/main.yml
new file mode 100644
index 0000000..bbbb92e
--- /dev/null
+++ b/roles/juju-setup/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+
+juju_config_name: opencloud
+juju_config_path: /usr/local/src/juju_config.yml
+
+charm_versions: {}
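`charm_versions` lets the deploy tasks later in this diff pin a specific charm rather than the bare service name (see `charm_versions[item] | default(item)`); a hypothetical pin, charm URL invented:

```
charm_versions:
  mongodb: "cs:trusty/mongodb-33"
```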
diff --git a/roles/juju-setup/files/daemon b/roles/juju-setup/files/daemon
new file mode 100644
index 0000000..8d9102b
--- /dev/null
+++ b/roles/juju-setup/files/daemon
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+SHELL="/bin/bash"
+
+NIC=$( route|grep default|awk '{print $NF}' )
+
+NAME="${1}"
+OP="${2}"
+SUBOP="${3}"
+ARGS="${4}"
+
+add_port_fwd_rule() {
+    DPORT=$1
+    VM=$2
+    TOPORT=$3
+
+    VMIP=$( getent ahosts $VM|head -1|awk '{print $1}' )
+    iptables -t nat -C PREROUTING -p tcp -i $NIC --dport $DPORT -j DNAT --to-destination $VMIP:$TOPORT
+    if [ "$?" -ne 0 ]
+    then
+        iptables -t nat -A PREROUTING -p tcp -i $NIC --dport $DPORT -j DNAT --to-destination $VMIP:$TOPORT
+    fi
+}
+
+if [ "$OP" = "start" ] || [ "$OP" = "reload" ]
+then
+    iptables -t nat -F
+    add_port_fwd_rule 35357 keystone 35357
+    add_port_fwd_rule 4990 keystone 4990
+    add_port_fwd_rule 5000 keystone 5000
+    add_port_fwd_rule 8774 nova-cloud-controller 8774
+    add_port_fwd_rule 9696 neutron-api 9696
+    add_port_fwd_rule 9292 glance 9292
+    add_port_fwd_rule 8080 openstack-dashboard 80
+    add_port_fwd_rule 3128 nagios 80
+    add_port_fwd_rule 8777 ceilometer 8777
+
+    # Also flush the filter table before its rules are re-added
+    iptables -F
+fi
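This hook rebuilds the DNAT rules whenever libvirt starts or reloads, using `iptables -C` as an existence check before each `-A` append. A sketch for spot-checking the forwards on the head node afterwards:

    # list the DNAT rules installed by the hook, with packet counters
    sudo iptables -t nat -L PREROUTING -n -v
    # e.g. head-node port 8080 should be forwarded to the dashboard VM's port 80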
diff --git a/roles/juju-setup/tasks/main.yml b/roles/juju-setup/tasks/main.yml
new file mode 100644
index 0000000..db8f25d
--- /dev/null
+++ b/roles/juju-setup/tasks/main.yml
@@ -0,0 +1,108 @@
+---
+# roles/juju-setup/tasks/main.yml
+
+- name: Initialize Juju
+  command: juju generate-config
+    creates={{ ansible_user_dir }}/.juju/environments.yaml
+
+- name: Create Juju config file from template
+  template:
+    src=environments.yaml.j2
+    dest={{ ansible_user_dir }}/.juju/environments.yaml
+
+- name: Bootstrap Juju
+  command: juju bootstrap
+    creates={{ ansible_user_dir }}/.juju/environments/manual.jenv
+
+- name: Copy over juju-config.yml for configuring Juju services
+  become: yes
+  template:
+    src={{ juju_config_name }}_juju_config.yml.j2
+    dest={{ juju_config_path }}
+
+# Code for this is in library/juju_facts.py
+- name: Obtain Juju Facts for creating machines
+  juju_facts:
+
+- name: Pause to let Juju settle before adding machines
+  pause:
+    prompt="Waiting for Juju..."
+    seconds=20
+
+# For setwise operations on desired vs Juju state:
+# list of active juju_machines names: juju_machines.keys()
+# list of active juju_services names: juju_services.keys()
+
+- name: Add machines to Juju
+  command: "juju add-machine ssh:{{ item }}"
+  with_items: "{{ head_vm_list | map(attribute='service') | list | reject('undefined') | map('format_string', '%s.'~site_suffix ) | difference( juju_machines.keys() ) }}"
+
+# run this again, so machines will be in the juju_machines list
+- name: Obtain Juju Facts after machine creation
+  juju_facts:
+
+- name: Deploy services that are hosted in their own VM
+  command: "juju deploy {{ charm_versions[item] | default(item) }} --to {{ juju_machines[item~'.'~site_suffix]['machine_id'] }} --config={{ juju_config_path }}"
+  with_items: "{{ vm_service_list | difference( juju_services.keys() ) }}"
+
+- name: Deploy mongodb to ceilometer VM
+  command: "juju deploy {{ charm_versions['mongodb'] | default('mongodb') }} --to {{ juju_machines['ceilometer.'~site_suffix]['machine_id'] }} --config={{ juju_config_path }}"
+  when: juju_services['mongodb'] is undefined
+
+- name: Deploy services that don't have their own VM
+  command: "juju deploy {{ charm_versions[item] | default(item) }} --config={{ juju_config_path }}"
+  with_items: "{{ standalone_service_list | difference( juju_services.keys() ) }}"
+
+- name: Create relations between services
+  command: "juju add-relation '{{ item.0.name }}' '{{ item.1 }}'"
+  register: juju_relation
+  failed_when: "juju_relation|failed and 'relation already exists' not in juju_relation.stderr"
+  with_subelements:
+    - "{{ service_relations }}"
+    - relations
+
+# run another time, so services will be in juju_services list
+- name: Obtain Juju Facts after service creation
+  juju_facts:
+
+# 900s = 15m. Relations usually take 10-12m to come up on CloudLab
+# Only checks the first port in the list
+- name: Wait for Juju services on VMs to have open ports
+  wait_for:
+    host={{ item.name }}
+    port={{ item.forwarded_ports[0].int }}
+    timeout=900
+  with_items: "{{ head_vm_list | selectattr('forwarded_ports', 'defined') | list }}"
+
+# secondary wait, as open ports alone don't mean the services are ready; likely only one of these waits is strictly needed
+# 80*15s = 1200s = 20m max wait
+- name: Wait for juju services to start
+  action: command juju status --format=summary
+  register: juju_summary
+  until: juju_summary.stdout.find("pending:") == -1
+  retries: 80
+  delay: 15
+
+- name: Create admin-openrc.sh credentials file
+  template:
+    src=admin-openrc.sh.j2
+    dest={{ ansible_user_dir }}/admin-openrc.sh
+
+- name: Copy nova-cloud-controller CA certificate to head
+  command: juju scp {{ juju_services['nova-cloud-controller']['units'].keys()[0] }}:/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt {{ ansible_user_dir }}
+
+- name: Copy cert to system location
+  become: yes
+  command: cp {{ ansible_user_dir }}/keystone_juju_ca_cert.crt /usr/local/share/ca-certificates
+
+- name: update-ca-certificates
+  become: yes
+  command: update-ca-certificates
+
+- name: Copy cert to all service VMs
+  command: ansible services -b -u ubuntu -m copy -a "src={{ ansible_user_dir }}/keystone_juju_ca_cert.crt dest=/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt owner=root group=root mode=0644"
+
+- name: update-ca-certificates in service VMs
+  command: ansible services -b -u ubuntu -m command -a "update-ca-certificates"
+
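The final tasks fan the Keystone CA certificate out from the nova-cloud-controller unit to the head node and all service VMs, then rebuild the system CA bundle. A hedged way to confirm the bundle now trusts the HTTPS Keystone endpoint (the hostname shown uses the CORD test site_suffix as an example):

    # should complete the TLS handshake with no verification errors
    openssl s_client -connect keystone.cordtest.opencloud.us:5000 \
        -CAfile /etc/ssl/certs/ca-certificates.crt < /dev/null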
diff --git a/roles/juju-setup/templates/admin-openrc.sh.j2 b/roles/juju-setup/templates/admin-openrc.sh.j2
new file mode 100644
index 0000000..86eb230
--- /dev/null
+++ b/roles/juju-setup/templates/admin-openrc.sh.j2
@@ -0,0 +1,6 @@
+export OS_USERNAME=admin
+export OS_PASSWORD={{ keystone_admin_password }}
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=https://keystone.{{ site_suffix }}:5000/v2.0
+export OS_REGION_NAME=RegionOne
+export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
diff --git a/roles/juju-setup/templates/cord_juju_config.yml.j2 b/roles/juju-setup/templates/cord_juju_config.yml.j2
new file mode 100644
index 0000000..a549769
--- /dev/null
+++ b/roles/juju-setup/templates/cord_juju_config.yml.j2
@@ -0,0 +1,59 @@
+---
+# juju configuration file for CORD deployments
+
+ceilometer:
+  openstack-origin: "cloud:trusty-kilo"
+
+ceilometer-agent: {}
+
+glance:
+  openstack-origin: "cloud:trusty-kilo"
+  ha-mcastport: 5402
+
+keystone:
+  admin-password: "{{ keystone_admin_password }}"
+  os-public-hostname: "keystone.{{ site_suffix }}"
+  ha-mcastport: 5403
+  https-service-endpoints: "True"
+  openstack-origin: "cloud:trusty-kilo"
+  use-https: "yes"
+
+mongodb: {}
+
+nagios: {}
+
+neutron-api:
+  neutron-plugin: onosvtn
+  onos-vtn-ip: onos-cord
+  neutron-security-groups: "True"
+  openstack-origin: "cloud:trusty-kilo"
+  overlay-network-type: vxlan
+
+neutron-openvswitch: {}
+
+nova-cloud-controller:
+  config-flags: "force_config_drive=always"
+  console-access-protocol: novnc
+  network-manager: Neutron
+  openstack-origin: "cloud:trusty-kilo"
+
+nova-compute:
+  virt-type: kvm
+  config-flags: "firewall_driver=nova.virt.firewall.NoopFirewallDriver"
+  disable-neutron-security-groups: "True"
+  openstack-origin: "cloud:trusty-kilo"
+
+nrpe: {}
+
+ntp: {}
+
+openstack-dashboard:
+  openstack-origin: "cloud:trusty-kilo"
+
+percona-cluster:
+  max-connections: 20000
+
+rabbitmq-server:
+  ssl: "on"
+
+
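Each top-level key in this template corresponds to a charm name, and the juju-setup tasks hand the rendered file to every `juju deploy` via `--config`, so each service reads only its own section. Roughly, as a sketch (charm revision from the CORD defaults; `juju set` is the usual way to change a single option later):

    juju deploy cs:trusty/keystone-33 --config=/usr/local/src/juju_config.yml
    juju set keystone use-https=yes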
diff --git a/roles/juju-setup/templates/environments.yaml.j2 b/roles/juju-setup/templates/environments.yaml.j2
new file mode 100644
index 0000000..519adaf
--- /dev/null
+++ b/roles/juju-setup/templates/environments.yaml.j2
@@ -0,0 +1,10 @@
+default: manual
+environments:
+    manual:
+        type: manual
+        bootstrap-host: juju.{{ site_suffix }}
+        bootstrap-user: ubuntu
+        default-series: {{ ansible_distribution_release }}
+        enable-os-refresh-update: false
+        enable-os-upgrade: false
+
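With the manual provider, `juju bootstrap` SSHes into `bootstrap-host` as `bootstrap-user` and installs the state server there; no cloud API is involved. The juju-setup role drives this, but done by hand it would look like this sketch:

    juju generate-config    # writes ~/.juju/environments.yaml (replaced by this template)
    juju bootstrap          # targets juju.<site_suffix> over SSH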
diff --git a/roles/juju-setup/templates/opencloud_juju_config.yml.j2 b/roles/juju-setup/templates/opencloud_juju_config.yml.j2
new file mode 100644
index 0000000..7911828
--- /dev/null
+++ b/roles/juju-setup/templates/opencloud_juju_config.yml.j2
@@ -0,0 +1,63 @@
+---
+# juju configuration file for OpenCloud deployments
+
+ceilometer: {}
+
+ceilometer-agent: {}
+
+glance:
+  openstack-origin: "cloud:trusty-kilo"
+
+keystone:
+  admin-password: "{{ keystone_admin_password }}"
+  os-public-hostname: "keystone.{{ site_suffix }}"
+  use-https: "yes"
+  https-service-endpoints: "True"
+  openstack-origin: "cloud:trusty-kilo"
+
+mongodb: {}
+
+nagios: {}
+
+neutron-api:
+  flat-network-providers: "*"
+  openstack-origin: "cloud:trusty-kilo"
+  vlan-ranges: "physnet1:1000:2000 nat"
+
+neutron-gateway:
+  bridge-mappings: "physnet1:br-data nat:br-nat"
+  flat-network-providers: "*"
+  instance-mtu: "1400"
+  openstack-origin: "cloud:trusty-kilo"
+  vlan-ranges: "physnet1:1000:2000 nat"
+
+neutron-openvswitch:
+  bridge-mappings: "physnet1:br-data nat:br-nat"
+  disable-security-groups: "True"
+  flat-network-providers: "*"
+  vlan-ranges: "physnet1:1000:2000 nat"
+
+nova-cloud-controller:
+  console-access-protocol: "novnc"
+  network-manager: "Neutron"
+  openstack-origin: "cloud:trusty-kilo"
+
+nova-compute:
+  virt-type: kvm
+  config-flags: "firewall_driver=nova.virt.firewall.NoopFirewallDriver"
+  disable-neutron-security-groups: "True"
+  openstack-origin: "cloud:trusty-kilo"
+
+nrpe: {}
+
+ntp:
+  source: "0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org"
+
+openstack-dashboard:
+  openstack-origin: "cloud:trusty-kilo"
+
+percona-cluster: {}
+
+rabbitmq-server:
+  ssl: "on"
+
diff --git a/roles/onos-vm-install/defaults/main.yml b/roles/onos-vm-install/defaults/main.yml
new file mode 100644
index 0000000..2a5be99
--- /dev/null
+++ b/roles/onos-vm-install/defaults/main.yml
@@ -0,0 +1,2 @@
+--- 
+
diff --git a/roles/onos-vm-install/files/Dockerfile.xos-onos b/roles/onos-vm-install/files/Dockerfile.xos-onos
new file mode 100644
index 0000000..267d820
--- /dev/null
+++ b/roles/onos-vm-install/files/Dockerfile.xos-onos
@@ -0,0 +1,17 @@
+# ONOS dockerfile with XOS additions
+
+FROM onosproject/onos
+MAINTAINER Zack Williams <zdw@cs.arizona.edu>
+
+# Include SSL certs
+COPY xos-certs.crt /usr/local/share/ca-certificates/xos-certs.crt
+RUN update-ca-certificates
+
+# Create Java KeyStore from certs
+RUN openssl x509 -in /usr/local/share/ca-certificates/xos-certs.crt -outform der -out /usr/local/share/ca-certificates/xos-certs.der
+RUN keytool -import -noprompt -storepass 222222 -alias xos-certs -file /usr/local/share/ca-certificates/xos-certs.der -keystore /usr/local/share/ca-certificates/xos-certs.jks
+
+# Updated onos-service that uses the JKS trust store
+COPY onos-service /root/onos/bin/onos-service
+RUN chmod 755 /root/onos/bin/onos-service
+
diff --git a/roles/onos-vm-install/files/onos-docker-compose.yml b/roles/onos-vm-install/files/onos-docker-compose.yml
new file mode 100644
index 0000000..09255fa
--- /dev/null
+++ b/roles/onos-vm-install/files/onos-docker-compose.yml
@@ -0,0 +1,19 @@
+# ONOS with XOS features for docker-compose
+version: '2'
+
+services:
+
+   xos-onos:
+      build:
+       context: .
+       dockerfile: Dockerfile.xos-onos
+      image: xos/onos
+      expose:
+      - "6653"
+      - "8101"
+      - "8181"
+      - "9876"
+      network_mode: host
+      volumes:
+      - ./id_rsa:/root/node_key:ro
+
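This compose file builds the XOS-flavoured ONOS image and runs it with host networking (so the `expose` list is informational), mounting the node SSH key read-only at /root/node_key. Bringing it up on the onos-cord VM looks like this sketch, which the single-node script performs over SSH:

    cd ~/cord
    sudo docker-compose up -d    # builds Dockerfile.xos-onos, then starts xos-onos
    sudo docker-compose ps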
diff --git a/roles/onos-vm-install/files/onos-service b/roles/onos-vm-install/files/onos-service
new file mode 100644
index 0000000..7d810c4
--- /dev/null
+++ b/roles/onos-vm-install/files/onos-service
@@ -0,0 +1,53 @@
+#!/bin/bash
+# -----------------------------------------------------------------------------
+# Starts ONOS Apache Karaf container
+# -----------------------------------------------------------------------------
+
+# uncomment the following line for performance testing
+#export JAVA_OPTS="${JAVA_OPTS:--Xms8G -Xmx8G -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:+PrintGCDetails -XX:+PrintGCTimeStamps}"
+
+# uncomment the following line for Netty TLS encryption
+# Modify the keystore and truststore locations/passwords accordingly
+#export JAVA_OPTS="${JAVA_OPTS:--DenableNettyTLS=true -Djavax.net.ssl.keyStore=/home/ubuntu/onos.jks -Djavax.net.ssl.keyStorePassword=222222 -Djavax.net.ssl.trustStore=/home/ubuntu/onos.jks -Djavax.net.ssl.trustStorePassword=222222}"
+
+export JAVA_OPTS="-Djavax.net.ssl.trustStore=/usr/local/share/ca-certificates/xos-certs.jks -Djavax.net.ssl.trustStorePassword=222222" 
+
+set -e  # exit on error
+set -u  # exit on undefined variable
+
+# If ONOS_HOME is set, respect its value.
+# If ONOS_HOME is not set (e.g. in the init or service environment),
+# set it based on this script's path.
+ONOS_HOME=${ONOS_HOME:-$(cd $(dirname $0)/.. >/dev/null 2>&1 && pwd)}
+KARAF_ARGS=
+SYS_APPS=drivers
+ONOS_APPS=${ONOS_APPS:-}  # Empty means don't activate any new apps
+
+cd $ONOS_HOME
+
+# Parse out arguments destined for the karaf invocation vs. arguments
+# that will be processed inline
+while [ $# -gt 0 ]; do
+  case $1 in
+    apps-clean)
+      # Deactivate all applications
+      find ${ONOS_HOME}/apps -name "active" -exec rm \{\} \;
+      ;;
+    *)
+      KARAF_ARGS+=" $1"
+      ;;
+  esac
+  shift
+done
+
+# Activate the required system applications (SYS_APPS) as well as any
+# applications specified in the ONOS_APPS variable
+for app in ${SYS_APPS//,/ } ${ONOS_APPS//,/ }; do
+  if [[ "$app" =~ \. ]]; then
+    touch ${ONOS_HOME}/apps/$app/active
+  else
+    touch ${ONOS_HOME}/apps/org.onosproject.$app/active
+  fi
+done
+
+exec ${ONOS_HOME}/apache-karaf-3.0.5/bin/karaf $KARAF_ARGS
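The Dockerfile converts the PEM certificate to DER and imports it into a JKS keystore, and this script points the JVM trust store at that keystore via JAVA_OPTS. To inspect the keystore baked into the image, a sketch (store password from the Dockerfile):

    sudo docker run --rm --entrypoint keytool xos/onos \
        -list -keystore /usr/local/share/ca-certificates/xos-certs.jks \
        -storepass 222222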
diff --git a/roles/onos-vm-install/files/onos-setup-playbook.yml b/roles/onos-vm-install/files/onos-setup-playbook.yml
new file mode 100644
index 0000000..2d4170d
--- /dev/null
+++ b/roles/onos-vm-install/files/onos-setup-playbook.yml
@@ -0,0 +1,39 @@
+---
+- hosts: onos-cord-1
+  remote_user: ubuntu
+
+  tasks:
+    - name: Include configuration vars
+      include_vars: onos-setup-vars.yml
+
+    - name: Create CORD directory
+      file:
+        path={{ ansible_user_dir }}/cord
+        state=directory
+
+    - name: Copy over SSH keys
+      copy:
+        src=~/.ssh/{{ item }}
+        dest={{ ansible_user_dir }}/cord/{{ item }}
+        owner={{ ansible_user_id }} mode=0600
+      with_items:
+       - id_rsa
+       - id_rsa.pub
+
+    - name: Copy over files to build XOS variant of ONOS
+      copy:
+        src="~/{{ item }}"
+        dest="{{ ansible_user_dir }}/cord/{{ item }}"
+      with_items:
+       - Dockerfile.xos-onos
+       - onos-service
+
+    - name: Copy over & rename docker-compose file
+      copy:
+        src=~/onos-docker-compose.yml
+        dest={{ ansible_user_dir }}/cord/docker-compose.yml
+
+    - name: Pull docker image for ONOS
+      become: yes
+      command: docker pull onosproject/onos
+
diff --git a/roles/onos-vm-install/tasks/main.yml b/roles/onos-vm-install/tasks/main.yml
new file mode 100644
index 0000000..0dbb54f
--- /dev/null
+++ b/roles/onos-vm-install/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+# onos-vm-install/tasks/main.yml
+#
+# Install ONOS on a sub-VM by calling a nested ansible playbook
+
+- name: Create a vars file from template
+  template:
+    src=onos-setup-vars.yml.j2
+    dest={{ ansible_user_dir }}/onos-setup-vars.yml
+
+- name: Copy over ONOS playbook and docker compose files
+  copy:
+    src={{ item }}
+    dest={{ ansible_user_dir }}/{{ item }}
+  with_items:
+    - onos-setup-playbook.yml
+    - onos-docker-compose.yml
+    - Dockerfile.xos-onos
+    - onos-service
+
+- name: Run the ONOS ansible playbook
+  command: ansible-playbook {{ ansible_user_dir }}/onos-setup-playbook.yml
+  async: 1200
+  poll: 0
+  register: onos_setup_playbook
+
diff --git a/roles/onos-vm-install/templates/onos-setup-vars.yml.j2 b/roles/onos-vm-install/templates/onos-setup-vars.yml.j2
new file mode 100644
index 0000000..cd21505
--- /dev/null
+++ b/roles/onos-vm-install/templates/onos-setup-vars.yml.j2
@@ -0,0 +1,2 @@
+---
+
diff --git a/roles/simulate-fabric/defaults/main.yml b/roles/simulate-fabric/defaults/main.yml
new file mode 100644
index 0000000..b41396d
--- /dev/null
+++ b/roles/simulate-fabric/defaults/main.yml
@@ -0,0 +1,38 @@
+---
+# simulate-fabric/defaults/main.yml
+
+simfabric_bridges:
+  - name: databr
+    addresses:
+      - "10.168.0.1/24"
+      - "10.168.1.1/24"
+    interfaces:
+      - veth0
+
+simfabric_links:
+  - name: veth
+    mac: "02:42:0a:a8:00:01"
+
+simfabric_interfaces:
+  - veth0
+  - veth1
+
+# see note in the playbook; this won't apply until ansible gains an iptables module
+simfabric_iptables:
+  - table: nat
+    chain: POSTROUTING
+    source: "10.168.0.0/16"
+    dest: "! 10.168.0.0/16"
+    jump: MASQUERADE
+
+simfabric_sysctl:
+  - name: net.ipv4.ip_forward
+    value: 1
+  - name: net.ipv4.conf.all.send_redirects
+    value: 0
+  - name: net.ipv4.conf.default.send_redirects
+    value: 0
+  - name: net.ipv4.conf.eth0.send_redirects
+    value: 0
+  - name: net.ipv4.conf.databr.send_redirects
+    value: 0
diff --git a/roles/simulate-fabric/files/simulate-fabric-playbook.yml b/roles/simulate-fabric/files/simulate-fabric-playbook.yml
new file mode 100644
index 0000000..6f24f42
--- /dev/null
+++ b/roles/simulate-fabric/files/simulate-fabric-playbook.yml
@@ -0,0 +1,83 @@
+---
+- hosts: nova-compute-1
+  remote_user: ubuntu
+  become: yes
+
+  tasks:
+    - name: Include configuration vars
+      include_vars: simulate-fabric-vars.yml
+
+    - name: Install prerequisites
+      apt:
+        name={{ item }}
+        update_cache=yes
+        cache_valid_time=3600
+      become: yes
+      with_items:
+       - bridge-utils
+
+    - name: Create bridges
+      when: "ansible_{{ item.name }} is not defined"
+      command: brctl addbr "{{ item.name }}"
+      with_items: "{{ simfabric_bridges }}"
+
+    # note: not idempotent if the play failed between the prior step and this one
+    - name: Set IP addresses to bridges
+      when: "ansible_{{ item.0.name }} is not defined"
+      command: "ip addr add {{ item.1 }} dev {{ item.0.name }}"
+      with_subelements:
+       - "{{ simfabric_bridges }}"
+       - addresses
+
+    - name: Run setup again to obtain bridge info
+      setup:
+
+    - name: Start bridges
+      when: "not ansible_{{ item.name }}.active"
+      command: "ip link set dev {{ item.name }} up"
+      with_items: "{{ simfabric_bridges }}"
+
+    - name: Create ip links
+      when: "ansible_{{ item.name }}0 is not defined"
+      command: "ip link add address {{ item.mac }} type {{ item.name }}"
+      with_items: "{{ simfabric_links }}"
+
+    - name: Run setup again to obtain link info
+      setup:
+
+    - name: Start interfaces
+      when: "not ansible_{{ item }}.active"
+      command: "ip link set dev {{ item }} up"
+      with_items: "{{ simfabric_interfaces }}"
+
+    - name: Add interfaces to bridges
+      when: "not item.1 in ansible_{{ item.0.name }}.interfaces"
+      command: "brctl addif {{ item.0.name }} {{ item.1 }}"
+      with_subelements:
+       - "{{ simfabric_bridges }}"
+       - interfaces
+
+    - name: Check for iptables rule
+      command: "iptables -t nat -C POSTROUTING -s 10.168.0.0/16 ! -d 10.168.0.0/16 -j MASQUERADE"
+      register: iptables_check
+      failed_when: "iptables_check|failed and 'No chain/target/match by that name' not in iptables_check.stderr"
+
+    - name: Create iptables rule
+      when: "iptables_check.rc != 0"
+      command: "iptables -t nat -A POSTROUTING -s 10.168.0.0/16 ! -d 10.168.0.0/16 -j MASQUERADE"
+
+# the below will likely work when this pull makes it into ansible:
+# https://github.com/ansible/ansible-modules-extras/pull/1685
+#   - name: Configure iptables
+#     iptables: "table={{ item.table }} chain={{ item.chain }} source={{ item.source }} destination={{ item.dest }} jump={{ item.jump }}"
+#     with_items: "{{ simfabric_iptables }}"
+
+    - name: Set kernel sysctl values
+      sysctl:
+        name="{{ item.name }}"
+        value="{{ item.value }}"
+        sysctl_set=yes
+        state=present
+        reload=yes
+      with_items: "{{ simfabric_sysctl }}"
+
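The playbook wraps a handful of shell commands with existence checks so each step is skipped once done. The net effect on nova-compute-1, expressed directly in shell as a sketch (values from the role defaults above):

    sudo brctl addbr databr
    sudo ip addr add 10.168.0.1/24 dev databr
    sudo ip addr add 10.168.1.1/24 dev databr
    sudo ip link add address 02:42:0a:a8:00:01 type veth    # creates the veth0/veth1 pair
    sudo ip link set dev databr up
    sudo ip link set dev veth0 up
    sudo ip link set dev veth1 up
    sudo brctl addif databr veth0
    sudo iptables -t nat -A POSTROUTING -s 10.168.0.0/16 ! -d 10.168.0.0/16 -j MASQUERADE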
diff --git a/roles/simulate-fabric/tasks/main.yml b/roles/simulate-fabric/tasks/main.yml
new file mode 100644
index 0000000..c9e834b
--- /dev/null
+++ b/roles/simulate-fabric/tasks/main.yml
@@ -0,0 +1,16 @@
+---
+# simulate-fabric/tasks/main.yml
+
+- name: Create a vars file from template
+  template:
+    src=simulate-fabric-vars.yml.j2
+    dest={{ ansible_user_dir }}/simulate-fabric-vars.yml
+
+- name: Copy over simulate-fabric ansible playbook
+  copy:
+    src=simulate-fabric-playbook.yml
+    dest={{ ansible_user_dir }}/simulate-fabric-playbook.yml
+
+- name: Setup simulated fabric on nova-compute-1 using playbook
+  command: ansible-playbook {{ ansible_user_dir }}/simulate-fabric-playbook.yml
+
diff --git a/roles/simulate-fabric/templates/simulate-fabric-vars.yml.j2 b/roles/simulate-fabric/templates/simulate-fabric-vars.yml.j2
new file mode 100644
index 0000000..599443f
--- /dev/null
+++ b/roles/simulate-fabric/templates/simulate-fabric-vars.yml.j2
@@ -0,0 +1,17 @@
+---
+
+simfabric_bridges:
+{{ simfabric_bridges | to_nice_yaml }}
+
+simfabric_links:
+{{ simfabric_links | to_nice_yaml }}
+
+simfabric_interfaces:
+{{ simfabric_interfaces | to_nice_yaml }}
+
+simfabric_iptables:
+{{ simfabric_iptables | to_nice_yaml }}
+
+simfabric_sysctl:
+{{ simfabric_sysctl | to_nice_yaml }}
+
diff --git a/roles/test-prep/tasks/main.yml b/roles/test-prep/tasks/main.yml
new file mode 100644
index 0000000..1ebf604
--- /dev/null
+++ b/roles/test-prep/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+# roles/test-prep/tasks/main.yml
+
+- name: Add local resolver to /etc/resolv.conf
+  lineinfile:
+    dest=/etc/resolv.conf
+    insertafter=".*DO NOT EDIT THIS FILE.*" 
+    line="nameserver 192.168.122.1"
+
diff --git a/roles/xos-install/defaults/main.yml b/roles/xos-install/defaults/main.yml
new file mode 100644
index 0000000..ba26bf6
--- /dev/null
+++ b/roles/xos-install/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# default variables for xos-install role
+
+xos_repo_url: "https://github.com/open-cloud/xos.git"
+xos_repo_dest: "~/xos"
+xos_repo_branch: "HEAD"
diff --git a/roles/xos-install/tasks/main.yml b/roles/xos-install/tasks/main.yml
new file mode 100644
index 0000000..d982c26
--- /dev/null
+++ b/roles/xos-install/tasks/main.yml
@@ -0,0 +1,7 @@
+---
+# tasks for xos-install role
+
+- name: Checkout XOS repo
+  git: repo={{ xos_repo_url }}
+       dest={{ xos_repo_dest }}
+       version={{ xos_repo_branch }}
diff --git a/roles/xos-vm-install/defaults/main.yml b/roles/xos-vm-install/defaults/main.yml
new file mode 100644
index 0000000..a2d12d1
--- /dev/null
+++ b/roles/xos-vm-install/defaults/main.yml
@@ -0,0 +1,7 @@
+--- 
+
+xos_repo_url: "https://github.com/open-cloud/xos.git"
+xos_repo_dest: "~/xos"
+xos_repo_branch: "HEAD"
+
+xos_configuration: "devel"
diff --git a/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml b/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml
new file mode 100644
index 0000000..5f3bcd8
--- /dev/null
+++ b/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml
@@ -0,0 +1,58 @@
+---
+- hosts: xos-1
+  remote_user: ubuntu
+
+  tasks:
+    - name: Include configuration vars
+      include_vars: xos-setup-vars.yml
+
+    - name: Install prerequisites
+      apt:
+        name={{ item }}
+        update_cache=yes
+        cache_valid_time=3600
+      become: yes
+      with_items:
+       - git
+       - make
+       - curl
+       - python-novaclient
+       - python-neutronclient
+       - python-keystoneclient
+       - python-glanceclient
+
+    - name: Clone XOS repo
+      git:
+        repo={{ xos_repo_url }}
+        dest={{ xos_repo_dest }}
+        version={{ xos_repo_branch }}
+        force=yes
+
+    - name: Copy over SSH keys
+      copy:
+        src=~/.ssh/{{ item }}
+        dest={{ xos_repo_dest }}/xos/configurations/{{ xos_configuration }}/
+        owner={{ ansible_user_id }} mode=0600
+      with_items:
+       - id_rsa
+       - id_rsa.pub
+
+    - name: copy over SSH key as node_key
+      copy:
+        src=~/.ssh/id_rsa
+        dest={{ xos_repo_dest }}/xos/configurations/{{ xos_configuration }}/node_key
+
+    - name: download software image
+      get_url:
+        url={{ item.url }}
+        checksum={{ item.checksum }}
+        dest={{ xos_repo_dest }}/xos/configurations/{{ xos_configuration }}/images/{{ item.name }}.img
+      with_items: "{{ xos_images }}"
+
+    - name: Pull docker images for XOS
+      become: yes
+      command: docker pull {{ item }}
+      with_items:
+        - xosproject/xos-base
+        - xosproject/xos-postgres
+
diff --git a/roles/xos-vm-install/tasks/main.yml b/roles/xos-vm-install/tasks/main.yml
new file mode 100644
index 0000000..1aa66a9
--- /dev/null
+++ b/roles/xos-vm-install/tasks/main.yml
@@ -0,0 +1,21 @@
+---
+# xos-vm-install/tasks/main.yml
+#
+# Install XOS on a sub-VM by calling a nested ansible playbook
+
+- name: Create a vars file from template
+  template:
+    src=xos-setup-vars.yml.j2
+    dest={{ ansible_user_dir }}/xos-setup-vars.yml
+
+- name: Copy over XOS ansible playbook
+  copy:
+    src=xos-setup-{{ xos_configuration }}-playbook.yml
+    dest={{ ansible_user_dir }}/xos-setup-playbook.yml
+
+- name: Run the XOS ansible playbook
+  command: ansible-playbook {{ ansible_user_dir }}/xos-setup-playbook.yml
+  async: 2400
+  poll: 0
+  register: xos_setup_playbook
+
diff --git a/roles/xos-vm-install/templates/xos-setup-vars.yml.j2 b/roles/xos-vm-install/templates/xos-setup-vars.yml.j2
new file mode 100644
index 0000000..2d5ab15
--- /dev/null
+++ b/roles/xos-vm-install/templates/xos-setup-vars.yml.j2
@@ -0,0 +1,10 @@
+---
+
+xos_repo_url: "{{ xos_repo_url }}"
+xos_repo_dest: "{{ xos_repo_dest }}"
+xos_repo_branch: "{{ xos_repo_branch }}"
+
+xos_configuration: "{{ xos_configuration }}"
+
+xos_images:
+{{ xos_images | to_nice_yaml }}
diff --git a/scripts/single-node-pod.sh b/scripts/single-node-pod.sh
index c3f06ab..72050d9 100755
--- a/scripts/single-node-pod.sh
+++ b/scripts/single-node-pod.sh
@@ -1,30 +1,45 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 function cleanup_from_previous_test() {
+    echo "## Cleanup ##"
+
+    echo "Destroying juju environment"
+    juju destroy-environment --force -y manual
+
     VMS=$( sudo uvt-kvm list )
     for VM in $VMS
     do
+      echo "Destroying $VM"
       sudo uvt-kvm destroy $VM
     done
 
+    echo "Cleaning up files"
     rm -rf ~/.juju
     rm -f ~/.ssh/known_hosts
     rm -rf ~/openstack-cluster-setup
 
-    # Attempt to flush out old leases from dnsmasq, for repeated runs
-    sudo cp /var/lib/libvirt/dnsmasq/default.leases /var/lib/libvirt/dnsmasq/default.leases.bak
-    sudo truncate -s 0 /var/lib/libvirt/dnsmasq/default.leases
+    echo "Cleaning up libvirt/dnsmasq"
+    sudo rm -f /var/lib/libvirt/dnsmasq/xos-mgmtbr.leases
     sudo killall dnsmasq
-    sudo /usr/sbin/dnsmasq --conf-file=/var/lib/libvirt/dnsmasq/default.conf
+    sudo service libvirt-bin restart
 }
 
 function bootstrap() {
     cd ~
     sudo apt-get update
-    sudo apt-get -y install git
+    sudo apt-get -y install software-properties-common git mosh tmux dnsutils python-netaddr
+    sudo add-apt-repository -y ppa:ansible/ansible
+    sudo apt-get update
+    sudo apt-get install -y ansible
+
+    [ -e ~/.ssh/id_rsa ] || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
+    cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
+
     git clone https://github.com/open-cloud/openstack-cluster-setup.git
     cd ~/openstack-cluster-setup
-    ./bootstrap.sh
+
+    sed -i "s/ubuntu/`whoami`/" $INVENTORY
+    cp vars/example_keystone.yml vars/cord_keystone.yml
 
     # Log into the local node once to get host key
     ssh -o StrictHostKeyChecking=no localhost "ls > /dev/null"
@@ -32,40 +47,7 @@
 
 function setup_openstack() {
     # Run the playbook
-    ansible-playbook -i cord-test-hosts cord-setup.yml
-}
-
-function pull_onos_docker_image() {
-    echo ""
-    echo "Pull down the ONOS Docker image"
-    ssh ubuntu@onos-cord "cd cord; sudo docker-compose up -d"
-}
-
-function wait_for_openstack() {
-    # Need to wait for OpenStack services to come up before running any XOS "make" commands
-    echo "Waiting for the OpenStack services to fully come up."
-    echo "This can take 30 minutes or more, be patient!"
-    i=0
-    until juju status --format=summary|grep "started:  23" > /dev/null
-    do
-      sleep 60
-      (( i += 1 ))
-      echo "Waited $i minutes"
-    done
-
-    echo "All OpenStack services are up."
-}
-
-function simulate_fabric() {
-    echo ""
-    echo "Setting up simulated fabric on nova-compute node"
-    if [[ $EXAMPLESERVICE -eq 1 ]]
-    then
-      SCRIPT=compute-ext-net-tutorial.sh
-    else
-      SCRIPT=compute-ext-net.sh
-    fi
-    ssh ubuntu@nova-compute "wget https://raw.githubusercontent.com/open-cloud/openstack-cluster-setup/master/scripts/$SCRIPT; sudo bash $SCRIPT"
+    ansible-playbook -i $INVENTORY cord-single-playbook.yml
 }
 
 function build_xos_docker_images() {
@@ -73,20 +55,20 @@
     echo "Checking out XOS branch $BUILD_BRANCH"
     ssh ubuntu@xos "cd xos; git config --global user.email 'ubuntu@localhost'; git config --global user.name 'XOS ExampleService'"
     ssh ubuntu@xos "cd xos; git checkout $BUILD_BRANCH"
+
     if [[ $EXAMPLESERVICE -eq 1 ]]
     then
       echo ""
       echo "Adding exampleservice to XOS"
       ssh ubuntu@xos "cd xos; git cherry-pick 775e00549e535803522fbcd70152e5e1b0629c83"
     fi
-    echo ""
+
     echo "Rebuilding XOS containers"
     ssh ubuntu@xos "cd xos/xos/configurations/cord-pod; make local_containers"
-
 }
 
 function setup_xos() {
-    echo ""
+
     echo "Setting up XOS, will take a few minutes"
     ssh ubuntu@xos "cd xos/xos/configurations/cord-pod; make"
     echo ""
@@ -128,6 +110,7 @@
 
     echo "*** Wait for vSG VM to come up"
     i=0
+
     until nova list --all-tenants|grep 'vsg.*ACTIVE' > /dev/null
     do
       sleep 60
@@ -151,13 +134,13 @@
 
     echo ""
     echo "*** Run dhclient in test client"
+
     ssh ubuntu@nova-compute "sudo lxc-attach -n testclient -- dhclient eth0.222.111" > /dev/null
 
     echo ""
     echo "*** Routes in test client"
     ssh ubuntu@nova-compute "sudo lxc-attach -n testclient -- route -n"
 
-
     echo ""
     echo "*** Test external connectivity in test client"
     ssh ubuntu@nova-compute "sudo lxc-attach -n testclient -- ping -c 3 8.8.8.8"
@@ -214,22 +197,27 @@
 # Parse options
 RUN_TEST=0
 EXAMPLESERVICE=0
-BUILD_BRANCH=""
-while getopts "b:eht" opt; do
+BUILD_BRANCH="master"
+INVENTORY="inventory/single-localhost"
+
+while getopts "b:ehi:t" opt; do
   case ${opt} in
     b ) BUILD_BRANCH=$OPTARG
       ;;
+    e ) EXAMPLESERVICE=1
+      ;;
     h ) echo "Usage:"
-      echo "    $0             install OpenStack and prep XOS and ONOS VMs [default]"
-      echo "    $0 -b <branch> build XOS containers based on GitHub <branch> instead of pulling them from Docker Hub"
-      echo "    $0 -e          add exampleservice to XOS"
-      echo "    $0 -h          display this help message"
-      echo "    $0 -t          do install, bring up cord-pod configuration, run E2E test"
+      echo "    $0                install OpenStack and prep XOS and ONOS VMs [default]"
+      echo "    $0 -b <branch>    build XOS containers based on GitHub <branch>"
+      echo "    $0 -e             add exampleservice to XOS"
+      echo "    $0 -h             display this help message"
+      echo "    $0 -i <inv_file>  specify an inventory file (default is inventory/single-localhost)"
+      echo "    $0 -t             do install, bring up cord-pod configuration, run E2E test"
       exit 0
       ;;
-    t ) RUN_TEST=1
+    i ) INVENTORY=$OPTARG
       ;;
-    e ) EXAMPLESERVICE=1
+    t ) RUN_TEST=1
       ;;
     \? ) echo "Invalid option: -$OPTARG"
       exit 1
@@ -247,16 +235,10 @@
 
 bootstrap
 setup_openstack
-pull_onos_docker_image
-wait_for_openstack
-simulate_fabric
 
 if [[ $RUN_TEST -eq 1 ]]
 then
-  if [[ -n $BUILD_BRANCH || $EXAMPLESERVICE -eq 1 ]]
-  then
-    build_xos_docker_images
-  fi
+  build_xos_docker_images
   setup_xos
   setup_test_client
   run_e2e_test
@@ -267,3 +249,4 @@
 fi
 
 exit 0
+
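With the new flags, a full end-to-end run is a one-liner; a usage sketch:

    ./scripts/single-node-pod.sh -e -t    # add exampleservice, then run the E2E test
    ./scripts/single-node-pod.sh -i inventory/single-localhost -b master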
diff --git a/vars/aztest.yml b/vars/aztest.yml
new file mode 100644
index 0000000..c56f092
--- /dev/null
+++ b/vars/aztest.yml
@@ -0,0 +1,31 @@
+---
+# file: vars/aztest.yml
+
+# IP prefix for VMs
+virt_nets:
+  - name: mgmtbr
+    ipv4_prefix: 192.168.250
+    head_vms: true
+
+# site domain suffix
+site_suffix: aztest.infra.opencloud.us
+
+# resolv.conf settings
+dns_search:
+  - aztest.infra.opencloud.us
+  - opencloud.cs.arizona.edu
+
+# NSD/Unbound settings
+
+nsd_zones:
+  - name: aztest.infra.opencloud.us
+    ipv4_first_octets: 192.168.250
+    name_reverse_unbound: "168.192.in-addr.arpa"
+    soa: ns1
+    ns:
+      - { name: ns1 }
+    nodelist: head_vm_list
+
+# If true, unbound listens on the head node's `ansible_default_ipv4` interface
+unbound_listen_on_default: True
+
diff --git a/vars/cord.yml b/vars/cord.yml
new file mode 100644
index 0000000..c8a7c87
--- /dev/null
+++ b/vars/cord.yml
@@ -0,0 +1,47 @@
+---
+# file: vars/cord.yml
+
+# VM networks/bridges on head
+virt_nets:
+  - name: mgmtbr
+    ipv4_prefix: 192.168.122
+    head_vms: true
+
+# site domain suffix
+site_suffix: cordtest.opencloud.us
+
+# resolv.conf settings
+dns_search:
+  - cordtest.opencloud.us
+
+# NSD/Unbound settings
+nsd_zones:
+  - name: cordtest.opencloud.us
+    ipv4_first_octets: 192.168.122
+    name_reverse_unbound: "168.192.in-addr.arpa"
+    soa: ns1
+    ns:
+      - { name: ns1 }
+    nodelist: head_vm_list
+    aliases:
+      - { name: "ns1" , dest: "head" }
+      - { name: "ns" , dest: "head" }
+      - { name: "apt-cache" , dest: "head" }
+
+name_on_public_interface: head
+
+# If true, unbound listens on the head node's `ansible_default_ipv4` interface
+unbound_listen_on_default: True
+
+
+xos_images:
+  - name: "trusty-server-multi-nic"
+    url: "http://www.vicci.org/opencloud/trusty-server-cloudimg-amd64-disk1.img"
+    checksum: "sha256:5fb160ea09649f9cebe5cfd7aaf3d1a341e8e0f9eca6e52e46eb6271155c48b0"
+  - name: "vsg-1.0"
+    url: "http://www.vicci.org/cord/vsg-1.0.img"
+    checksum: "sha256:ba1f1dfd9099a21ed6512f9e08ceddafbc8302dcc0da66eda3a0318281219abc"
+#  - name: ceilometer-trusty-server-multi-nic
+#    image_url: "http://www.vicci.org/cord/ceilometer-trusty-server-multi-nic.compressed.qcow2"
+#    checksum: "sha256:afde3f0448483902693be4d52ae76bb683fd74b1c7728019094bf81b37d86105"
+
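get_url verifies each downloaded image against the listed sha256 before XOS consumes it; the same check by hand, as a sketch (paths follow the xos-vm-install defaults):

    cd ~/xos/xos/configurations/cord-pod/images
    sha256sum trusty-server-multi-nic.img
    # expect 5fb160ea09649f9cebe5cfd7aaf3d1a341e8e0f9eca6e52e46eb6271155c48b0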
diff --git a/vars/cord_defaults.yml b/vars/cord_defaults.yml
new file mode 100644
index 0000000..2c35dc2
--- /dev/null
+++ b/vars/cord_defaults.yml
@@ -0,0 +1,243 @@
+---
+# vars/cord_defaults.yml
+
+openstack_version: kilo
+
+juju_config_name: cord
+
+xos_configuration: cord-pod
+
+xos_repo_branch: "master"
+
+apt_cacher_name: apt-cache
+
+apt_ssl_sites:
+  - apt.dockerproject.org
+  - butler.opencloud.cs.arizona.edu
+  - deb.nodesource.com
+
+charm_versions:
+  ceilometer: "cs:trusty/ceilometer-17"
+  ceilometer-agent: "cs:trusty/ceilometer-agent-13"
+  glance: "cs:trusty/glance-28"
+  keystone: "cs:trusty/keystone-33"
+  mongodb: "cs:trusty/mongodb-33"
+  percona-cluster: "cs:trusty/percona-cluster-31"
+  nagios: "cs:trusty/nagios-10"
+  neutron-api: "cs:~cordteam/trusty/neutron-api-3"
+  nova-cloud-controller: "cs:trusty/nova-cloud-controller-64"
+  nova-compute: "cs:~cordteam/trusty/nova-compute-2"
+  nrpe: "cs:trusty/nrpe-4"
+  ntp: "cs:trusty/ntp-14"
+  openstack-dashboard: "cs:trusty/openstack-dashboard-19"
+  rabbitmq-server: "cs:trusty/rabbitmq-server-42"
+
+head_vm_list:
+  - name: "juju-1"
+    service: "juju"
+    aliases:
+      - "juju"
+    ipv4_last_octet: 10
+    cpu: 1
+    memMB: 2048
+    diskGB: 20
+
+  - name: "ceilometer-1"
+    service: "ceilometer"
+    aliases:
+      - "ceilometer"
+    ipv4_last_octet: 20
+    cpu: 1
+    memMB: 2048
+    diskGB: 20
+    forwarded_ports:
+      - { ext: 8777, int: 8777 }
+
+  - name: "glance-1"
+    service: "glance"
+    aliases:
+      - "glance"
+    ipv4_last_octet: 30
+    cpu: 2
+    memMB: 4096
+    diskGB: 160
+    forwarded_ports:
+      - { ext: 9292, int: 9292 }
+
+  - name: "keystone-1"
+    service: "keystone"
+    aliases:
+      - "keystone"
+    ipv4_last_octet: 40
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+    forwarded_ports:
+      - { ext: 35357, int: 35357 }
+      - { ext: 4990, int: 4990 }
+      - { ext: 5000, int: 5000 }
+
+  - name: "percona-cluster-1"
+    service: "percona-cluster"
+    aliases:
+      - "percona-cluster"
+    ipv4_last_octet: 50
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+
+  - name: "nagios-1"
+    service: "nagios"
+    aliases:
+      - "nagios"
+    ipv4_last_octet: 60
+    cpu: 1
+    memMB: 2048
+    diskGB: 20
+    forwarded_ports:
+      - { ext: 3128, int: 80 }
+
+  - name: "neutron-api-1"
+    service: "neutron-api"
+    aliases:
+      - "neutron-api"
+    ipv4_last_octet: 70
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+    forwarded_ports:
+      - { ext: 9696, int: 9696 }
+
+  - name: "nova-cloud-controller-1"
+    service: "nova-cloud-controller"
+    aliases:
+      - "nova-cloud-controller"
+    ipv4_last_octet: 80
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+    forwarded_ports:
+      - { ext: 8774, int: 8774 }
+
+  - name: "openstack-dashboard-1"
+    service: "openstack-dashboard"
+    aliases:
+      - "openstack-dashboard"
+    ipv4_last_octet: 90
+    cpu: 1
+    memMB: 2048
+    diskGB: 20
+    forwarded_ports:
+      - { ext: 8080, int: 80 }
+
+  - name: "rabbitmq-server-1"
+    service: "rabbitmq-server"
+    aliases:
+      - "rabbitmq-server"
+    ipv4_last_octet: 100
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+
+  - name: "onos-cord-1"
+    aliases:
+      - "onos-cord"
+    ipv4_last_octet: 110
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+    docker_path: "cord"
+
+  - name: "onos-fabric-1"
+    aliases:
+      - "onos-fabric"
+    ipv4_last_octet: 120
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+
+  - name: "xos-1"
+    aliases:
+      - "xos"
+    ipv4_last_octet: 130
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+    docker_path: 'xos/xos/configurations/cord-pod'
+
+  - name: "nova-compute-1"
+    service: "nova-compute"
+    root_ssh_login: true
+    aliases:
+      - "nova-compute"
+    ipv4_last_octet: 140
+    cpu: 6
+    memMB: 16384
+    diskGB: 240
+
+
+vm_service_list:
+  - ceilometer
+  - glance
+  - keystone
+  - percona-cluster
+  - nagios
+  - neutron-api
+  - nova-cloud-controller
+  - openstack-dashboard
+  - rabbitmq-server
+  - nova-compute
+
+
+standalone_service_list:
+  - ntp
+  - nrpe
+  - ceilometer-agent
+
+
+service_relations:
+  - name: keystone
+    relations: [ "percona-cluster", "nrpe", ]
+
+  - name: nova-cloud-controller
+    relations: [ "percona-cluster", "rabbitmq-server", "glance", "keystone", "nrpe", ]
+
+  - name: glance
+    relations: [ "percona-cluster", "keystone", "nrpe", ]
+
+  - name: neutron-api
+    relations: [ "keystone",  "percona-cluster", "rabbitmq-server", "nova-cloud-controller", "nrpe", ]
+
+  - name: openstack-dashboard
+    relations: [ "keystone", "nrpe", ]
+
+  - name: nagios
+    relations: [ "nrpe", ]
+
+  - name: "percona-cluster:juju-info"
+    relations: [ "nrpe:general-info", ]
+
+  - name: rabbitmq-server
+    relations: [ "nrpe", ]
+
+  - name: ceilometer
+    relations: [ "mongodb", "rabbitmq-server", "nagios", "nrpe", ]
+
+  - name: "ceilometer:identity-service"
+    relations: [ "keystone:identity-service", ]
+
+  - name: "ceilometer:ceilometer-service"
+    relations: [ "ceilometer-agent:ceilometer-service", ]
+
+  - name: nova-compute
+    relations: [ "ceilometer-agent", "glance", "nova-cloud-controller", "nagios", "nrpe", ]
+
+  - name: "nova-compute:shared-db"
+    relations: [ "percona-cluster:shared-db", ]
+
+  - name: "nova-compute:amqp"
+    relations: [ "rabbitmq-server:amqp", ]
+
+  - name: ntp
+    relations: [ "nova-compute", ]
+
diff --git a/vars/example_keystone.yml b/vars/example_keystone.yml
new file mode 100644
index 0000000..14df06f
--- /dev/null
+++ b/vars/example_keystone.yml
@@ -0,0 +1,4 @@
+---
+
+keystone_admin_password: "VeryLongKeystoneAdminPassword"
+
diff --git a/vars/opencloud_defaults.yml b/vars/opencloud_defaults.yml
new file mode 100644
index 0000000..32ca9a8
--- /dev/null
+++ b/vars/opencloud_defaults.yml
@@ -0,0 +1,187 @@
+---
+# vars/opencloud_defaults.yml
+
+openstack_version: kilo
+
+juju_config_name: opencloud
+
+charm_versions: {}
+
+head_vm_list:
+  - name: "juju-1"
+    service: "juju"
+    aliases:
+       - "juju"
+    ipv4_last_octet: 10
+    cpu: 1
+    memMB: 2048
+    diskGB: 20
+
+  - name: "ceilometer-1"
+    service: "ceilometer"
+    aliases:
+      - "ceilometer"
+    ipv4_last_octet: 20
+    cpu: 1
+    memMB: 2048
+    diskGB: 20
+    forwarded_ports:
+      - { ext: 8777, int: 8777 }
+
+  - name: "glance-1"
+    service: "glance"
+    aliases:
+      - "glance"
+    ipv4_last_octet: 30
+    cpu: 2
+    memMB: 4096
+    diskGB: 160
+    forwarded_ports:
+      - { ext: 9292, int: 9292 }
+
+  - name: "keystone-1"
+    service: "keystone"
+    aliases:
+      - "keystone"
+    ipv4_last_octet: 40
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+    forwarded_ports:
+      - { ext: 35357, int: 35357 }
+      - { ext: 4990, int: 4990 }
+      - { ext: 5000, int: 5000 }
+
+  - name: "percona-cluster-1"
+    service: "percona-cluster"
+    aliases:
+      - "percona-cluster"
+    ipv4_last_octet: 50
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+
+  - name: "nagios-1"
+    service: "nagios"
+    aliases:
+      - "nagios"
+    ipv4_last_octet: 60
+    cpu: 1
+    memMB: 2048
+    diskGB: 20
+    forwarded_ports:
+      - { ext: 3128, int: 80 }
+
+  - name: "neutron-api-1"
+    service: "neutron-api"
+    aliases:
+      - "neutron-api"
+    ipv4_last_octet: 70
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+    forwarded_ports:
+      - { ext: 9696, int: 9696 }
+
+  - name: "neutron-gateway-1"
+    service: "neutron-gateway"
+    aliases:
+      - "neutron-gateway"
+    ipv4_last_octet: 80
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+
+  - name: "nova-cloud-controller-1"
+    service: "nova-cloud-controller"
+    aliases:
+      - "nova-cloud-controller"
+    ipv4_last_octet: 90
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+    forwarded_ports:
+      - { ext: 8774, int: 8774 }
+
+  - name: "openstack-dashboard-1"
+    service: "openstack-dashboard"
+    aliases:
+      - "openstack-dashboard"
+    ipv4_last_octet: 100
+    cpu: 1
+    memMB: 2048
+    diskGB: 20
+    forwarded_ports:
+      - { ext: 8080, int: 80 }
+
+  - name: "rabbitmq-server-1"
+    service: "rabbitmq-server"
+    aliases:
+      - "rabbitmq-server"
+    ipv4_last_octet: 110
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+
+vm_service_list:
+  - ceilometer
+  - glance
+  - keystone
+  - nagios
+  - neutron-api
+  - neutron-gateway
+  - nova-cloud-controller
+  - openstack-dashboard
+  - percona-cluster
+  - rabbitmq-server
+
+standalone_service_list:
+  - ceilometer-agent
+  - neutron-openvswitch
+  - nrpe
+  - ntp
+
+service_relations:
+  - name: keystone
+    relations: [ "percona-cluster", "nrpe", ]
+
+  - name: nova-cloud-controller
+    relations: [ "percona-cluster", "rabbitmq-server", "glance", "keystone", "nrpe", ]
+
+  - name: glance
+    relations: [ "percona-cluster", "keystone", "nrpe", ]
+
+  - name: neutron-gateway
+    relations: [ "neutron-api", "nova-cloud-controller", "percona-cluster", "nrpe", ]
+
+  - name: "neutron-gateway:amqp"
+    relations: [ "rabbitmq-server:amqp", ]
+
+  - name: neutron-api
+    relations: [ "keystone", "neutron-openvswitch", "percona-cluster", "rabbitmq-server", "nova-cloud-controller", "nrpe", ]
+
+  - name: neutron-openvswitch
+    relations: [ "rabbitmq-server", ]
+
+  - name: openstack-dashboard
+    relations: [ "keystone", "nrpe", ]
+
+  - name: nagios
+    relations: [ "nrpe", ]
+
+  - name: "percona-cluster:juju-info"
+    relations: [ "nrpe:general-info", ]
+
+  - name: rabbitmq-server
+    relations: [ "nrpe", ]
+
+  - name: ceilometer
+    relations: [ "mongodb", "rabbitmq-server", "nagios", "nrpe", ]
+
+  - name: "ceilometer:identity-service"
+    relations: [ "keystone:identity-service", ]
+
+  - name: "ceilometer:ceilometer-service"
+    relations: [ "ceilometer-agent:ceilometer-service", ]
+
+