Support multi-node CORD install
diff --git a/cord-compute-playbook.yml b/cord-compute-playbook.yml
new file mode 100644
index 0000000..b0f191e
--- /dev/null
+++ b/cord-compute-playbook.yml
@@ -0,0 +1,40 @@
+---
+# Installs new compute nodes into the cord-pod XOS configuration using Juju
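+#
+# A typical invocation, once compute hosts are listed in the inventory
+# (see inventory/multi-localhost in this change):
+#   ansible-playbook -i inventory/multi-localhost cord-compute-playbook.yml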
+
+- name: Include vars
+  hosts: all
+  tasks:
+  - include_vars: vars/cord_defaults.yml
+  - include_vars: vars/cord.yml
+  - include_vars: vars/example_keystone.yml
+
+- name: Configure compute hosts to use DNS server
+  hosts: all
+  become: yes
+  roles:
+    - { role: dns-configure, when: on_cloudlab }
+
+- name: Prep systems
+  hosts: compute
+  become: yes
+  roles:
+    - common-prep
+    - { role: cloudlab-prep, when: on_cloudlab }
+
+- name: Configure head node (for sshkey)
+  hosts: head
+  roles:
+    - { role: head-prep, become: yes }
+
+- name: Configure compute nodes
+  hosts: compute
+  become: yes
+  roles:
+    - compute-prep
+
+- name: Deploy compute nodes
+  hosts: head
+  roles:
+    - juju-compute-setup
+    - xos-compute-setup
+
diff --git a/cord-head-playbook.yml b/cord-head-playbook.yml
new file mode 100644
index 0000000..dd550c8
--- /dev/null
+++ b/cord-head-playbook.yml
@@ -0,0 +1,50 @@
+---
+# Installs the head node of a multi-node cord-pod XOS configuration, using
+# Juju to provision the OpenStack services inside VMs on the head node.
+#
+# Compute nodes are added afterwards with `cord-compute-playbook.yml`.
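+#
+# A typical invocation (inventory file included in this change):
+#   ansible-playbook -i inventory/multi-localhost cord-head-playbook.yml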
+
+- name: Include vars
+  hosts: all
+  tasks:
+  - include_vars: vars/cord_defaults.yml
+  - include_vars: vars/cord.yml
+  - include_vars: vars/example_keystone.yml  # for testing
+
+- name: DNS Server and apt-cacher-ng Setup
+  hosts: head
+  become: yes
+  roles:
+    - { role: dns-nsd, when: on_cloudlab }
+    - { role: dns-unbound, when: on_cloudlab }
+    - apt-cacher-ng
+
+- name: Configure all hosts to use DNS server
+  hosts: all
+  become: yes
+  roles:
+    - { role: dns-configure, when: on_cloudlab }
+
+- name: Prep systems
+  hosts: all
+  become: yes
+  roles:
+    - common-prep
+    - { role: cloudlab-prep, when: on_cloudlab }
+
+- name: Configure head node, create VMs
+  hosts: head
+  roles:
+    - { role: head-prep, become: yes }
+    - { role: config-virt, become: yes }
+    - create-vms
+
+- name: Set up VMs and Juju, start ONOS and XOS
+  hosts: head
+  roles:
+    - xos-vm-install
+    - onos-vm-install
+    - juju-setup
+    - docker-compose
+    - xos-start
+
diff --git a/cord-single-playbook.yml b/cord-single-playbook.yml
index dec17a3..81036ad 100644
--- a/cord-single-playbook.yml
+++ b/cord-single-playbook.yml
@@ -7,7 +7,7 @@
 - name: Include vars
   hosts: head
   tasks:
-  - include_vars: vars/cord_defaults.yml
+  - include_vars: vars/cord_single_defaults.yml
   - include_vars: vars/cord.yml
   - include_vars: vars/cord_keystone.yml
 
@@ -48,4 +48,4 @@
     - docker-compose
     - simulate-fabric
     - onos-load-apps
-
+    - xos-start
diff --git a/inventory/multi-localhost b/inventory/multi-localhost
new file mode 100644
index 0000000..66a54f4
--- /dev/null
+++ b/inventory/multi-localhost
@@ -0,0 +1,11 @@
+[head]
+localhost
+
+[head:vars]
+ansible_ssh_user=admin
+
+# Add compute nodes here as they are provisioned
+[compute]
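+# Example (hypothetical hostname):
+# compute-1.cord.lab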
+
+[compute:vars]
+ansible_ssh_user=ubuntu
diff --git a/roles/cloudlab-prep/defaults/main.yml b/roles/cloudlab-prep/defaults/main.yml
index 1b3f00e..99c6595 100644
--- a/roles/cloudlab-prep/defaults/main.yml
+++ b/roles/cloudlab-prep/defaults/main.yml
@@ -2,5 +2,5 @@
 # roles/cloudlab-prep/defaults/main.yml
 
 cloudlab_extrafs:
-  - /var/lib/uvtool/libvirt/images
-  #  - /var/lib/nova
+  - { mount: /var/lib/uvtool/libvirt/images, dev: /dev/sda }
+  - { mount: /var/lib/nova, dev: /dev/sdb }
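+
+# Override per node if the spare block device differs, e.g. (hypothetical
+# device name):
+#   cloudlab_extrafs:
+#     - { mount: /var/lib/nova, dev: /dev/sdc }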
diff --git a/roles/cloudlab-prep/tasks/main.yml b/roles/cloudlab-prep/tasks/main.yml
index 5dc9c2c..5bf71a3 100644
--- a/roles/cloudlab-prep/tasks/main.yml
+++ b/roles/cloudlab-prep/tasks/main.yml
@@ -3,14 +3,14 @@
 
 - name: create directories to mount extra filesystems
   file:
-    dest={{ item }}
+    dest={{ item.mount }}
     state=directory
   with_items: "{{ cloudlab_extrafs }}"
 
 
 # FIXME: does not create lost+found, -q makes it fail without error...
 - name: Set up extra disk space
-  command: /usr/testbed/bin/mkextrafs -qf {{ item }}
-    creates={{ item }}/lost+found
+  command: /usr/testbed/bin/mkextrafs -r {{ item.dev }} -qf {{ item.mount }}
+    creates={{ item.mount }}/lost+found
   with_items: "{{ cloudlab_extrafs }}"
 
diff --git a/roles/compute-prep/tasks/main.yml b/roles/compute-prep/tasks/main.yml
index a62305f..0c57979 100644
--- a/roles/compute-prep/tasks/main.yml
+++ b/roles/compute-prep/tasks/main.yml
@@ -10,11 +10,21 @@
   with_items:
     - python-yaml
 
+- name: Add ubuntu user
+  user:
+    name=ubuntu
+    groups=admin
+
 - name: Add head node ubuntu user key
   authorized_key:
     user=ubuntu
     key="{{ hostvars[groups['head'][0]]['sshkey']['stdout'] }}"
 
+- name: Add head node root user key
+  authorized_key:
+    user=root
+    key="{{ hostvars[groups['head'][0]]['sshkey']['stdout'] }}"
+
 - name: Add route via /etc/rc.local
   template:
     src=rc.local.j2
@@ -22,9 +32,9 @@
     mode=0755
   notify:
     - run rc.local
+  when: not cord_provisioned
 
 - name: Create /var/lib/nova dir
   file:
     path=/var/lib/nova
     state=directory
-
diff --git a/roles/config-virt/tasks/main.yml b/roles/config-virt/tasks/main.yml
index da7b239..f3dc91d 100644
--- a/roles/config-virt/tasks/main.yml
+++ b/roles/config-virt/tasks/main.yml
@@ -13,7 +13,7 @@
     command=facts
 
 - name: Tear down libvirt's default network
-  when: ansible_libvirt_networks["default"] is defined
+  when: not cord_provisioned and ansible_libvirt_networks["default"] is defined
   virt_net:
     command={{ item }}
     name=default
@@ -28,20 +28,22 @@
     command=define
     xml='{{ lookup("template", "virt_net.xml.j2") }}'
   with_items: '{{ virt_nets }}'
+  when: not cord_provisioned
 
 - name: collect libvirt network facts after defining new network
   virt_net:
     command=facts
+  when: not cord_provisioned
 
 - name: start libvirt networks
-  when: ansible_libvirt_networks["xos-{{ item.name }}"].state != "active"
+  when: not cord_provisioned and ansible_libvirt_networks["xos-{{ item.name }}"].state != "active"
   virt_net:
     name=xos-{{ item.name }}
     command=create
   with_items: '{{ virt_nets }}'
 
 - name: have libvirt networks autostart
-  when: ansible_libvirt_networks["xos-{{ item.name }}"].autostart != "yes"
+  when: not cord_provisioned and ansible_libvirt_networks["xos-{{ item.name }}"].autostart != "yes"
   virt_net:
     name=xos-{{ item.name }}
     autostart=yes
@@ -59,6 +61,7 @@
   notify:
     - reload libvirt-bin
     - run qemu hook
+  when: not cord_provisioned
 
 - name: Wait for uvt-kvm image to be available
   async_status: jid={{ uvt_sync.ansible_job_id }}
diff --git a/roles/create-vms/tasks/main.yml b/roles/create-vms/tasks/main.yml
index 28350ef..29b7545 100644
--- a/roles/create-vms/tasks/main.yml
+++ b/roles/create-vms/tasks/main.yml
@@ -39,15 +39,19 @@
   template:
     src=eth0.cfg.j2
     dest={{ ansible_user_dir }}/eth0.cfg
+  when: not cord_provisioned
 
 - name: Copy eth0 interface config file to all VMs
   command: ansible services -b -u ubuntu -m copy -a "src={{ ansible_user_dir }}/eth0.cfg dest=/etc/network/interfaces.d/eth0.cfg owner=root group=root mode=0644"
+  when: not cord_provisioned
 
 - name: Restart eth0 interface on all VMs
   command: ansible services -b -u ubuntu -m shell -a "ifdown eth0 ; ifup eth0"
+  when: not cord_provisioned
 
 - name: Verify that we can log into every VM after restarting network interfaces
   command: ansible services -m ping -u ubuntu
+  when: not cord_provisioned
 
 # sshkey is registered in head-prep task
 - name: Enable root ssh login on VM's that require it
diff --git a/roles/docker-compose/tasks/main.yml b/roles/docker-compose/tasks/main.yml
index 59325e4..efb310a 100644
--- a/roles/docker-compose/tasks/main.yml
+++ b/roles/docker-compose/tasks/main.yml
@@ -29,5 +29,4 @@
 
 - name: Copy admin-openrc.sh into XOS container
   command: ansible xos-1 -u ubuntu -m copy \
-    -a "src=~/admin-openrc.sh dest=~/service-profile/{{ xos_configuration }}"
-
+    -a "src=~/admin-openrc.sh dest={{ service_profile_repo_dest }}/{{ xos_configuration }}"
diff --git a/roles/juju-compute-setup/tasks/main.yml b/roles/juju-compute-setup/tasks/main.yml
new file mode 100644
index 0000000..f12ce43
--- /dev/null
+++ b/roles/juju-compute-setup/tasks/main.yml
@@ -0,0 +1,62 @@
+---
+# roles/juju-compute-setup/tasks/main.yml
+
+# Code for this is in library/juju_facts.py
+- name: Obtain Juju Facts for creating machines
+  juju_facts:
+
+# For setwise operations on desired vs Juju state:
+# list of active juju_machines names: juju_machines.keys()
+# list of active juju_services names: juju_services.keys()
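+#
+# Sketch of the fact structure these tasks rely on (inferred from usage
+# below, not from library/juju_facts.py itself):
+#   juju_machines:      { "<hostname>": { "machine_id": ..., ... }, ... }
+#   juju_services:      { "nova-compute": { ... }, ... }
+#   juju_compute_nodes: { "<hostname>": { "workload-status": { "message": ... } }, ... }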
+
+- name: Add machines to Juju
+  command: "juju add-machine ssh:{{ item }}"
+  with_items: "{{ groups['compute'] | difference( juju_machines.keys() ) }}"
+
+# run this again, so machines will be in the juju_machines list
+- name: Obtain Juju Facts after machine creation
+  juju_facts:
+
+- name: Deploy nova-compute service if needed
+  command: "juju deploy {{ charm_versions[item] | default(item) }} --to {{ juju_machines[groups['compute'][0]]['machine_id'] }} --config={{ juju_config_path }}"
+  with_items:
+    - "nova-compute"
+  when: '"nova-compute" not in juju_services.keys()'
+
+- name: Create relations between nova-compute and other services if needed
+  command: "juju add-relation '{{ item.0.name }}' '{{ item.1 }}'"
+  register: juju_relation
+  failed_when: "juju_relation|failed and 'relation already exists' not in juju_relation.stderr"
+  with_subelements:
+    - "{{ compute_relations }}"
+    - relations
+
+# refresh facts again, so the newly deployed service shows up in juju_services
+- name: Obtain Juju Facts after deploying nova-compute
+  juju_facts:
+  when: '"nova-compute" not in juju_services.keys()'
+
+- name: Add more nova-compute units
+  command: "juju add-unit nova-compute --to {{ juju_machines[item]['machine_id'] }}"
+  with_items: "{{ groups['compute'] | difference( juju_compute_nodes.keys() ) }}"
+
+- name: Pause to let Juju settle
+  pause:
+    prompt="Waiting for Juju..."
+    seconds=20
+
+# 160*15s = 2400s = 40m max wait
+- name: Wait for nova-compute nodes to come online
+  juju_facts:
+  until: item in juju_compute_nodes.keys() and juju_compute_nodes[item]['workload-status']['message'] == "Unit is ready"
+  retries: 160
+  delay: 15
+  with_items: "{{ groups['compute'] }}"
+
+- name: Verify that the nodes appear in nova
+  shell: bash -c "source ~/admin-openrc.sh; nova hypervisor-list | grep '{{ item }}'"
+  register: result
+  until: result | success
+  retries: 5
+  delay: 5
+  with_items: "{{ groups['compute'] }}"
diff --git a/roles/xos-compute-setup/tasks/main.yml b/roles/xos-compute-setup/tasks/main.yml
new file mode 100644
index 0000000..b2689a7
--- /dev/null
+++ b/roles/xos-compute-setup/tasks/main.yml
@@ -0,0 +1,7 @@
+---
+# xos-compute-setup/tasks/main.yml
+#
+# Tell XOS that a new compute node has been added
+
+- name: ssh to XOS VM and run 'make new-nodes'
+  command: ssh ubuntu@xos "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}/; make new-nodes"
diff --git a/roles/xos-start/tasks/main.yml b/roles/xos-start/tasks/main.yml
new file mode 100644
index 0000000..a819727
--- /dev/null
+++ b/roles/xos-start/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+# xos-start/tasks/main.yml
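+#
+# Each task shells into the xos-1 VM via ad-hoc ansible from the head node,
+# the same pattern the docker-compose role uses.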
+
+- name: Build XOS containers
+  command: ansible xos-1 -u ubuntu -m shell \
+    -a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make local_containers | tee xos-build.out"
+
+- name: Onboard services and start XOS
+  command: ansible xos-1 -u ubuntu -m shell \
+    -a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make xos | tee xos-onboard.out"
+
+- name: Pause to let XOS initialize
+  pause: seconds=120
+
+- name: Initial VTN configuration
+  command: ansible xos-1 -u ubuntu -m shell \
+    -a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make vtn"
+
+- name: Initial fabric configuration
+  command: ansible xos-1 -u ubuntu -m shell \
+    -a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make fabric"
+
+- name: Pause to let ONOS initialize
+  pause: seconds=20
+
+- name: Configure CORD services
+  command: ansible xos-1 -u ubuntu -m shell \
+    -a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make cord"
diff --git a/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml b/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml
index 0481444..364882e 100644
--- a/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml
+++ b/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml
@@ -70,4 +70,3 @@
         chdir="{{ service_profile_repo_dest }}/containers/xos/"
       with_items:
        - base
-
diff --git a/scripts/single-node-pod.sh b/scripts/single-node-pod.sh
index 18ece0f..185774a 100755
--- a/scripts/single-node-pod.sh
+++ b/scripts/single-node-pod.sh
@@ -47,6 +47,7 @@
 }
 
 function setup_openstack() {
+    cd ~/openstack-cluster-setup
 
     extra_vars="xos_repo_url=$XOS_REPO_URL"
 
@@ -59,31 +60,9 @@
     ansible-playbook -i $INVENTORY cord-single-playbook.yml --extra-vars="$extra_vars"
 }
 
-function build_xos_docker_images() {
-    echo ""
-    echo "Checking out XOS branch $XOS_BRANCH"
-    ssh ubuntu@xos "cd xos; git config --global user.email 'ubuntu@localhost'; git config --global user.name 'XOS ExampleService'"
-    ssh ubuntu@xos "cd xos; git checkout $XOS_BRANCH"
-
-    echo "Rebuilding XOS containers"
-    ssh ubuntu@xos "cd xos/containers/xos; make base"
-    ssh ubuntu@xos "cd service-profile/cord-pod; make local_containers"
-}
-
 function setup_xos() {
 
-    echo "Setting up XOS, will take a few minutes"
-    ssh ubuntu@xos "cd service-profile/cord-pod; make"
-    echo ""
-    echo "Pause 2 minutes"
-    sleep 120
-
-    ssh ubuntu@xos "cd service-profile/cord-pod; make vtn; make fabric"
-    echo ""
-    echo "Pause 30 seconds"
-    sleep 30
-
-    ssh ubuntu@xos "cd service-profile/cord-pod; make cord; make cord-subscriber"
+    ssh ubuntu@xos "cd service-profile/cord-pod; make cord-subscriber"
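+    # The container build and XOS/VTN/fabric/CORD startup steps that used to
+    # live here are now handled by the xos-start role during the playbook run.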
 
     if [[ $EXAMPLESERVICE -eq 1 ]]
     then
@@ -263,7 +242,6 @@
 
 if [[ $RUN_TEST -eq 1 ]]
 then
-  build_xos_docker_images
   setup_xos
   setup_test_client
   run_e2e_test
diff --git a/vars/cord.yml b/vars/cord.yml
index c4ffc53..65b7f4a 100644
--- a/vars/cord.yml
+++ b/vars/cord.yml
@@ -8,15 +8,15 @@
     head_vms: true
 
 # site domain suffix
-site_suffix: cordtest.opencloud.us
+site_suffix: cord.lab
 
 # resolv.conf settings
 dns_search:
-  - cordtest.opencloud.us
+  - cord.lab
 
 # NSD/Unbound settings
 nsd_zones:
-  - name: cordtest.opencloud.us
+  - name: cord.lab
     ipv4_first_octets: 192.168.122
     name_reverse_unbound: "168.192.in-addr.arpa"
     soa: ns1
diff --git a/vars/cord_defaults.yml b/vars/cord_defaults.yml
index d86c9db..da45ddf 100644
--- a/vars/cord_defaults.yml
+++ b/vars/cord_defaults.yml
@@ -1,14 +1,24 @@
 ---
 # vars/cord_defaults.yml
 
+# Indicates that the nodes have been provisioned by CORD MaaS.
+# Change or override (set to False) for a multi-node install on CloudLab
+cord_provisioned: True
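+# e.g. (hypothetical invocation):
+#   ansible-playbook -i inventory/multi-localhost cord-compute-playbook.yml \
+#     --extra-vars="cord_provisioned=False"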
+
 openstack_version: kilo
 
 juju_config_name: cord
 
+juju_config_path: /usr/local/src/juju_config.yml
+
+service_profile_repo_dest: "~/service-profile"
+
 xos_configuration: cord-pod
 
 xos_repo_branch: "master"
 
+xos_repo_dest: "~/xos"
+
 apt_cacher_name: apt-cache
 
 apt_ssl_sites:
@@ -166,16 +176,6 @@
     diskGB: 40
     docker_path: 'service-profile/cord-pod'
 
-  - name: "nova-compute-1"
-    service: "nova-compute"
-    root_ssh_login: true
-    aliases:
-      - "nova-compute"
-    ipv4_last_octet: 140
-    cpu: 6
-    memMB: 16384
-    diskGB: 240
-
 
 vm_service_list:
   - ceilometer
@@ -187,7 +187,6 @@
   - nova-cloud-controller
   - openstack-dashboard
   - rabbitmq-server
-  - nova-compute
 
 
 standalone_service_list:
@@ -230,6 +229,7 @@
   - name: "ceilometer:ceilometer-service"
     relations: [ "ceilometer-agent:ceilometer-service", ]
 
+compute_relations:
   - name: nova-compute
     relations: [ "ceilometer-agent", "glance", "nova-cloud-controller", "nagios", "nrpe", ]
 
diff --git a/vars/cord_single_defaults.yml b/vars/cord_single_defaults.yml
new file mode 100644
index 0000000..9491b37
--- /dev/null
+++ b/vars/cord_single_defaults.yml
@@ -0,0 +1,252 @@
+---
+# vars/cord_single_defaults.yml
+
+# For a single-node case, we don't expect the node to already have been
+# provisioned by CORD MaaS.  It's just Ubuntu 14.04.
+cord_provisioned: False
+
+openstack_version: kilo
+
+juju_config_name: cord
+
+juju_config_path: /usr/local/src/juju_config.yml
+
+service_profile_repo_dest: "~/service-profile"
+
+xos_configuration: cord-pod
+
+xos_repo_branch: "master"
+
+apt_cacher_name: apt-cache
+
+apt_ssl_sites:
+  - apt.dockerproject.org
+  - butler.opencloud.cs.arizona.edu
+  - deb.nodesource.com
+
+charm_versions:
+  ceilometer: "cs:trusty/ceilometer-17"
+  ceilometer-agent: "cs:trusty/ceilometer-agent-13"
+  glance: "cs:trusty/glance-28"
+  keystone: "cs:trusty/keystone-33"
+  mongodb: "cs:trusty/mongodb-33"
+  percona-cluster: "cs:trusty/percona-cluster-31"
+  nagios: "cs:trusty/nagios-10"
+  neutron-api: "cs:~cordteam/trusty/neutron-api-3"
+  nova-cloud-controller: "cs:trusty/nova-cloud-controller-64"
+  nova-compute: "cs:~cordteam/trusty/nova-compute-2"
+  nrpe: "cs:trusty/nrpe-4"
+  ntp: "cs:trusty/ntp-14"
+  openstack-dashboard: "cs:trusty/openstack-dashboard-19"
+  rabbitmq-server: "cs:trusty/rabbitmq-server-42"
+
+head_vm_list:
+  - name: "juju-1"
+    service: "juju"
+    aliases:
+      - "juju"
+    ipv4_last_octet: 10
+    cpu: 1
+    memMB: 2048
+    diskGB: 20
+
+  - name: "ceilometer-1"
+    service: "ceilometer"
+    aliases:
+      - "ceilometer"
+    ipv4_last_octet: 20
+    cpu: 1
+    memMB: 2048
+    diskGB: 20
+    forwarded_ports:
+      - { ext: 8777, int: 8777 }
+
+  - name: "glance-1"
+    service: "glance"
+    aliases:
+      - "glance"
+    ipv4_last_octet: 30
+    cpu: 2
+    memMB: 4096
+    diskGB: 160
+    forwarded_ports:
+      - { ext: 9292, int: 9292 }
+
+  - name: "keystone-1"
+    service: "keystone"
+    aliases:
+      - "keystone"
+    ipv4_last_octet: 40
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+    forwarded_ports:
+      - { ext: 35357, int: 35357 }
+      - { ext: 4990, int: 4990 }
+      - { ext: 5000, int: 5000 }
+
+  - name: "percona-cluster-1"
+    service: "percona-cluster"
+    aliases:
+      - "percona-cluster"
+    ipv4_last_octet: 50
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+
+  - name: "nagios-1"
+    service: "nagios"
+    aliases:
+      - "nagios"
+    ipv4_last_octet: 60
+    cpu: 1
+    memMB: 2048
+    diskGB: 20
+    forwarded_ports:
+      - { ext: 3128, int: 80 }
+
+  - name: "neutron-api-1"
+    service: "neutron-api"
+    aliases:
+      - "neutron-api"
+    ipv4_last_octet: 70
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+    forwarded_ports:
+      - { ext: 9696, int: 9696 }
+
+  - name: "nova-cloud-controller-1"
+    service: "nova-cloud-controller"
+    aliases:
+      - "nova-cloud-controller"
+    ipv4_last_octet: 80
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+    forwarded_ports:
+      - { ext: 8774, int: 8774 }
+
+  - name: "openstack-dashboard-1"
+    service: "openstack-dashboard"
+    aliases:
+      - "openstack-dashboard"
+    ipv4_last_octet: 90
+    cpu: 1
+    memMB: 2048
+    diskGB: 20
+    forwarded_ports:
+      - { ext: 8080, int: 80 }
+
+  - name: "rabbitmq-server-1"
+    service: "rabbitmq-server"
+    aliases:
+      - "rabbitmq-server"
+    ipv4_last_octet: 100
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+
+  - name: "onos-cord-1"
+    aliases:
+      - "onos-cord"
+    ipv4_last_octet: 110
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+    docker_path: "cord"
+
+  - name: "onos-fabric-1"
+    aliases:
+      - "onos-fabric"
+    ipv4_last_octet: 120
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+    docker_path: "cord"
+
+  - name: "xos-1"
+    aliases:
+      - "xos"
+    ipv4_last_octet: 130
+    cpu: 2
+    memMB: 4096
+    diskGB: 40
+    docker_path: 'service-profile/cord-pod'
+
+  - name: "nova-compute-1"
+    service: "nova-compute"
+    root_ssh_login: true
+    aliases:
+      - "nova-compute"
+    ipv4_last_octet: 140
+    cpu: 6
+    memMB: 16384
+    diskGB: 240
+
+
+vm_service_list:
+  - ceilometer
+  - glance
+  - keystone
+  - percona-cluster
+  - nagios
+  - neutron-api
+  - nova-cloud-controller
+  - openstack-dashboard
+  - rabbitmq-server
+  - nova-compute
+
+
+standalone_service_list:
+  - ntp
+  - nrpe
+  - ceilometer-agent
+
+
+service_relations:
+  - name: keystone
+    relations: [ "percona-cluster", "nrpe", ]
+
+  - name: nova-cloud-controller
+    relations: [ "percona-cluster", "rabbitmq-server", "glance", "keystone", "nrpe", ]
+
+  - name: glance
+    relations: [ "percona-cluster", "keystone", "nrpe", ]
+
+  - name: neutron-api
+    relations: [ "keystone",  "percona-cluster", "rabbitmq-server", "nova-cloud-controller", "nrpe", ]
+
+  - name: openstack-dashboard
+    relations: [ "keystone", "nrpe", ]
+
+  - name: nagios
+    relations: [ "nrpe", ]
+
+  - name: "percona-cluster:juju-info"
+    relations: [ "nrpe:general-info", ]
+
+  - name: rabbitmq-server
+    relations: [ "nrpe", ]
+
+  - name: ceilometer
+    relations: [ "mongodb", "rabbitmq-server", "nagios", "nrpe", ]
+
+  - name: "ceilometer:identity-service"
+    relations: [ "keystone:identity-service", ]
+
+  - name: "ceilometer:ceilometer-service"
+    relations: [ "ceilometer-agent:ceilometer-service", ]
+
+  - name: nova-compute
+    relations: [ "ceilometer-agent", "glance", "nova-cloud-controller", "nagios", "nrpe", ]
+
+  - name: "nova-compute:shared-db"
+    relations: [ "percona-cluster:shared-db", ]
+
+  - name: "nova-compute:amqp"
+    relations: [ "rabbitmq-server:amqp", ]
+
+  - name: ntp
+    relations: [ "nova-compute", ]
+