Support multi-node CORD install
diff --git a/roles/cloudlab-prep/defaults/main.yml b/roles/cloudlab-prep/defaults/main.yml
index 1b3f00e..99c6595 100644
--- a/roles/cloudlab-prep/defaults/main.yml
+++ b/roles/cloudlab-prep/defaults/main.yml
@@ -2,5 +2,5 @@
 # roles/cloudlab-prep/defaults/main.yml
 
 cloudlab_extrafs:
-  - /var/lib/uvtool/libvirt/images
-  #  - /var/lib/nova
+  - { mount: /var/lib/uvtool/libvirt/images, dev: /dev/sda }
+  - { mount: /var/lib/nova, dev: /dev/sdb }
diff --git a/roles/cloudlab-prep/tasks/main.yml b/roles/cloudlab-prep/tasks/main.yml
index 5dc9c2c..5bf71a3 100644
--- a/roles/cloudlab-prep/tasks/main.yml
+++ b/roles/cloudlab-prep/tasks/main.yml
@@ -3,14 +3,14 @@
 
 - name: create directories to mount extra filesystems
   file:
-    dest={{ item }}
+    dest={{ item.mount }}
     state=directory
   with_items: "{{ cloudlab_extrafs }}"
 
 
 # FIXME: does not create lost+found, -q makes it fail without error...
 - name: Set up extra disk space
-  command: /usr/testbed/bin/mkextrafs -qf {{ item }}
-    creates={{ item }}/lost+found
+  command: /usr/testbed/bin/mkextrafs -r {{ item.dev }} -qf {{ item.mount }}
+    creates={{ item.mount }}/lost+found
   with_items: "{{ cloudlab_extrafs }}"
 
diff --git a/roles/compute-prep/tasks/main.yml b/roles/compute-prep/tasks/main.yml
index a62305f..0c57979 100644
--- a/roles/compute-prep/tasks/main.yml
+++ b/roles/compute-prep/tasks/main.yml
@@ -10,11 +10,21 @@
   with_items:
     - python-yaml
 
+- name: Add ubuntu user
+  user:
+    name=ubuntu
+    groups=admin
+
 - name: Add head node ubuntu user key
   authorized_key:
     user=ubuntu
     key="{{ hostvars[groups['head'][0]]['sshkey']['stdout'] }}"
 
+- name: Add head node root user key
+  authorized_key:
+    user=root
+    key="{{ hostvars[groups['head'][0]]['sshkey']['stdout'] }}"
+
 - name: Add route via /etc/rc.local
   template:
     src=rc.local.j2
@@ -22,9 +32,9 @@
     mode=0755
   notify:
     - run rc.local
+  when: not cord_provisioned
 
 - name: Create /var/lib/nova dir
   file:
     path=/var/lib/nova
     state=directory
-
diff --git a/roles/config-virt/tasks/main.yml b/roles/config-virt/tasks/main.yml
index da7b239..f3dc91d 100644
--- a/roles/config-virt/tasks/main.yml
+++ b/roles/config-virt/tasks/main.yml
@@ -13,7 +13,7 @@
     command=facts
 
 - name: Tear down libvirt's default network
-  when: ansible_libvirt_networks["default"] is defined
+  when: not cord_provisioned and ansible_libvirt_networks["default"] is defined
   virt_net:
     command={{ item }}
     name=default
@@ -28,20 +28,22 @@
     command=define
     xml='{{ lookup("template", "virt_net.xml.j2") }}'
   with_items: '{{ virt_nets }}'
+  when: not cord_provisioned
 
 - name: collect libvirt network facts after defining new network
   virt_net:
     command=facts
+  when: not cord_provisioned
 
 - name: start libvirt networks
-  when: ansible_libvirt_networks["xos-{{ item.name }}"].state != "active"
+  when: not cord_provisioned and ansible_libvirt_networks["xos-{{ item.name }}"].state != "active"
   virt_net:
     name=xos-{{ item.name }}
     command=create
   with_items: '{{ virt_nets }}'
 
 - name: have libvirt networks autostart
-  when: ansible_libvirt_networks["xos-{{ item.name }}"].autostart != "yes"
+  when: not cord_provisioned and ansible_libvirt_networks["xos-{{ item.name }}"].autostart != "yes"
   virt_net:
     name=xos-{{ item.name }}
     autostart=yes
@@ -59,6 +61,7 @@
   notify:
     - reload libvirt-bin
     - run qemu hook
+  when: not cord_provisioned
 
 - name: Wait for uvt-kvm image to be available
   async_status: jid={{ uvt_sync.ansible_job_id }}
diff --git a/roles/create-vms/tasks/main.yml b/roles/create-vms/tasks/main.yml
index 28350ef..29b7545 100644
--- a/roles/create-vms/tasks/main.yml
+++ b/roles/create-vms/tasks/main.yml
@@ -39,15 +39,19 @@
   template:
     src=eth0.cfg.j2
     dest={{ ansible_user_dir }}/eth0.cfg
+  when: not cord_provisioned
 
 - name: Copy eth0 interface config file to all VMs
   command: ansible services -b -u ubuntu -m copy -a "src={{ ansible_user_dir }}/eth0.cfg dest=/etc/network/interfaces.d/eth0.cfg owner=root group=root mode=0644"
+  when: not cord_provisioned
 
 - name: Restart eth0 interface on all VMs
   command: ansible services -b -u ubuntu -m shell -a "ifdown eth0 ; ifup eth0"
+  when: not cord_provisioned
 
 - name: Verify that we can log into every VM after restarting network interfaces
   command: ansible services -m ping -u ubuntu
+  when: not cord_provisioned
 
 # sshkey is registered in head-prep task
 - name: Enable root ssh login on VM's that require it
diff --git a/roles/docker-compose/tasks/main.yml b/roles/docker-compose/tasks/main.yml
index 59325e4..efb310a 100644
--- a/roles/docker-compose/tasks/main.yml
+++ b/roles/docker-compose/tasks/main.yml
@@ -29,5 +29,4 @@
 
 - name: Copy admin-openrc.sh into XOS container
   command: ansible xos-1 -u ubuntu -m copy \
-    -a "src=~/admin-openrc.sh dest=~/service-profile/{{ xos_configuration }}"
-
+    -a "src=~/admin-openrc.sh dest={{ service_profile_repo_dest }}/{{ xos_configuration }}"
diff --git a/roles/juju-compute-setup/tasks/main.yml b/roles/juju-compute-setup/tasks/main.yml
new file mode 100644
index 0000000..f12ce43
--- /dev/null
+++ b/roles/juju-compute-setup/tasks/main.yml
@@ -0,0 +1,62 @@
+---
+# roles/juju-compute-setup/tasks/main.yml
+
+# Code for this is in library/juju_facts.py
+- name: Obtain Juju Facts for creating machines
+  juju_facts:
+
+# For setwise operations on desired vs Juju state:
+# list of active juju_machines names: juju_machines.keys()
+# list of active juju_services names: juju_services.keys()
+
+- name: Add machines to Juju
+  command: "juju add-machine ssh:{{ item }}"
+  with_items: "{{ groups['compute'] | difference( juju_machines.keys() ) }}"
+
+# run this again, so machines will be in the juju_machines list
+- name: Obtain Juju Facts after machine creation
+  juju_facts:
+
+- name: Deploy nova-compute service if needed
+  command: "juju deploy {{ charm_versions[item] | default(item) }} --to {{ juju_machines[groups['compute'][0]]['machine_id'] }} --config={{ juju_config_path }}"
+  with_items:
+    - "nova-compute"
+  when: '"nova-compute" not in juju_services.keys()'
+
+- name: Create relations between nova-compute and other services if needed
+  command: "juju add-relation '{{ item.0.name }}' '{{ item.1 }}'"
+  register: juju_relation
+  failed_when: "juju_relation|failed and 'relation already exists' not in juju_relation.stderr"
+  with_subelements:
+    - "{{ compute_relations }}"
+    - relations
+
+# run another time
+- name: Obtain Juju Facts after deploying nova-compute
+  juju_facts:
+  when: '"nova-compute" not in juju_services.keys()'
+
+- name: Add more nova-compute units
+  command: "juju add-unit nova-compute --to {{ juju_machines[item]['machine_id'] }}"
+  with_items: "{{ groups['compute'] | difference( juju_compute_nodes.keys() ) }}"
+
+- name: Pause to let Juju settle
+  pause:
+    prompt="Waiting for Juju..."
+    seconds=20
+
+# 160*15s = 2400s = 40m max wait
+- name: Wait for nova-compute nodes to come online
+  juju_facts:
+  until: item in juju_compute_nodes.keys() and juju_compute_nodes[item]['workload-status']['message'] == "Unit is ready"
+  retries: 160
+  delay: 15
+  with_items: "{{ groups['compute'] }}"
+
+- name: verify that the nodes appear in nova
+  action: shell bash -c "source ~/admin-openrc.sh; nova hypervisor-list | grep '{{ item }}'"
+  register: result
+  until: result | success
+  retries: 5
+  delay: 5
+  with_items: "{{ groups['compute'] }}"
diff --git a/roles/xos-compute-setup/tasks/main.yml b/roles/xos-compute-setup/tasks/main.yml
new file mode 100644
index 0000000..b2689a7
--- /dev/null
+++ b/roles/xos-compute-setup/tasks/main.yml
@@ -0,0 +1,7 @@
+---
+# xos-compute-setup/tasks/main.yml
+#
+# Tell XOS that a new compute node has been added
+
+- name: ssh to XOS VM and run 'make new-nodes'
+  command: ssh ubuntu@xos "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}/; make new-nodes"
diff --git a/roles/xos-start/tasks/main.yml b/roles/xos-start/tasks/main.yml
new file mode 100644
index 0000000..a819727
--- /dev/null
+++ b/roles/xos-start/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+# xos-start/tasks/main.yml
+
+- name: Build XOS containers
+  command: ansible xos-1 -u ubuntu -m shell \
+    -a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make local_containers | tee xos-build.out"
+
+- name: Onboard services and start XOS
+  command: ansible xos-1 -u ubuntu -m shell \
+    -a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make xos | tee xos-onboard.out"
+
+- name: Pause to let XOS initialize
+  pause: seconds=120
+
+- name: Initial VTN configuration
+  command: ansible xos-1 -u ubuntu -m shell \
+    -a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make vtn"
+
+- name: Initial fabric configuration
+  command: ansible xos-1 -u ubuntu -m shell \
+    -a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make fabric"
+
+- name: Pause to let ONOS initialize
+  pause: seconds=20
+
+- name: Configure CORD services
+  command: ansible xos-1 -u ubuntu -m shell \
+    -a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make cord"
diff --git a/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml b/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml
index 0481444..364882e 100644
--- a/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml
+++ b/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml
@@ -70,4 +70,3 @@
         chdir="{{ service_profile_repo_dest }}/containers/xos/"
       with_items:
        - base
-