apt dist-upgrade reboot enabled, lint fixes
second round, for testing
lint clean, testing needed
prereqs assert w/dig doesn't loop properly
use head not all for target hosts in single

Change-Id: Ie530204b989a73828f45508fcdd4374a3362c764
diff --git a/ansible/roles/java8-oracle/tasks/main.yml b/ansible/roles/java8-oracle/tasks/main.yml
index 809fbee..32fe37b 100644
--- a/ansible/roles/java8-oracle/tasks/main.yml
+++ b/ansible/roles/java8-oracle/tasks/main.yml
@@ -1,19 +1,19 @@
 ---
 - name: Install add-apt-repository
-  sudo: yes
-  apt: name=software-properties-common state=latest
+  become: yes
+  apt: name=software-properties-common state=installed
 
 - name: Add Oracle Java repository
-  sudo: yes
+  become: yes
   apt_repository: repo='ppa:webupd8team/java'
 
 - name: Accept Java 8 license
-  sudo: yes
+  become: yes
   debconf: name='oracle-java8-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select'
 
 - name: Install Oracle Java 8
-  sudo: yes
-  apt: name={{item}} state=latest
+  become: yes
+  apt: name={{item}} state=installed
   with_items:
   - oracle-java8-installer
   - ca-certificates
diff --git a/cloudlab-openstack-playbook.yml b/cloudlab-openstack-playbook.yml
index e8299f2..c529b8a 100644
--- a/cloudlab-openstack-playbook.yml
+++ b/cloudlab-openstack-playbook.yml
@@ -4,9 +4,9 @@
 - hosts: cloudlab
 
   vars:
-    xos_repo_url: "https://github.com/open-cloud/xos"
+    xos_repo_url: "https://github.com/opencord/xos"
     xos_repo_dest: "~/xos"
-    xos_repo_branch: "feature/lts"
+    xos_repo_branch: "master"
     xos_configuration: "devel"
     xos_container_rebuild: true
 
diff --git a/cord-compute-playbook.yml b/cord-compute-playbook.yml
index 6461e1f..df42336 100644
--- a/cord-compute-playbook.yml
+++ b/cord-compute-playbook.yml
@@ -4,9 +4,12 @@
 - name: Include vars
   hosts: all
   tasks:
-  - include_vars: vars/cord_defaults.yml
-  - include_vars: vars/cord.yml
-  - include_vars: vars/example_keystone.yml
+    - name: Include variables
+      include_vars: "{{ item }}"
+      with_items:
+        - vars/cord_defaults.yml
+        - vars/cord.yml
+        - vars/example_keystone.yml
 
 - name: Configure compute hosts to use DNS server
   hosts: all
diff --git a/cord-diag-playbook.yml b/cord-diag-playbook.yml
index ea06cda..dd6721a 100644
--- a/cord-diag-playbook.yml
+++ b/cord-diag-playbook.yml
@@ -4,7 +4,8 @@
 - name: Create diag_dir fact
   hosts: head
   tasks:
-    - set_fact:
+    - name: Set diag_dir name to diag-rfc3339_datetime
+      set_fact:
         diag_dir: "diag-{{ ansible_date_time.iso8601_basic_short }}"
 
 - name: Diagnostics on head node
@@ -20,7 +21,8 @@
 - name: Collect compute node diagnostics on head node
   hosts: compute
   tasks:
-    - synchronize:
+    - name: rsync diag_dir from compute nodes
+      synchronize:
         src: "/tmp/{{ hostvars[groups['head'][0]]['diag_dir'] }}/{{ inventory_hostname }}"
         dest: "~/{{ hostvars[groups['head'][0]]['diag_dir'] }}/"
         recursive: yes
diff --git a/cord-head-playbook.yml b/cord-head-playbook.yml
index 4774b88..ce55372 100644
--- a/cord-head-playbook.yml
+++ b/cord-head-playbook.yml
@@ -7,9 +7,12 @@
 - name: Include vars
   hosts: all
   tasks:
-  - include_vars: vars/cord_defaults.yml
-  - include_vars: vars/cord.yml
-  - include_vars: vars/example_keystone.yml  # for testing
+    - name: Include variables
+      include_vars: "{{ item }}"
+      with_items:
+        - vars/cord_defaults.yml
+        - vars/cord.yml
+        - vars/example_keystone.yml
 
 - name: DNS Server and apt-cacher-ng Setup
   hosts: head
diff --git a/cord-post-deploy-playbook.yml b/cord-post-deploy-playbook.yml
index f1261eb..c0d6a69 100644
--- a/cord-post-deploy-playbook.yml
+++ b/cord-post-deploy-playbook.yml
@@ -2,11 +2,14 @@
 # Tests single node cord-pod XOS configuration
 
 - name: Include vars
-  hosts: head
+  hosts: all
   tasks:
-  - include_vars: vars/cord_single_defaults.yml
-  - include_vars: vars/cord.yml
-  - include_vars: vars/example_keystone.yml
+    - name: Include variables
+      include_vars: "{{ item }}"
+      with_items:
+        - vars/cord_defaults.yml
+        - vars/cord.yml
+        - vars/example_keystone.yml
 
 - name: Run post-deploy tests
   hosts: head
diff --git a/cord-single-playbook.yml b/cord-single-playbook.yml
index 92807f2..0f5e491 100644
--- a/cord-single-playbook.yml
+++ b/cord-single-playbook.yml
@@ -7,9 +7,12 @@
 - name: Include vars
   hosts: head
   tasks:
-  - include_vars: vars/cord_single_defaults.yml
-  - include_vars: vars/cord.yml
-  - include_vars: vars/example_keystone.yml
+    - name: Include variables
+      include_vars: "{{ item }}"
+      with_items:
+        - vars/cord_single_defaults.yml
+        - vars/cord.yml
+        - vars/example_keystone.yml
 
 - name: Check Prerequisites
   hosts: head
diff --git a/files/ansible.cfg b/files/ansible.cfg
deleted file mode 100644
index dd43d2b..0000000
--- a/files/ansible.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-[defaults]
-host_key_checking = false
diff --git a/files/etc/apt/sources.list.d/linux.dell.com.sources.list b/files/etc/apt/sources.list.d/linux.dell.com.sources.list
deleted file mode 100644
index 25aba6a..0000000
--- a/files/etc/apt/sources.list.d/linux.dell.com.sources.list
+++ /dev/null
@@ -1 +0,0 @@
-deb http://linux.dell.com/repo/community/ubuntu trusty openmanage
diff --git a/files/etc/dnsmasq.d/cord b/files/etc/dnsmasq.d/cord
deleted file mode 100644
index 0925aae..0000000
--- a/files/etc/dnsmasq.d/cord
+++ /dev/null
@@ -1,12 +0,0 @@
-# Range of IP addresses to hand out on management net
-dhcp-range=10.90.0.10,10.90.0.253
-
-# Name of bridge on the management network.
-# Don't change this.  It needs to be mgmtbr or else the install will break.
-interface=mgmtbr
-
-# Put the leasefile here so that 'uvt-kvm ip <vm>' will work.
-dhcp-leasefile=/var/lib/libvirt/dnsmasq/default.leases
-
-# Default route for management network.
-dhcp-option=option:router,10.90.0.1
diff --git a/files/etc/libvirt/hooks/daemon b/files/etc/libvirt/hooks/daemon
deleted file mode 100644
index 8d9102b..0000000
--- a/files/etc/libvirt/hooks/daemon
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/sh
-
-SHELL="/bin/bash"
-
-NIC=$( route|grep default|awk '{print $NF}' )
-
-NAME="${1}"
-OP="${2}"
-SUBOP="${3}"
-ARGS="${4}"
-
-add_port_fwd_rule() {
-    DPORT=$1
-    VM=$2
-    TOPORT=$3
-
-    VMIP=$( getent ahosts $VM|head -1|awk '{print $1}' )
-    iptables -t nat -C PREROUTING -p tcp -i $NIC --dport $DPORT -j DNAT --to-destination $VMIP:$TOPORT
-    if [ "$?" -ne 0 ]
-    then
-        iptables -t nat -A PREROUTING -p tcp -i $NIC --dport $DPORT -j DNAT --to-destination $VMIP:$TOPORT
-    fi
-}
-
-if [ "$OP" = "start" ] || [ "$OP" = "reload" ]
-then
-    iptables -t nat -F
-    add_port_fwd_rule 35357 keystone 35357
-    add_port_fwd_rule 4990 keystone 4990
-    add_port_fwd_rule 5000 keystone 5000
-    add_port_fwd_rule 8774 nova-cloud-controller 8774
-    add_port_fwd_rule 9696 neutron-api 9696
-    add_port_fwd_rule 9292 glance 9292
-    add_port_fwd_rule 8080 openstack-dashboard 80
-    add_port_fwd_rule 3128 nagios 80
-    add_port_fwd_rule 8777 ceilometer 8777
-
-    # Also flush the filter table before rules re-added
-    iptables -F
-fi
diff --git a/files/etc/libvirt/hooks/qemu b/files/etc/libvirt/hooks/qemu
deleted file mode 100644
index 1c947f9..0000000
--- a/files/etc/libvirt/hooks/qemu
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/sh
-
-SHELL="/bin/bash"
-
-NIC=$( route|grep default|awk '{print $NF}' )
-PORTAL=$( dig +short portal.opencloud.us | tail -1 )
-
-NAME="${1}"
-OP="${2}"
-SUBOP="${3}"
-ARGS="${4}"
-
-add_rule() {
-    CHAIN=$1
-    ARGS=$2
-    iptables -C $CHAIN $ARGS
-    if [ "$?" -ne 0 ]
-    then
-        iptables -I $CHAIN 1 $ARGS
-    fi
-}
-
-add_local_access_rules() {
-    SUBNET=$( ip addr show $NIC|grep "inet "|awk '{print $2}' )
-    PRIVATENET=$( ip addr show virbr0|grep "inet "|awk '{print $2}' )
-    add_rule "FORWARD" "-s $SUBNET -j ACCEPT"
-    # Don't NAT traffic from service VMs destined to the local subnet
-    add_rule "POSTROUTING" "-t nat -s $PRIVATENET -d $SUBNET -j RETURN"
-}
-
-add_portal_access_rules() {
-    add_rule "FORWARD" "-s $PORTAL -j ACCEPT"
-}
-
-add_web_access_rules() {
-    add_rule "FORWARD" "-p tcp --dport 80 -j ACCEPT"
-}
-
-if [ "$OP" = "start" ]
-then
-	add_local_access_rules
-	add_portal_access_rules
-	add_web_access_rules
-fi
diff --git a/files/onos/docker-compose.yml b/files/onos/docker-compose.yml
deleted file mode 100644
index 9b16c4d..0000000
--- a/files/onos/docker-compose.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-onos:
-    image: onosproject/onos
-    expose:
-    - "6653"
-    - "8101"
-    - "8181"
-    - "9876"
-    net: host
-    volumes:
-    - ./id_rsa:/root/node_key:ro
diff --git a/files/openstack.cfg b/files/openstack.cfg
deleted file mode 100644
index e9a1866..0000000
--- a/files/openstack.cfg
+++ /dev/null
@@ -1,37 +0,0 @@
-glance:
-   openstack-origin: "cloud:trusty-kilo"
-keystone:
-   admin-password: ""
-   https-service-endpoints: "True"
-   openstack-origin: "cloud:trusty-kilo"
-   use-https: "yes"
-nova-cloud-controller:
-   console-access-protocol: "novnc"
-   network-manager: "Neutron"
-   openstack-origin: "cloud:trusty-kilo"
-nova-compute:
-#   config-flags: "firewall_driver=nova.virt.firewall.NoopFirewallDriver"
-   config-flags: "firewall_driver=nova.virt.firewall.NoopFirewallDriver,xos_api_url=http://portal.opencloud.us"
-   disable-neutron-security-groups: "True"
-   openstack-origin: "cloud:trusty-kilo"
-ntp:
-   source: "0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org"
-openstack-dashboard:
-   openstack-origin: "cloud:trusty-kilo"
-neutron-gateway:
-   bridge-mappings: "physnet1:br-data nat:br-nat"
-   flat-network-providers: "*"
-   instance-mtu: "1400"
-   openstack-origin: "cloud:trusty-kilo"
-   vlan-ranges: "physnet1:1000:2000 nat"
-neutron-api:
-   flat-network-providers: "*"
-   openstack-origin: "cloud:trusty-kilo"
-   vlan-ranges: "physnet1:1000:2000 nat"
-neutron-openvswitch:
-   bridge-mappings: "physnet1:br-data nat:br-nat"
-   disable-security-groups: "True"
-   flat-network-providers: "*"
-   vlan-ranges: "physnet1:1000:2000 nat"
-rabbitmq-server:
-  ssl: "on"
diff --git a/files/tmp/set-up-onos.yml b/files/tmp/set-up-onos.yml
deleted file mode 100644
index 53a84f1..0000000
--- a/files/tmp/set-up-onos.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- hosts: onos-cord
-  sudo: no
-  vars:
-    homedir: /tmp
-  remote_user: ubuntu
-  tasks:
-  - shell: "which docker > /dev/null || wget -qO- https://get.docker.com/ | sh"
-
-  - shell: usermod -aG docker ubuntu
-    sudo: yes
-
-  - shell: "curl -L https://github.com/docker/compose/releases/download/1.5.2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose; chmod +x /usr/local/bin/docker-compose"
-    sudo: yes
-
-  - file: path={{ ansible_env['PWD'] }}/cord
-      state=directory
-
-  - copy: src={{ homedir }}/{{ item }}
-      dest={{ ansible_env['PWD'] }}/cord
-    with_items:
-      - id_rsa
-      - docker-compose.yml
-
-  - file: path={{ ansible_env['PWD'] }}/cord/id_rsa
-      mode=0600
diff --git a/files/tmp/set-up-xos.yml b/files/tmp/set-up-xos.yml
deleted file mode 100644
index e729d8b..0000000
--- a/files/tmp/set-up-xos.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- hosts: xos
-  sudo: no
-  remote_user: ubuntu
-  vars:
-    homedir: /tmp
-  tasks:
-  - apt: update_cache=yes
-    sudo: yes
-
-  - apt: name={{ item }}
-      state=present
-    sudo: yes
-    with_items:
-    - git
-    - make
-    - python-novaclient
-    - python-neutronclient
-    - python-keystoneclient
-    - python-glanceclient
-
-  - git: repo=https://github.com/open-cloud/xos.git
-      dest={{ ansible_env['PWD'] }}/xos
-
-  - shell: make -f {{ ansible_env['PWD'] }}/xos/xos/configurations/common/Makefile.prereqs
-
-  - copy: src={{ homedir }}/{{ item }}
-      dest={{ ansible_env['PWD'] }}/xos/xos/configurations/cord-pod/
-    with_items:
-    - id_rsa
-    - id_rsa.pub
-
-  - file: path={{ ansible_env['PWD'] }}/xos/xos/configurations/cord-pod/id_rsa
-      mode=0600
-
-  - copy: src={{ homedir }}/admin-openrc.sh
-      dest={{ ansible_env['PWD'] }}/xos/xos/configurations/cord-pod
-
-  - copy: src={{ homedir }}/id_rsa
-      dest={{ ansible_env['PWD'] }}/xos/xos/configurations/cord-pod/node_key
-
-  - file: path={{ ansible_env['PWD'] }}/xos/xos/configurations/cord-pod/images
-      state=directory
-
-  - get_url: url=http://128.112.139.30/opencloud/trusty-server-cloudimg-amd64-disk1.img
-      dest={{ ansible_env['PWD'] }}/xos/xos/configurations/cord-pod/images/trusty-server-multi-nic.img
diff --git a/files/usr/lib/python2.7/dist-packages/uvtool/libvirt/__init__.py b/files/usr/lib/python2.7/dist-packages/uvtool/libvirt/__init__.py
deleted file mode 100644
index 3c72a8d..0000000
--- a/files/usr/lib/python2.7/dist-packages/uvtool/libvirt/__init__.py
+++ /dev/null
@@ -1,246 +0,0 @@
-# Copyright (C) 2013 Canonical Ltd.
-# Author: Robie Basak <robie.basak@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import codecs
-import contextlib
-import itertools
-import os
-import shutil
-import subprocess
-import tempfile
-
-import libvirt
-from lxml import etree
-from lxml.builder import E
-
-LIBVIRT_DNSMASQ_LEASE_FILE = '/var/lib/libvirt/dnsmasq/default.leases'
-
-
-def get_libvirt_pool_object(libvirt_conn, pool_name):
-    try:
-        pool = libvirt_conn.storagePoolLookupByName(pool_name)
-    except libvirt.libvirtError:
-        raise RuntimeError("Cannot find pool %s." % repr(pool_name))
-    return pool
-
-
-def create_volume_from_fobj(new_volume_name, fobj, image_type='raw',
-        pool_name='default'):
-    """Create a new libvirt volume and populate it from a file-like object."""
-
-    compressed_fobj = tempfile.NamedTemporaryFile()
-    decompressed_fobj = tempfile.NamedTemporaryFile()
-    with contextlib.closing(compressed_fobj):
-        with contextlib.closing(decompressed_fobj):
-            shutil.copyfileobj(fobj, compressed_fobj)
-            compressed_fobj.flush()
-            compressed_fobj.seek(0)  # is this necessary?
-            subprocess.check_call(
-                [
-                    'qemu-img', 'convert', '-f', image_type, '-O', image_type,
-                    compressed_fobj.name, decompressed_fobj.name
-                ],
-                shell=False, close_fds=False)
-            decompressed_fobj.seek(0)  # is this necessary?
-            return _create_volume_from_fobj_with_size(
-                new_volume_name=new_volume_name,
-                fobj=decompressed_fobj,
-                fobj_size=os.fstat(decompressed_fobj.fileno()).st_size,
-                image_type=image_type,
-                pool_name=pool_name
-            )
-
-
-def _create_volume_from_fobj_with_size(new_volume_name, fobj, fobj_size,
-        image_type, pool_name):
-    conn = libvirt.open('qemu:///system')
-    pool = get_libvirt_pool_object(conn, pool_name)
-
-    if image_type == 'raw':
-        extra = [E.allocation(str(fobj_size)), E.capacity(str(fobj_size))]
-    elif image_type == 'qcow2':
-        extra = [E.capacity('0')]
-    else:
-        raise NotImplementedError("Unknown image type %r." % image_type)
-
-    new_vol = E.volume(
-        E.name(new_volume_name),
-        E.target(E.format(type=image_type)),
-        *extra
-        )
-    vol = pool.createXML(etree.tostring(new_vol), 0)
-
-    try:
-        stream = conn.newStream(0)
-        vol.upload(stream, 0, fobj_size, 0)
-
-        def handler(stream_ignored, size, opaque_ignored):
-            return fobj.read(size)
-
-        try:
-            stream.sendAll(handler, None)
-        except Exception as e:
-            try:
-                # This unexpectedly raises an exception even on a normal call,
-                # so ignore it.
-                stream.abort()
-            except:
-                pass
-            raise e
-        stream.finish()
-    except:
-        vol.delete(flags=0)
-        raise
-
-    return vol
-
-
-def volume_names_in_pool(pool_name='default'):
-    conn = libvirt.open('qemu:///system')
-    pool = get_libvirt_pool_object(conn, pool_name)
-    return pool.listVolumes()
-
-
-def delete_volume_by_name(volume_name, pool_name='default'):
-    conn = libvirt.open('qemu:///system')
-    pool = get_libvirt_pool_object(conn, pool_name)
-    volume = pool.storageVolLookupByName(volume_name)
-    volume.delete(flags=0)
-
-
-def have_volume_by_name(volume_name, pool_name='default'):
-    conn = libvirt.open('qemu:///system')
-    pool = get_libvirt_pool_object(conn, pool_name)
-    try:
-        volume = pool.storageVolLookupByName(volume_name)
-    except libvirt.libvirtError:
-        return False
-    else:
-        return True
-
-
-def _get_all_domains(conn=None):
-    if conn is None:
-        conn = libvirt.open('qemu:///system')
-
-    # libvirt in Precise doesn't seem to have a binding for
-    # virConnectListAllDomains, and it seems that we must enumerate
-    # defined-by-not-running and running instances separately and in different
-    # ways.
-
-    for domain_id in conn.listDomainsID():
-        yield conn.lookupByID(domain_id)
-
-    for domain_name in conn.listDefinedDomains():
-        yield conn.lookupByName(domain_name)
-
-
-def _domain_element_to_volume_paths(element):
-    assert element.tag == 'domain'
-    return (
-        source.get('file')
-        for source in element.xpath(
-            "/domain/devices/disk[@type='file']/source[@file]"
-        )
-    )
-
-
-def _domain_volume_paths(domain):
-    volume_paths = set()
-
-    for flags in [0, libvirt.VIR_DOMAIN_XML_INACTIVE]:
-        element = etree.fromstring(domain.XMLDesc(flags))
-        volume_paths.update(_domain_element_to_volume_paths(element))
-
-    return frozenset(volume_paths)
-
-
-def _volume_element_to_volume_paths(element):
-    assert element.tag == 'volume'
-    return itertools.chain(
-        (path.text for path in element.xpath('/volume/target/path')),
-        (path.text for path in element.xpath('/volume/backingStore/path')),
-    )
-
-
-def _volume_volume_paths(volume):
-    # Volumes can depend on other volumes ("backing stores"), so return all
-    # paths a volume needs to function, including the top level one.
-    volume_paths = set()
-
-    element = etree.fromstring(volume.XMLDesc(0))
-    volume_paths.update(_volume_element_to_volume_paths(element))
-
-    return frozenset(volume_paths)
-
-
-def _get_all_domain_volume_paths(conn=None):
-    if conn is None:
-        conn = libvirt.open('qemu:///system')
-
-    all_volume_paths = set()
-    for domain in _get_all_domains(conn):
-        for path in _domain_volume_paths(domain):
-            try:
-                volume = conn.storageVolLookupByKey(path)
-            except libvirt.libvirtError:
-                # ignore a lookup failure, since if a volume doesn't exist,
-                # it isn't reasonable to consider what backing volumes it may
-                # have
-                continue
-            all_volume_paths.update(_volume_volume_paths(volume))
-
-    return frozenset(all_volume_paths)
-
-
-def get_all_domain_volume_names(conn=None, filter_by_dir=None):
-    # Limitation: filter_by_dir must currently end in a '/' and be the
-    # canonical path as libvirt returns it. Ideally I'd filter by pool instead,
-    # but the libvirt API appears to not provide any method to find what pool a
-    # volume is in when looked up by key.
-    if conn is None:
-        conn = libvirt.open('qemu:///system')
-
-    for path in _get_all_domain_volume_paths(conn=conn):
-        volume = conn.storageVolLookupByKey(path)
-        if filter_by_dir and not volume.path().startswith(filter_by_dir):
-            continue
-        yield volume.name()
-
-
-def get_domain_macs(domain_name, conn=None):
-    if conn is None:
-        conn = libvirt.open('qemu:///system')
-
-    domain = conn.lookupByName(domain_name)
-    xml = etree.fromstring(domain.XMLDesc(0))
-    for mac in xml.xpath(
-            "/domain/devices/interface[@type='network' or @type='bridge']/mac[@address]"):
-        yield mac.get('address')
-
-
-def mac_to_ip(mac):
-    canonical_mac = mac.lower()
-    with codecs.open(LIBVIRT_DNSMASQ_LEASE_FILE, 'r') as f:
-        for line in f:
-            fields = line.split()
-            if len(fields) > 1 and fields[1].lower() == canonical_mac:
-                return fields[2]
-        return None
diff --git a/inventory/cloudlab-openstack b/inventory/cloudlab-openstack
index 12da333..90b23f6 100644
--- a/inventory/cloudlab-openstack
+++ b/inventory/cloudlab-openstack
@@ -1,5 +1,5 @@
 # inventory file for Clouldlab's OpenStack Profile
 
-[clouldlab]
+[cloudlab]
 ctl ansible_ssh_host=hostname_goes_here.cloudlab.us ansible_ssh_user=cloudlab_username
 
diff --git a/legacy/arizona-compute.yml b/legacy/arizona-compute.yml
deleted file mode 100644
index c342483..0000000
--- a/legacy/arizona-compute.yml
+++ /dev/null
@@ -1,72 +0,0 @@
----
-- hosts: head
-  sudo: no
-  user: ubuntu
-  tasks:
-  - name: Remind user what is going to happen
-    pause: prompt="Install nova-compute on all unused machines managed by Juju"
-
-  - name: Deploy nova-compute
-    script: scripts/juju-compute-setup.py
-
-  - name: Try to avoid race condition
-    pause: seconds=5
-
-  - name: Wait until nova-compute is deployed
-    script: scripts/wait-for-services.sh
-
-  - name: Make sure we're using KVM
-    shell: juju set nova-compute virt-type=kvm
-
-  - name: Add nova-compute relations
-    script: scripts/juju-compute-relations.py
-
-  - name: Try to avoid race condition
-    pause: seconds=5
-
-  - name: Wait until relations are added
-    script: scripts/wait-for-services.sh
-
-# Play: set up ansible-pull for OpenCloud-specific files on nova-compute nodes
-- hosts: compute
-  sudo: yes
-  user: ubuntu
-  vars:
-
-    # schedule is fed directly to cron
-    schedule: '*/15 * * * *'
-
-    # User to run ansible-pull as from cron
-    cron_user: root
-
-    # File that ansible will use for logs
-    logfile: /var/log/ansible-pull.log
-
-    # Directory to where repository will be cloned
-    workdir: /var/lib/ansible/local
-
-    # Repository to check out
-    # repo must contain a local.yml file at top level
-    #repo_url: git://github.com/sfromm/ansible-playbooks.git
-    repo_url: git://github.com/andybavier/opencloud-nova-compute-ansible.git
-
-    # Branch or tag to checkout
-    repo_version: kilo
-
-  tasks:
-
-  - name: Install ansible
-    apt: name=ansible state=installed
-
-  - name: Basic ansible inventory
-    template: src=templates/etc/ansible/hosts.j2
-      dest=/etc/ansible/hosts
-
-  - name: Create local directory to work from
-    file: path={{workdir}} state=directory owner=root group=root mode=0751
-
-  - name: Create crontab entry to clone/pull git repository
-    template: src=templates/etc/cron.d/ansible-pull.j2 dest=/etc/cron.d/ansible-pull owner=root group=root mode=0644
-
-  - name: Create logrotate entry for ansible-pull.log
-    template: src=templates/etc/logrotate.d/ansible-pull.j2 dest=/etc/logrotate.d/ansible-pull owner=root group=root mode=0644
diff --git a/legacy/arizona-hosts b/legacy/arizona-hosts
deleted file mode 100644
index 69efd6b..0000000
--- a/legacy/arizona-hosts
+++ /dev/null
@@ -1,10 +0,0 @@
-head    ansible_ssh_host=node1.cs.arizona.edu
-
-[compute]
-node2.cs.arizona.edu
-node3.cs.arizona.edu
-node4.cs.arizona.edu
-node5.cs.arizona.edu
-
-[all:vars]
-cloudlab=false
diff --git a/legacy/arizona-setup.yml b/legacy/arizona-setup.yml
deleted file mode 100644
index 63f485f..0000000
--- a/legacy/arizona-setup.yml
+++ /dev/null
@@ -1,280 +0,0 @@
----
-# Play: set up head node
-# Assumes basic /etc/ansible/hosts file
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  vars:
-    # Each OpenCloud cluster needs a unique mgmt_net_prefix
-    mgmt_net_prefix: 192.168.102
-  tasks:
-
-  - apt: name={{ item }} update_cache=yes
-    with_items:
-    - python-pycurl
-    - software-properties-common
-
-  - name: Add Juju repository
-    apt_repository: repo="ppa:juju/stable"
-
-  - name: Add Ansible repository
-    apt_repository: repo="ppa:ansible/ansible"
-
-  - name: Update Ansible cache
-    apt: update_cache=yes
-
-  - name: Install packages
-    apt: name={{ item }} state=latest
-    with_items:
-    - ansible
-    - uvtool
-    - git
-    - bzr
-    - juju-core
-    - python-novaclient
-    - python-neutronclient
-    - python-keystoneclient
-    - python-glanceclient
-
-  - name: Get juju-ansible git repo
-    git: repo=https://github.com/cmars/juju-ansible.git
-      dest=/usr/local/src/juju-ansible
-
-  - name: Set up juju-ansible symlink
-    file: dest=/usr/local/bin/juju-ansible
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Set up juju-ansible-playbook symlink
-    file: dest=/usr/local/bin/juju-ansible-playbook
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Generate key to use in VMs
-    user: name={{ ansible_env['SUDO_USER'] }} generate_ssh_key=yes
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs /var/lib/uvtool/libvirt/images
-      creates=/var/lib/uvtool/libvirt/images/lost+found
-    when: cloudlab
-
-  - name: Add myself to libvirtd group
-    user: name={{ ansible_env['SUDO_USER'] }}
-      groups=libvirtd
-      append=yes
-
-  - name: Get trusty image for uvtool
-    shell: uvt-simplestreams-libvirt sync --source http://cloud-images.ubuntu.com/daily release=trusty arch=amd64
-
-  - name: Change the virbr0 subnet to {{ mgmt_net_prefix }}.0/24
-    template: src=templates/etc/libvirt/qemu/networks/default.xml.j2
-      dest=/etc/libvirt/qemu/networks/default.xml
-    notify:
-    - recreate default net
-
-  handlers:
-  - name: recreate default net
-    script: scripts/recreate-virbr0.sh
-
-# Play: create VMs to host OpenStack services
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  tasks:
-  - name: Create VMs to host OpenCloud services
-    sudo: no
-    script: scripts/create-vms.sh
-
-  - include: tasks/vm-ips.yml
-
-  - name: Add VMs to /etc/hosts
-    template: src=templates/etc/hosts.j2
-      dest=/etc/hosts
-    notify:
-    - Reload dnsmasq
-
-  - name: Set up /etc/ansible/hosts
-    template: src=templates/etc/ansible/hosts.j2
-      dest=/etc/ansible/hosts
-
-  - name: Copy ansible.cfg to disable host key checking
-    sudo: no
-    copy: src=files/ansible.cfg
-      dest={{ ansible_env['PWD'] }}/.ansible.cfg
-
-  - name: Touch ~/.ssh/config
-    sudo: no
-    file: path={{ ansible_env['PWD'] }}/.ssh/config state=touch
-
-  - name: Disable host key checking in SSH
-    sudo: no
-    lineinfile: dest={{ ansible_env['PWD'] }}/.ssh/config
-      line="StrictHostKeyChecking no"
-
-  - name: Test that we can log into every VM
-    sudo: no
-    shell: ansible services -m ping -u ubuntu
-
-  handlers:
-  - name: Reload dnsmasq
-    shell: killall -HUP dnsmasq
-
-# Play: Install services using Juju
-- hosts: head
-  user: ubuntu
-  vars:
-    charm_src: /usr/local/src/charms/trusty
-  tasks:
-  - name: Initialize Juju
-    sudo: no
-    shell: juju generate-config
-      creates={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - shell: uvt-kvm ip juju
-    register: juju_ip
-
-  - name: Juju config file
-    sudo: no
-    template: src=templates/environments.yaml.j2
-      dest={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - name: Bootstrap Juju
-    sudo: no
-    shell: juju bootstrap
-      creates={{ ansible_env['PWD'] }}/.juju/environments/manual.jenv
-
-  - name: Copy openstack.cfg for Juju
-    sudo: yes
-    copy: src=files/openstack.cfg
-      dest=/usr/local/src/openstack.cfg
-
-  - name: Check for /usr/local/src/charms/trusty
-    sudo: yes
-    file: path={{ charm_src }}
-       state=directory
-       mode=0755
-
-  - name: Deploy OpenStack services with Juju
-    script: scripts/juju-setup.py
-
-  - name: Add all Juju relations between services
-    script: scripts/juju-relations.py
-
-  - name: Wait for relations to be fully added
-    script: scripts/wait-for-services.sh
-
-# Play: Use libvirt hooks to set up iptables
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  tasks:
-  - name: Enable port forwarding for services
-    copy: src=files/{{ item }}
-      dest={{ item }}
-      mode=0755
-    notify:
-    - reload libvirt config
-    - run qemu hook
-    with_items:
-    - /etc/libvirt/hooks/daemon
-    - /etc/libvirt/hooks/qemu
-
-  handlers:
-  - name: reload libvirt config
-    shell: killall -HUP libvirtd
-
-  - name: run qemu hook
-    shell: /etc/libvirt/hooks/qemu start start
-
-# Play: Create credentials, set up some basic OpenStack
-- hosts: head
-  user: ubuntu
-  sudo: no
-  tasks:
-
-  - name: Get keystone admin password
-    shell: juju run --unit=keystone/0 "sudo cat /var/lib/keystone/keystone.passwd"
-    register: keystone_password
-
-  - shell: uvt-kvm ip keystone
-    register: keystone_ip
-
-  - name: Create credentials
-    template: src=templates/admin-openrc.sh.j2
-     dest={{ ansible_env['PWD'] }}/admin-openrc.sh
-
-  - name: (CloudLab) Make sure that /root/setup exists
-    file: path=/root/setup state=directory
-    sudo: yes
-    when: cloudlab
-
-  - name: (CloudLab) Copy credentials to /root/setup
-    shell: scp admin-openrc.sh /root/setup
-    sudo: yes
-    when: cloudlab
-
-  - name: Copy credentials to nova-cloud-controller
-    shell: "scp admin-openrc.sh ubuntu@nova-cloud-controller:"
-
-  - name: Copy network setup script
-    sudo: yes
-    copy: src=scripts/network-setup.sh
-      dest=/usr/local/src/network-setup.sh
-      mode=0644
-
-  - name: Run network setup script
-    shell: ansible nova-cloud-controller -m script -u ubuntu -a "/usr/local/src/network-setup.sh"
-
-  - name: Get public key
-    shell: cat {{ ansible_env['PWD'] }}/.ssh/id_rsa.pub
-    register: sshkey
-
-  - name: Copy CA certificate
-    shell: sudo juju scp nova-cloud-controller/0:/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt /usr/local/share/ca-certificates
-      creates=/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
-
-  - name: Update CA ca-certificates
-    shell: update-ca-certificates
-    sudo: yes
-
-- hosts: compute
-  user: ubuntu
-  sudo: yes
-  vars:
-    control_net: "{{ hostvars['head']['ansible_virbr0']['ipv4']['network'] }}/24"
-    gateway: "{{ hostvars['head']['ansible_default_ipv4']['address'] }}"
-  tasks:
-  - name: Install package needed by Juju
-    apt: name=python-yaml state=present
-
-  - name: Add key
-    authorized_key: user="{{ ansible_env['SUDO_USER'] }}"
-      key="{{ hostvars['head']['sshkey']['stdout'] }}"
-
-  - name: Add route via /etc/rc.local
-    template: src=templates/etc/rc.local.cloudlab
-      dest=/etc/rc.local
-      mode=0755
-    when: cloudlab
-    notify:
-    - run /etc/rc.local
-
-  - name: Add route via /etc/rc.local
-    template: src=templates/etc/rc.local
-      dest=/etc/rc.local
-      mode=0755
-    when: not cloudlab
-    notify:
-    - run /etc/rc.local
-
-  - name: Touch ~/.ssh/config
-    file: path=/var/lib/nova state=directory
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs /var/lib/nova
-      creates=/var/lib/nova/lost+found
-    when: cloudlab
-
-  handlers:
-  - name: run /etc/rc.local
-    shell: /etc/rc.local
diff --git a/legacy/cloudlab-compute.yml b/legacy/cloudlab-compute.yml
deleted file mode 100644
index c342483..0000000
--- a/legacy/cloudlab-compute.yml
+++ /dev/null
@@ -1,72 +0,0 @@
----
-- hosts: head
-  sudo: no
-  user: ubuntu
-  tasks:
-  - name: Remind user what is going to happen
-    pause: prompt="Install nova-compute on all unused machines managed by Juju"
-
-  - name: Deploy nova-compute
-    script: scripts/juju-compute-setup.py
-
-  - name: Try to avoid race condition
-    pause: seconds=5
-
-  - name: Wait until nova-compute is deployed
-    script: scripts/wait-for-services.sh
-
-  - name: Make sure we're using KVM
-    shell: juju set nova-compute virt-type=kvm
-
-  - name: Add nova-compute relations
-    script: scripts/juju-compute-relations.py
-
-  - name: Try to avoid race condition
-    pause: seconds=5
-
-  - name: Wait until relations are added
-    script: scripts/wait-for-services.sh
-
-# Play: set up ansible-pull for OpenCloud-specific files on nova-compute nodes
-- hosts: compute
-  sudo: yes
-  user: ubuntu
-  vars:
-
-    # schedule is fed directly to cron
-    schedule: '*/15 * * * *'
-
-    # User to run ansible-pull as from cron
-    cron_user: root
-
-    # File that ansible will use for logs
-    logfile: /var/log/ansible-pull.log
-
-    # Directory to where repository will be cloned
-    workdir: /var/lib/ansible/local
-
-    # Repository to check out
-    # repo must contain a local.yml file at top level
-    #repo_url: git://github.com/sfromm/ansible-playbooks.git
-    repo_url: git://github.com/andybavier/opencloud-nova-compute-ansible.git
-
-    # Branch or tag to checkout
-    repo_version: kilo
-
-  tasks:
-
-  - name: Install ansible
-    apt: name=ansible state=installed
-
-  - name: Basic ansible inventory
-    template: src=templates/etc/ansible/hosts.j2
-      dest=/etc/ansible/hosts
-
-  - name: Create local directory to work from
-    file: path={{workdir}} state=directory owner=root group=root mode=0751
-
-  - name: Create crontab entry to clone/pull git repository
-    template: src=templates/etc/cron.d/ansible-pull.j2 dest=/etc/cron.d/ansible-pull owner=root group=root mode=0644
-
-  - name: Create logrotate entry for ansible-pull.log
-    template: src=templates/etc/logrotate.d/ansible-pull.j2 dest=/etc/logrotate.d/ansible-pull owner=root group=root mode=0644
diff --git a/legacy/cloudlab-hosts b/legacy/cloudlab-hosts
deleted file mode 100644
index 963b8b3..0000000
--- a/legacy/cloudlab-hosts
+++ /dev/null
@@ -1,4 +0,0 @@
-head    ansible_ssh_host=ctl.install.xos-pg0.clemson.cloudlab.us
-
-[compute]
-cp-1.install.xos-pg0.clemson.cloudlab.us
diff --git a/legacy/cloudlab-setup.yml b/legacy/cloudlab-setup.yml
deleted file mode 100644
index 2d78e8c..0000000
--- a/legacy/cloudlab-setup.yml
+++ /dev/null
@@ -1,280 +0,0 @@
----
-# Play: set up head node
-# Assumes basic /etc/ansible/hosts file
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  vars:
-    # Each OpenCloud cluster needs a unique mgmt_net_prefix
-    mgmt_net_prefix: 192.168.100
-  tasks:
-
-  - apt: name={{ item }} update_cache=yes
-    with_items:
-    - python-pycurl
-    - software-properties-common
-
-  - name: Add Juju repository
-    apt_repository: repo="ppa:juju/stable"
-
-  - name: Add Ansible repository
-    apt_repository: repo="ppa:ansible/ansible"
-
-  - name: Update Ansible cache
-    apt: update_cache=yes
-
-  - name: Install packages
-    apt: name={{ item }} state=latest
-    with_items:
-    - ansible
-    - uvtool
-    - git
-    - bzr
-    - juju-core
-    - python-novaclient
-    - python-neutronclient
-    - python-keystoneclient
-    - python-glanceclient
-
-  - name: Get juju-ansible git repo
-    git: repo=https://github.com/cmars/juju-ansible.git
-      dest=/usr/local/src/juju-ansible
-
-  - name: Set up juju-ansible symlink
-    file: dest=/usr/local/bin/juju-ansible
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Set up juju-ansible-playbook symlink
-    file: dest=/usr/local/bin/juju-ansible-playbook
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Generate key to use in VMs
-    user: name={{ ansible_env['SUDO_USER'] }} generate_ssh_key=yes
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs /var/lib/uvtool/libvirt/images
-      creates=/var/lib/uvtool/libvirt/images/lost+found
-    when: cloudlab
-
-  - name: Add myself to libvirtd group
-    user: name={{ ansible_env['SUDO_USER'] }}
-      groups=libvirtd
-      append=yes
-
-  - name: Get trusty image for uvtool
-    shell: uvt-simplestreams-libvirt sync --source http://cloud-images.ubuntu.com/daily release=trusty arch=amd64
-
-  - name: Change the virbr0 subnet to {{ mgmt_net_prefix }}.0/24
-    template: src=templates/etc/libvirt/qemu/networks/default.xml.j2
-      dest=/etc/libvirt/qemu/networks/default.xml
-    notify:
-    - recreate default net
-
-  handlers:
-  - name: recreate default net
-    script: scripts/recreate-virbr0.sh
-
-# Play: create VMs to host OpenStack services
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  tasks:
-  - name: Create VMs to host OpenCloud services
-    sudo: no
-    script: scripts/create-vms.sh
-
-  - include: tasks/vm-ips.yml
-
-  - name: Add VMs to /etc/hosts
-    template: src=templates/etc/hosts.j2
-      dest=/etc/hosts
-    notify:
-    - Reload dnsmasq
-
-  - name: Set up /etc/ansible/hosts
-    template: src=templates/etc/ansible/hosts.j2
-      dest=/etc/ansible/hosts
-
-  - name: Copy ansible.cfg to disable host key checking
-    sudo: no
-    copy: src=files/ansible.cfg
-      dest={{ ansible_env['PWD'] }}/.ansible.cfg
-
-  - name: Touch ~/.ssh/config
-    sudo: no
-    file: path={{ ansible_env['PWD'] }}/.ssh/config state=touch
-
-  - name: Disable host key checking in SSH
-    sudo: no
-    lineinfile: dest={{ ansible_env['PWD'] }}/.ssh/config
-      line="StrictHostKeyChecking no"
-
-  - name: Test that we can log into every VM
-    sudo: no
-    shell: ansible services -m ping -u ubuntu
-
-  handlers:
-  - name: Reload dnsmasq
-    shell: killall -HUP dnsmasq
-
-# Play: Install services using Juju
-- hosts: head
-  user: ubuntu
-  vars:
-    charm_src: /usr/local/src/charms/trusty
-  tasks:
-  - name: Initialize Juju
-    sudo: no
-    shell: juju generate-config
-      creates={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - shell: uvt-kvm ip juju
-    register: juju_ip
-
-  - name: Juju config file
-    sudo: no
-    template: src=templates/environments.yaml.j2
-      dest={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - name: Bootstrap Juju
-    sudo: no
-    shell: juju bootstrap
-      creates={{ ansible_env['PWD'] }}/.juju/environments/manual.jenv
-
-  - name: Copy openstack.cfg for Juju
-    sudo: yes
-    copy: src=files/openstack.cfg
-      dest=/usr/local/src/openstack.cfg
-
-  - name: Check for /usr/local/src/charms/trusty
-    sudo: yes
-    file: path={{ charm_src }}
-       state=directory
-       mode=0755
-
-  - name: Deploy OpenStack services with Juju
-    script: scripts/juju-setup.py
-
-  - name: Add all Juju relations between services
-    script: scripts/juju-relations.py
-
-  - name: Wait for relations to be fully added
-    script: scripts/wait-for-services.sh
-
-# Play: Use libvirt hooks to set up iptables
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  tasks:
-  - name: Enable port forwarding for services
-    copy: src=files/{{ item }}
-      dest={{ item }}
-      mode=0755
-    notify:
-    - reload libvirt config
-    - run qemu hook
-    with_items:
-    - /etc/libvirt/hooks/daemon
-    - /etc/libvirt/hooks/qemu
-
-  handlers:
-  - name: reload libvirt config
-    shell: killall -HUP libvirtd
-
-  - name: run qemu hook
-    shell: /etc/libvirt/hooks/qemu start start
-
-# Play: Create credentials, set up some basic OpenStack
-- hosts: head
-  user: ubuntu
-  sudo: no
-  tasks:
-
-  - name: Get keystone admin password
-    shell: juju run --unit=keystone/0 "sudo cat /var/lib/keystone/keystone.passwd"
-    register: keystone_password
-
-  - shell: uvt-kvm ip keystone
-    register: keystone_ip
-
-  - name: Create credentials
-    template: src=templates/admin-openrc.sh.j2
-     dest={{ ansible_env['PWD'] }}/admin-openrc.sh
-
-  - name: (CloudLab) Make sure that /root/setup exists
-    file: path=/root/setup state=directory
-    sudo: yes
-    when: cloudlab
-
-  - name: (CloudLab) Copy credentials to /root/setup
-    shell: scp admin-openrc.sh /root/setup
-    sudo: yes
-    when: cloudlab
-
-  - name: Copy credentials to nova-cloud-controller
-    shell: "scp admin-openrc.sh ubuntu@nova-cloud-controller:"
-
-  - name: Copy network setup script
-    sudo: yes
-    copy: src=scripts/network-setup.sh
-      dest=/usr/local/src/network-setup.sh
-      mode=0644
-
-  - name: Run network setup script
-    shell: ansible nova-cloud-controller -m script -u ubuntu -a "/usr/local/src/network-setup.sh"
-
-  - name: Get public key
-    shell: cat {{ ansible_env['PWD'] }}/.ssh/id_rsa.pub
-    register: sshkey
-
-  - name: Copy CA certificate
-    shell: sudo juju scp nova-cloud-controller/0:/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt /usr/local/share/ca-certificates
-      creates=/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
-
-  - name: Update CA ca-certificates
-    shell: update-ca-certificates
-    sudo: yes
-
-- hosts: compute
-  user: ubuntu
-  sudo: yes
-  vars:
-    control_net: "{{ hostvars['head']['ansible_virbr0']['ipv4']['network'] }}/24"
-    gateway: "{{ hostvars['head']['ansible_default_ipv4']['address'] }}"
-  tasks:
-  - name: Install package needed by Juju
-    apt: name=python-yaml state=present
-
-  - name: Add key
-    authorized_key: user="{{ ansible_env['SUDO_USER'] }}"
-      key="{{ hostvars['head']['sshkey']['stdout'] }}"
-
-  - name: Add route via /etc/rc.local
-    template: src=templates/etc/rc.local.cloudlab
-      dest=/etc/rc.local
-      mode=0755
-    when: cloudlab
-    notify:
-    - run /etc/rc.local
-
-  - name: Add route via /etc/rc.local
-    template: src=templates/etc/rc.local
-      dest=/etc/rc.local
-      mode=0755
-    when: not cloudlab
-    notify:
-    - run /etc/rc.local
-
-  - name: Touch ~/.ssh/config
-    file: path=/var/lib/nova state=directory
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs /var/lib/nova
-      creates=/var/lib/nova/lost+found
-    when: cloudlab
-
-  handlers:
-  - name: run /etc/rc.local
-    shell: /etc/rc.local
diff --git a/legacy/cord-hosts b/legacy/cord-hosts
deleted file mode 100644
index c52b70b..0000000
--- a/legacy/cord-hosts
+++ /dev/null
@@ -1,23 +0,0 @@
-head    ansible_ssh_host=localhost connection=local
-
-[openstack]
-mysql
-rabbitmq-server
-keystone
-glance
-nova-cloud-controller
-openstack-dashboard
-ceilometer
-nagios
-neutron-api
-
-[openstack:vars]
-ansible_ssh_user=ubuntu
-
-[compute]
-node1
-node2
-node3
-
-[all:vars]
-ansible_ssh_user=cord
diff --git a/legacy/cord-post-install.yml b/legacy/cord-post-install.yml
deleted file mode 100644
index 460ed42..0000000
--- a/legacy/cord-post-install.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-# Play: Create credentials, set up some basic OpenStack
-- hosts: head
-  sudo: no
-  tasks:
-
-  - shell: uvt-kvm ip keystone
-    register: keystone_ip
-
-  - name: Create credentials
-    template: src=templates/admin-openrc-cord.sh.j2
-     dest={{ ansible_env['PWD'] }}/admin-openrc-cord.sh
-
-  - name: Copy credentials to nova-cloud-controller
-    shell: "scp admin-openrc.sh ubuntu@nova-cloud-controller:"
-
-#  - name: Copy network setup script
-#    sudo: yes
-#    copy: src=scripts/network-setup.sh
-#      dest=/usr/local/src/network-setup.sh
-#      mode=0644
-
-#  - name: Run network setup script
-#    shell: ansible nova-cloud-controller -m script -u ubuntu -a "/usr/local/src/network-setup.sh"
diff --git a/legacy/cord-setup.yml b/legacy/cord-setup.yml
deleted file mode 100644
index 994fc41..0000000
--- a/legacy/cord-setup.yml
+++ /dev/null
@@ -1,252 +0,0 @@
----
-# Play: set up head node
-- hosts: head
-  sudo: yes
-  tasks:
-
-  - apt: name={{ item }} update_cache=yes
-    with_items:
-    - python-pycurl
-    - software-properties-common
-
-  - name: Add Juju repository
-    apt_repository: repo="ppa:juju/stable"
-
-  - name: Add Ansible repository
-    apt_repository: repo="ppa:ansible/ansible"
-
-  - name: Update apt cache
-    apt: update_cache=yes
-
-  - name: Install packages
-    apt: name={{ item }} state=latest
-    with_items:
-    - ansible
-    - uvtool
-    - git
-    - bzr
-    - juju-core
-    - juju-quickstart
-    - python-novaclient
-    - python-neutronclient
-    - python-keystoneclient
-    - python-glanceclient
-
-  # On some systems ansible complains that the "/usr/share/ansible/source_control/git directory
-  # does not exist when there is an attempt to get juju-ansible. To work around this issue
-  # we precreate the directory
-  - name: Work Around - source_control directory creation
-    file: path="/usr/share/ansible/source_control/git" state=directory
-
-  - name: Get juju-ansible git repo
-    git: repo=https://github.com/cmars/juju-ansible.git
-      dest=/usr/local/src/juju-ansible
-
-  - name: Set up juju-ansible symlink
-    file: dest=/usr/local/bin/juju-ansible
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Set up juju-ansible-playbook symlink
-    file: dest=/usr/local/bin/juju-ansible-playbook
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Generate key to use in VMs
-    user: name={{ ansible_env['SUDO_USER'] }} generate_ssh_key=yes
-
-  - name: Get public key
-    shell: cat {{ ansible_env['PWD'] }}/.ssh/id_rsa.pub
-    register: sshkey
-
-  - name: Add key
-    authorized_key: user="{{ ansible_env['SUDO_USER'] }}"
-      key="{{ sshkey.stdout }}"
-
-  - name: Copy keypair to /tmp
-    shell: cp -f {{ ansible_env['PWD'] }}/.ssh/{{ item }} /tmp; chmod +r /tmp/{{ item }}
-    with_items:
-    - id_rsa
-    - id_rsa.pub
-
-  - name: Stat mkextrafs
-    stat: path="/usr/testbed/bin/mkextrafs"
-    register: is_mkextrafs
-
-  - name: Check whether we're on Cloudlab
-    set_fact:
-      is_cloudlab : "{{ is_mkextrafs.stat.exists }}"
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs -f /var/lib/uvtool/libvirt/images
-      creates=/var/lib/uvtool/libvirt/images/lost+found
-    when: is_cloudlab
-
-  - name: Add myself to libvirtd group
-    user: name={{ ansible_env['SUDO_USER'] }}
-      groups=libvirtd
-      append=yes
-
-  - name: Get trusty image for uvtool
-    shell: uvt-simplestreams-libvirt sync --source http://cloud-images.ubuntu.com/daily release=trusty arch=amd64
-
-# Play: create VMs to host OpenStack services
-- hosts: head
-  sudo: yes
-  tasks:
-
-  # Yes I know
-  - name: Add local resolver to /etc/resolv.conf
-    lineinfile: dest=/etc/resolv.conf
-      insertafter=".*DO NOT EDIT THIS FILE.*"
-      line="nameserver 192.168.122.1"
-    when: test_setup is defined
-
-  - name: Touch .ssh/config
-    sudo: no
-    file: path={{ ansible_env['PWD'] }}/.ssh/config
-      state=touch
-
-  - name: Disable host key checking in SSH
-    sudo: no
-    lineinfile: dest={{ ansible_env['PWD'] }}/.ssh/config
-      line="StrictHostKeyChecking no"
-
-  - name: Create VMs to host OpenCloud services on mgmtbr
-    sudo: no
-    script: scripts/create-vms-cord.sh
-    when: test_setup is not defined
-
-  - name: Create VMs to host OpenCloud services on virbr0
-    sudo: no
-    script: scripts/create-vms-cord.sh --testing
-    when: test_setup is defined
-
-  - name: Set up /etc/ansible/hosts
-    template: src=templates/etc/ansible/cord-hosts.j2
-      dest=/etc/ansible/hosts
-
-  - name: Copy ansible.cfg to disable host key checking
-    sudo: no
-    copy: src=files/ansible.cfg
-      dest={{ ansible_env['PWD'] }}/.ansible.cfg
-
-  - name: Touch ~/.ssh/config
-    sudo: no
-    file: path={{ ansible_env['PWD'] }}/.ssh/config state=touch
-
-  - name: Test that we can log into every VM using Ansible
-    sudo: no
-    shell: ansible services -m ping -u ubuntu
-
-# Play: prepare compute nodes for installation
-- hosts: compute
-  sudo: yes
-  tasks:
-  - name: Install package needed by Juju
-    apt: name=python-yaml state=present
-
-  - name: Add key for standard user
-    authorized_key: user="{{ ansible_env['SUDO_USER'] }}"
-      key="{{ hostvars['head']['sshkey']['stdout'] }}"
-
-  - name: Add key for root
-    authorized_key: user="root"
-      key="{{ hostvars['head']['sshkey']['stdout'] }}"
-
-  - name: Stat mkextrafs
-    stat: path="/usr/testbed/bin/mkextrafs"
-    register: is_mkextrafs
-
-  - name: Check whether we're on Cloudlab
-    set_fact:
-      is_cloudlab : "{{ is_mkextrafs.stat.exists }}"
-
-  - name: Make sure that /var/lib/nova exists
-    file: path=/var/lib/nova state=directory
-    when: is_cloudlab
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs -f /var/lib/nova
-      creates=/var/lib/nova/lost+found
-    when: is_cloudlab
-
-# Play: Install services using Juju
-- hosts: head
-  vars:
-    charm_src: /usr/local/src/charms/trusty
-  tasks:
-  - name: Initialize Juju
-    sudo: no
-    shell: juju generate-config
-      creates={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - name: Juju config file
-    sudo: no
-    template: src=templates/environments.yaml.j2
-      dest={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - name: Bootstrap Juju
-    sudo: no
-    shell: juju bootstrap
-      creates={{ ansible_env['PWD'] }}/.juju/environments/manual.jenv
-
-  - name: Check that 'juju status' works
-    sudo: no
-    shell: juju status
-
-  - name: Pause for 15 seconds (problem with mysql VM not being added to Juju)
-    pause: seconds=15
-
-  - name: Add virtual machines to Juju's control
-    shell: juju add-machine ssh:{{ item }}
-    with_items: "{{ groups['openstack'] }}"
-
-  - name: Add compute nodes to Juju's control
-    shell: juju add-machine ssh:{{ item }}
-    with_items: "{{ groups['compute'] }}"
-
-  - name: Copy cord.yaml bundle
-    template: src=templates/cord.yaml dest={{ ansible_env['PWD'] }}/cord.yaml
-
-  - name: Update root certificate database
-    sudo: yes
-    command: update-ca-certificates
-
-  - name: Deploy OpenStack services with Juju
-    shell: juju quickstart --no-browser cord.yaml
-
-- hosts: head
-  sudo: no
-  tasks:
-
-  - name: Create credentials
-    template: src=templates/admin-openrc-cord.sh.j2
-     dest={{ ansible_env['PWD'] }}/admin-openrc.sh
-
-  - name: Copy credentials to /tmp
-    shell: cp -f {{ ansible_env['PWD'] }}/admin-openrc.sh /tmp
-
-  - name: Copy credentials to nova-cloud-controller
-    shell: "scp admin-openrc.sh ubuntu@nova-cloud-controller:"
-
-- hosts: head
-  sudo: no
-  tasks:
-
-  - name: Copy over VM setup files
-    copy: src=files/tmp/{{ item }}
-      dest=/tmp
-    with_items:
-    - set-up-xos.yml
-    - set-up-onos.yml
-
-  - name: Copy over ONOS docker-compose.yml
-    copy: src=files/onos/docker-compose.yml
-      dest=/tmp
-
-  - name: Set up xos VM
-    shell: ansible-playbook /tmp/set-up-xos.yml
-
-  - name: Set up onos-cord VM
-    shell: ansible-playbook /tmp/set-up-onos.yml
diff --git a/legacy/cord-test-hosts b/legacy/cord-test-hosts
deleted file mode 100644
index 8f01e1f..0000000
--- a/legacy/cord-test-hosts
+++ /dev/null
@@ -1,21 +0,0 @@
-head    ansible_ssh_host=localhost connection=local
-
-[openstack]
-mysql
-rabbitmq-server
-keystone
-glance
-nova-cloud-controller
-openstack-dashboard
-ceilometer
-nagios
-neutron-api
-
-[openstack:vars]
-ansible_ssh_user=ubuntu
-
-[compute]
-nova-compute ansible_ssh_user=ubuntu
-
-[all:vars]
-test_setup=true
diff --git a/legacy/hawaii-compute.yml b/legacy/hawaii-compute.yml
deleted file mode 100644
index c342483..0000000
--- a/legacy/hawaii-compute.yml
+++ /dev/null
@@ -1,72 +0,0 @@
----
-- hosts: head
-  sudo: no
-  user: ubuntu
-  tasks:
-  - name: Remind user what is going to happen
-    pause: prompt="Install nova-compute on all unused machines managed by Juju"
-
-  - name: Deploy nova-compute
-    script: scripts/juju-compute-setup.py
-
-  - name: Try to avoid race condition
-    pause: seconds=5
-
-  - name: Wait until nova-compute is deployed
-    script: scripts/wait-for-services.sh
-
-  - name: Make sure we're using KVM
-    shell: juju set nova-compute virt-type=kvm
-
-  - name: Add nova-compute relations
-    script: scripts/juju-compute-relations.py
-
-  - name: Try to avoid race condition
-    pause: seconds=5
-
-  - name: Wait until relations are added
-    script: scripts/wait-for-services.sh
-
-# Play: set up ansible-pull for OpenCloud-specific files on nova-compute nodes
-- hosts: compute
-  sudo: yes
-  user: ubuntu
-  vars:
-
-    # schedule is fed directly to cron
-    schedule: '*/15 * * * *'
-
-    # User to run ansible-pull as from cron
-    cron_user: root
-
-    # File that ansible will use for logs
-    logfile: /var/log/ansible-pull.log
-
-    # Directory to where repository will be cloned
-    workdir: /var/lib/ansible/local
-
-    # Repository to check out
-    # repo must contain a local.yml file at top level
-    #repo_url: git://github.com/sfromm/ansible-playbooks.git
-    repo_url: git://github.com/andybavier/opencloud-nova-compute-ansible.git
-
-    # Branch or tag to checkout
-    repo_version: kilo
-
-  tasks:
-
-  - name: Install ansible
-    apt: name=ansible state=installed
-
-  - name: Basic ansible inventory
-    template: src=templates/etc/ansible/hosts.j2
-      dest=/etc/ansible/hosts
-
-  - name: Create local directory to work from
-    file: path={{workdir}} state=directory owner=root group=root mode=0751
-
-  - name: Create crontab entry to clone/pull git repository
-    template: src=templates/etc/cron.d/ansible-pull.j2 dest=/etc/cron.d/ansible-pull owner=root group=root mode=0644
-
-  - name: Create logrotate entry for ansible-pull.log
-    template: src=templates/etc/logrotate.d/ansible-pull.j2 dest=/etc/logrotate.d/ansible-pull owner=root group=root mode=0644
diff --git a/legacy/hawaii-setup.yml b/legacy/hawaii-setup.yml
deleted file mode 100644
index 9dde236..0000000
--- a/legacy/hawaii-setup.yml
+++ /dev/null
@@ -1,280 +0,0 @@
----
-# Play: set up head node
-# Assumes basic /etc/ansible/hosts file
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  vars:
-    # Each OpenCloud cluster needs a unique mgmt_net_prefix
-    mgmt_net_prefix: 192.168.104
-  tasks:
-
-  - apt: name={{ item }} update_cache=yes
-    with_items:
-    - python-pycurl
-    - software-properties-common
-
-  - name: Add Juju repository
-    apt_repository: repo="ppa:juju/stable"
-
-  - name: Add Ansible repository
-    apt_repository: repo="ppa:ansible/ansible"
-
-  - name: Update Ansible cache
-    apt: update_cache=yes
-
-  - name: Install packages
-    apt: name={{ item }} state=latest
-    with_items:
-    - ansible
-    - uvtool
-    - git
-    - bzr
-    - juju-core
-    - python-novaclient
-    - python-neutronclient
-    - python-keystoneclient
-    - python-glanceclient
-
-  - name: Get juju-ansible git repo
-    git: repo=https://github.com/cmars/juju-ansible.git
-      dest=/usr/local/src/juju-ansible
-
-  - name: Set up juju-ansible symlink
-    file: dest=/usr/local/bin/juju-ansible
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Set up juju-ansible-playbook symlink
-    file: dest=/usr/local/bin/juju-ansible-playbook
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Generate key to use in VMs
-    user: name={{ ansible_env['SUDO_USER'] }} generate_ssh_key=yes
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs /var/lib/uvtool/libvirt/images
-      creates=/var/lib/uvtool/libvirt/images/lost+found
-    when: cloudlab
-
-  - name: Add myself to libvirtd group
-    user: name={{ ansible_env['SUDO_USER'] }}
-      groups=libvirtd
-      append=yes
-
-  - name: Get trusty image for uvtool
-    shell: uvt-simplestreams-libvirt sync --source http://cloud-images.ubuntu.com/daily release=trusty arch=amd64
-
-  - name: Change the virbr0 subnet to {{ mgmt_net_prefix }}.0/24
-    template: src=templates/etc/libvirt/qemu/networks/default.xml.j2
-      dest=/etc/libvirt/qemu/networks/default.xml
-    notify:
-    - recreate default net
-
-  handlers:
-  - name: recreate default net
-    script: scripts/recreate-virbr0.sh
-
-# Play: create VMs to host OpenStack services
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  tasks:
-  - name: Create VMs to host OpenCloud services
-    sudo: no
-    script: scripts/create-vms.sh
-
-  - include: tasks/vm-ips.yml
-
-  - name: Add VMs to /etc/hosts
-    template: src=templates/etc/hosts.j2
-      dest=/etc/hosts
-    notify:
-    - Reload dnsmasq
-
-  - name: Set up /etc/ansible/hosts
-    template: src=templates/etc/ansible/hosts.j2
-      dest=/etc/ansible/hosts
-
-  - name: Copy ansible.cfg to disable host key checking
-    sudo: no
-    copy: src=files/ansible.cfg
-      dest={{ ansible_env['PWD'] }}/.ansible.cfg
-
-  - name: Touch ~/.ssh/config
-    sudo: no
-    file: path={{ ansible_env['PWD'] }}/.ssh/config state=touch
-
-  - name: Disable host key checking in SSH
-    sudo: no
-    lineinfile: dest={{ ansible_env['PWD'] }}/.ssh/config
-      line="StrictHostKeyChecking no"
-
-  - name: Test that we can log into every VM
-    sudo: no
-    shell: ansible services -m ping -u ubuntu
-
-  handlers:
-  - name: Reload dnsmasq
-    shell: killall -HUP dnsmasq
-
-# Play: Install services using Juju
-- hosts: head
-  user: ubuntu
-  vars:
-    charm_src: /usr/local/src/charms/trusty
-  tasks:
-  - name: Initialize Juju
-    sudo: no
-    shell: juju generate-config
-      creates={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - shell: uvt-kvm ip juju
-    register: juju_ip
-
-  - name: Juju config file
-    sudo: no
-    template: src=templates/environments.yaml.j2
-      dest={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - name: Bootstrap Juju
-    sudo: no
-    shell: juju bootstrap
-      creates={{ ansible_env['PWD'] }}/.juju/environments/manual.jenv
-
-  - name: Copy openstack.cfg for Juju
-    sudo: yes
-    copy: src=files/openstack.cfg
-      dest=/usr/local/src/openstack.cfg
-
-  - name: Check for /usr/local/src/charms/trusty
-    sudo: yes
-    file: path={{ charm_src }}
-       state=directory
-       mode=0755
-
-  - name: Deploy OpenStack services with Juju
-    script: scripts/juju-setup.py
-
-  - name: Add all Juju relations between services
-    script: scripts/juju-relations.py
-
-  - name: Wait for relations to be fully added
-    script: scripts/wait-for-services.sh
-
-# Play: Use libvirt hooks to set up iptables
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  tasks:
-  - name: Enable port forwarding for services
-    copy: src=files/{{ item }}
-      dest={{ item }}
-      mode=0755
-    notify:
-    - reload libvirt config
-    - run qemu hook
-    with_items:
-    - /etc/libvirt/hooks/daemon
-    - /etc/libvirt/hooks/qemu
-
-  handlers:
-  - name: reload libvirt config
-    shell: killall -HUP libvirtd
-
-  - name: run qemu hook
-    shell: /etc/libvirt/hooks/qemu start start
-
-# Play: Create credentials, set up some basic OpenStack
-- hosts: head
-  user: ubuntu
-  sudo: no
-  tasks:
-
-  - name: Get keystone admin password
-    shell: juju run --unit=keystone/0 "sudo cat /var/lib/keystone/keystone.passwd"
-    register: keystone_password
-
-  - shell: uvt-kvm ip keystone
-    register: keystone_ip
-
-  - name: Create credentials
-    template: src=templates/admin-openrc.sh.j2
-     dest={{ ansible_env['PWD'] }}/admin-openrc.sh
-
-  - name: (CloudLab) Make sure that /root/setup exists
-    file: path=/root/setup state=directory
-    sudo: yes
-    when: cloudlab
-
-  - name: (CloudLab) Copy credentials to /root/setup
-    shell: scp admin-openrc.sh /root/setup
-    sudo: yes
-    when: cloudlab
-
-  - name: Copy credentials to nova-cloud-controller
-    shell: "scp admin-openrc.sh ubuntu@nova-cloud-controller:"
-
-  - name: Copy network setup script
-    sudo: yes
-    copy: src=scripts/network-setup.sh
-      dest=/usr/local/src/network-setup.sh
-      mode=0644
-
-  - name: Run network setup script
-    shell: ansible nova-cloud-controller -m script -u ubuntu -a "/usr/local/src/network-setup.sh"
-
-  - name: Get public key
-    shell: cat {{ ansible_env['PWD'] }}/.ssh/id_rsa.pub
-    register: sshkey
-
-  - name: Copy CA certificate
-    shell: sudo juju scp nova-cloud-controller/0:/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt /usr/local/share/ca-certificates
-      creates=/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
-
-  - name: Update CA ca-certificates
-    shell: update-ca-certificates
-    sudo: yes
-
-- hosts: compute
-  user: ubuntu
-  sudo: yes
-  vars:
-    control_net: "{{ hostvars['head']['ansible_virbr0']['ipv4']['network'] }}/24"
-    gateway: "{{ hostvars['head']['ansible_default_ipv4']['address'] }}"
-  tasks:
-  - name: Install package needed by Juju
-    apt: name=python-yaml state=present
-
-  - name: Add key
-    authorized_key: user="{{ ansible_env['SUDO_USER'] }}"
-      key="{{ hostvars['head']['sshkey']['stdout'] }}"
-
-  - name: Add route via /etc/rc.local
-    template: src=templates/etc/rc.local.cloudlab
-      dest=/etc/rc.local
-      mode=0755
-    when: cloudlab
-    notify:
-    - run /etc/rc.local
-
-  - name: Add route via /etc/rc.local
-    template: src=templates/etc/rc.local
-      dest=/etc/rc.local
-      mode=0755
-    when: not cloudlab
-    notify:
-    - run /etc/rc.local
-
-  - name: Touch ~/.ssh/config
-    file: path=/var/lib/nova state=directory
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs /var/lib/nova
-      creates=/var/lib/nova/lost+found
-    when: cloudlab
-
-  handlers:
-  - name: run /etc/rc.local
-    shell: /etc/rc.local
diff --git a/legacy/princeton-hosts b/legacy/princeton-hosts
deleted file mode 100644
index 8d7bd73..0000000
--- a/legacy/princeton-hosts
+++ /dev/null
@@ -1,23 +0,0 @@
-head    ansible_ssh_host=node70.princeton.vicci.org
-
-[compute]
-node37.princeton.vicci.org
-node39.princeton.vicci.org
-node41.princeton.vicci.org
-node43.princeton.vicci.org
-node45.princeton.vicci.org
-node49.princeton.vicci.org
-node51.princeton.vicci.org
-node52.princeton.vicci.org
-node54.princeton.vicci.org
-node55.princeton.vicci.org
-node57.princeton.vicci.org
-node59.princeton.vicci.org
-node65.princeton.vicci.org
-node66.princeton.vicci.org
-node67.princeton.vicci.org
-node68.princeton.vicci.org
-node69.princeton.vicci.org
-
-[all:vars]
-cloudlab=false
diff --git a/legacy/princeton-setup.yml b/legacy/princeton-setup.yml
deleted file mode 100644
index 47895f6..0000000
--- a/legacy/princeton-setup.yml
+++ /dev/null
@@ -1,275 +0,0 @@
----
-# Play: set up head node
-# Assumes basic /etc/ansible/hosts file
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  vars:
-    # Each OpenCloud cluster needs a unique mgmt_net_prefix
-    mgmt_net_prefix: 192.168.100
-  tasks:
-
-  - apt: name={{ item }} update_cache=yes
-    with_items:
-    - python-pycurl
-    - software-properties-common
-
-  - name: Add Juju repository
-    apt_repository: repo="ppa:juju/stable"
-
-  - name: Add Ansible repository
-    apt_repository: repo="ppa:ansible/ansible"
-
-  - name: Update Ansible cache
-    apt: update_cache=yes
-
-  - name: Install packages
-    apt: name={{ item }} state=latest
-    with_items:
-    - ansible
-    - uvtool
-    - git
-    - bzr
-    - juju-core
-    - python-novaclient
-    - python-neutronclient
-    - python-keystoneclient
-    - python-glanceclient
-
-  - name: Get juju-ansible git repo
-    git: repo=https://github.com/cmars/juju-ansible.git
-      dest=/usr/local/src/juju-ansible
-
-  - name: Set up juju-ansible symlink
-    file: dest=/usr/local/bin/juju-ansible
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Set up juju-ansible-playbook symlink
-    file: dest=/usr/local/bin/juju-ansible-playbook
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Generate key to use in VMs
-    user: name={{ ansible_env['SUDO_USER'] }} generate_ssh_key=yes
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs /var/lib/uvtool/libvirt/images
-      creates=/var/lib/uvtool/libvirt/images/lost+found
-    when: cloudlab
-
-  - name: Add myself to libvirtd group
-    user: name={{ ansible_env['SUDO_USER'] }}
-      groups=libvirtd
-      append=yes
-
-  - name: Get trusty image for uvtool
-    shell: uvt-simplestreams-libvirt sync --source http://cloud-images.ubuntu.com/daily release=trusty arch=amd64
-
-  - name: Change the virbr0 subnet to {{ mgmt_net_prefix }}.0/24
-    template: src=templates/etc/libvirt/qemu/networks/default.xml.j2
-      dest=/etc/libvirt/qemu/networks/default.xml
-    notify:
-    - recreate default net
-
-  handlers:
-  - name: recreate default net
-    script: scripts/recreate-virbr0.sh
-
-# Play: create VMs to host OpenStack services
-- hosts: head
-  sudo: yes
-  tasks:
-  - name: Create VMs to host OpenCloud services
-    sudo: no
-    script: scripts/create-vms.sh
-
-  - include: tasks/vm-ips.yml
-
-  - name: Add VMs to /etc/hosts
-    template: src=templates/etc/hosts.j2
-      dest=/etc/hosts
-    notify:
-    - Reload dnsmasq
-
-  - name: Set up /etc/ansible/hosts
-    template: src=templates/etc/ansible/hosts.j2
-      dest=/etc/ansible/hosts
-
-  - name: Copy ansible.cfg to disable host key checking
-    sudo: no
-    copy: src=files/ansible.cfg
-      dest={{ ansible_env['PWD'] }}/.ansible.cfg
-
-  - name: Touch ~/.ssh/config
-    sudo: no
-    file: path={{ ansible_env['PWD'] }}/.ssh/config state=touch
-
-  - name: Disable host key checking in SSH
-    sudo: no
-    lineinfile: dest={{ ansible_env['PWD'] }}/.ssh/config
-      line="StrictHostKeyChecking no"
-
-  - name: Test that we can log into every VM
-    sudo: no
-    shell: ansible services -m ping -u ubuntu
-
-  handlers:
-  - name: Reload dnsmasq
-    shell: killall -HUP dnsmasq
-
-# Play: Install services using Juju
-- hosts: head
-  vars:
-    charm_src: /usr/local/src/charms/trusty
-  tasks:
-  - name: Initialize Juju
-    sudo: no
-    shell: juju generate-config
-      creates={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - shell: uvt-kvm ip juju
-    register: juju_ip
-
-  - name: Juju config file
-    sudo: no
-    template: src=templates/environments.yaml.j2
-      dest={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - name: Bootstrap Juju
-    sudo: no
-    shell: juju bootstrap
-      creates={{ ansible_env['PWD'] }}/.juju/environments/manual.jenv
-
-  - name: Copy openstack.cfg for Juju
-    sudo: yes
-    copy: src=files/openstack.cfg
-      dest=/usr/local/src/openstack.cfg
-
-  - name: Check for /usr/local/src/charms/trusty
-    sudo: yes
-    file: path={{ charm_src }}
-       state=directory
-       mode=0755
-
-  - name: Deploy OpenStack services with Juju
-    script: scripts/juju-setup.py
-
-  - name: Add all Juju relations between services
-    script: scripts/juju-relations.py
-
-  - name: Wait for relations to be fully added
-    script: scripts/wait-for-services.sh
-
-# Play: Use libvirt hooks to set up iptables
-- hosts: head
-  sudo: yes
-  tasks:
-  - name: Enable port forwarding for services
-    copy: src=files/{{ item }}
-      dest={{ item }}
-      mode=0755
-    notify:
-    - reload libvirt config
-    - run qemu hook
-    with_items:
-    - /etc/libvirt/hooks/daemon
-    - /etc/libvirt/hooks/qemu
-
-  handlers:
-  - name: reload libvirt config
-    shell: killall -HUP libvirtd
-
-  - name: run qemu hook
-    shell: /etc/libvirt/hooks/qemu start start
-
-# Play: Create credentials, set up some basic OpenStack
-- hosts: head
-  sudo: no
-  tasks:
-
-  - name: Get keystone admin password
-    shell: juju run --unit=keystone/0 "sudo cat /var/lib/keystone/keystone.passwd"
-    register: keystone_password
-
-  - shell: uvt-kvm ip keystone
-    register: keystone_ip
-
-  - name: Create credentials
-    template: src=templates/admin-openrc.sh.j2
-     dest={{ ansible_env['PWD'] }}/admin-openrc.sh
-
-  - name: (CloudLab) Make sure that /root/setup exists
-    file: path=/root/setup state=directory
-    sudo: yes
-    when: cloudlab
-
-  - name: (CloudLab) Copy credentials to /root/setup
-    shell: scp admin-openrc.sh /root/setup
-    sudo: yes
-    when: cloudlab
-
-  - name: Copy credentials to nova-cloud-controller
-    shell: "scp admin-openrc.sh ubuntu@nova-cloud-controller:"
-
-  - name: Copy network setup script
-    sudo: yes
-    copy: src=scripts/network-setup.sh
-      dest=/usr/local/src/network-setup.sh
-      mode=0644
-
-  - name: Run network setup script
-    shell: ansible nova-cloud-controller -m script -u ubuntu -a "/usr/local/src/network-setup.sh"
-
-  - name: Get public key
-    shell: cat {{ ansible_env['PWD'] }}/.ssh/id_rsa.pub
-    register: sshkey
-
-  - name: Copy CA certificate
-    shell: sudo juju scp nova-cloud-controller/0:/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt /usr/local/share/ca-certificates
-      creates=/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
-
-  - name: Update CA ca-certificates
-    shell: update-ca-certificates
-    sudo: yes
-
-- hosts: compute
-  sudo: yes
-  vars:
-    control_net: "{{ hostvars['head']['ansible_virbr0']['ipv4']['network'] }}/24"
-    gateway: "{{ hostvars['head']['ansible_default_ipv4']['address'] }}"
-  tasks:
-  - name: Install package needed by Juju
-    apt: name=python-yaml state=present
-
-  - name: Add key
-    authorized_key: user="{{ ansible_env['SUDO_USER'] }}"
-      key="{{ hostvars['head']['sshkey']['stdout'] }}"
-
-  - name: Add route via /etc/rc.local
-    template: src=templates/etc/rc.local.cloudlab
-      dest=/etc/rc.local
-      mode=0755
-    when: cloudlab
-    notify:
-    - run /etc/rc.local
-
-  - name: Add route via /etc/rc.local
-    template: src=templates/etc/rc.local
-      dest=/etc/rc.local
-      mode=0755
-    when: not cloudlab
-    notify:
-    - run /etc/rc.local
-
-  - name: Touch ~/.ssh/config
-    file: path=/var/lib/nova state=directory
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs /var/lib/nova
-      creates=/var/lib/nova/lost+found
-    when: cloudlab
-
-  handlers:
-  - name: run /etc/rc.local
-    shell: /etc/rc.local
diff --git a/legacy/setup-mgmtbr.sh b/legacy/setup-mgmtbr.sh
deleted file mode 100755
index 62f05c6..0000000
--- a/legacy/setup-mgmtbr.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-IFACE=$1
-
-# Assumes that mgmtbr is set up on 10.10.1.1 interface
-apt-get install bridge-utils dnsmasq
-brctl addbr mgmtbr
-ifconfig $IFACE 0.0.0.0
-brctl addif mgmtbr $IFACE
-ifconfig mgmtbr 10.10.1.1/24 up
-
-cat <<EOF > /etc/dnsmasq.d/cord
-dhcp-range=10.10.1.3,10.10.1.253
-interface=mgmtbr
-dhcp-option=option:router,10.10.1.1
-EOF
-
-service dnsmasq restart
-
-# Assumes eth0 is the public interface
-iptables -t nat -I POSTROUTING -s 10.10.1.0/24 \! -d 10.10.1.0/24 -j MASQUERADE
diff --git a/legacy/singapore-compute.yml b/legacy/singapore-compute.yml
deleted file mode 100644
index c342483..0000000
--- a/legacy/singapore-compute.yml
+++ /dev/null
@@ -1,72 +0,0 @@
----
-- hosts: head
-  sudo: no
-  user: ubuntu
-  tasks:
-  - name: Remind user what is going to happen
-    pause: prompt="Install nova-compute on all unused machines managed by Juju"
-
-  - name: Deploy nova-compute
-    script: scripts/juju-compute-setup.py
-
-  - name: Try to avoid race condition
-    pause: seconds=5
-
-  - name: Wait until nova-compute is deployed
-    script: scripts/wait-for-services.sh
-
-  - name: Make sure we're using KVM
-    shell: juju set nova-compute virt-type=kvm
-
-  - name: Add nova-compute relations
-    script: scripts/juju-compute-relations.py
-
-  - name: Try to avoid race condition
-    pause: seconds=5
-
-  - name: Wait until relations are added
-    script: scripts/wait-for-services.sh
-
-# Play: set up ansible-pull for OpenCloud-specific files on nova-compute nodes
-- hosts: compute
-  sudo: yes
-  user: ubuntu
-  vars:
-
-    # schedule is fed directly to cron
-    schedule: '*/15 * * * *'
-
-    # User to run ansible-pull as from cron
-    cron_user: root
-
-    # File that ansible will use for logs
-    logfile: /var/log/ansible-pull.log
-
-    # Directory to where repository will be cloned
-    workdir: /var/lib/ansible/local
-
-    # Repository to check out
-    # repo must contain a local.yml file at top level
-    #repo_url: git://github.com/sfromm/ansible-playbooks.git
-    repo_url: git://github.com/andybavier/opencloud-nova-compute-ansible.git
-
-    # Branch or tag to checkout
-    repo_version: kilo
-
-  tasks:
-
-  - name: Install ansible
-    apt: name=ansible state=installed
-
-  - name: Basic ansible inventory
-    template: src=templates/etc/ansible/hosts.j2
-      dest=/etc/ansible/hosts
-
-  - name: Create local directory to work from
-    file: path={{workdir}} state=directory owner=root group=root mode=0751
-
-  - name: Create crontab entry to clone/pull git repository
-    template: src=templates/etc/cron.d/ansible-pull.j2 dest=/etc/cron.d/ansible-pull owner=root group=root mode=0644
-
-  - name: Create logrotate entry for ansible-pull.log
-    template: src=templates/etc/logrotate.d/ansible-pull.j2 dest=/etc/logrotate.d/ansible-pull owner=root group=root mode=0644
diff --git a/legacy/singapore-hosts b/legacy/singapore-hosts
deleted file mode 100644
index 8c1b12b..0000000
--- a/legacy/singapore-hosts
+++ /dev/null
@@ -1,9 +0,0 @@
-head    ansible_ssh_host=opencloud0.sing.internet2.edu
-
-[compute]
-opencloud1.sing.internet2.edu
-opencloud2.sing.internet2.edu
-opencloud3.sing.internet2.edu
-
-[all:vars]
-cloudlab=false
diff --git a/legacy/singapore-setup.yml b/legacy/singapore-setup.yml
deleted file mode 100644
index 725d679..0000000
--- a/legacy/singapore-setup.yml
+++ /dev/null
@@ -1,280 +0,0 @@
----
-# Play: set up head node
-# Assumes basic /etc/ansible/hosts file
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  vars:
-    # Each OpenCloud cluster needs a unique mgmt_net_prefix
-    mgmt_net_prefix: 192.168.103
-  tasks:
-
-  - apt: name={{ item }} update_cache=yes
-    with_items:
-    - python-pycurl
-    - software-properties-common
-
-  - name: Add Juju repository
-    apt_repository: repo="ppa:juju/stable"
-
-  - name: Add Ansible repository
-    apt_repository: repo="ppa:ansible/ansible"
-
-  - name: Update Ansible cache
-    apt: update_cache=yes
-
-  - name: Install packages
-    apt: name={{ item }} state=latest
-    with_items:
-    - ansible
-    - uvtool
-    - git
-    - bzr
-    - juju-core
-    - python-novaclient
-    - python-neutronclient
-    - python-keystoneclient
-    - python-glanceclient
-
-  - name: Get juju-ansible git repo
-    git: repo=https://github.com/cmars/juju-ansible.git
-      dest=/usr/local/src/juju-ansible
-
-  - name: Set up juju-ansible symlink
-    file: dest=/usr/local/bin/juju-ansible
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Set up juju-ansible-playbook symlink
-    file: dest=/usr/local/bin/juju-ansible-playbook
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Generate key to use in VMs
-    user: name={{ ansible_env['SUDO_USER'] }} generate_ssh_key=yes
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs /var/lib/uvtool/libvirt/images
-      creates=/var/lib/uvtool/libvirt/images/lost+found
-    when: cloudlab
-
-  - name: Add myself to libvirtd group
-    user: name={{ ansible_env['SUDO_USER'] }}
-      groups=libvirtd
-      append=yes
-
-  - name: Get trusty image for uvtool
-    shell: uvt-simplestreams-libvirt sync --source http://cloud-images.ubuntu.com/daily release=trusty arch=amd64
-
-  - name: Change the virbr0 subnet to {{ mgmt_net_prefix }}.0/24
-    template: src=templates/etc/libvirt/qemu/networks/default.xml.j2
-      dest=/etc/libvirt/qemu/networks/default.xml
-    notify:
-    - recreate default net
-
-  handlers:
-  - name: recreate default net
-    script: scripts/recreate-virbr0.sh
-
-# Play: create VMs to host OpenStack services
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  tasks:
-  - name: Create VMs to host OpenCloud services
-    sudo: no
-    script: scripts/create-vms.sh
-
-  - include: tasks/vm-ips.yml
-
-  - name: Add VMs to /etc/hosts
-    template: src=templates/etc/hosts.j2
-      dest=/etc/hosts
-    notify:
-    - Reload dnsmasq
-
-  - name: Set up /etc/ansible/hosts
-    template: src=templates/etc/ansible/hosts.j2
-      dest=/etc/ansible/hosts
-
-  - name: Copy ansible.cfg to disable host key checking
-    sudo: no
-    copy: src=files/ansible.cfg
-      dest={{ ansible_env['PWD'] }}/.ansible.cfg
-
-  - name: Touch ~/.ssh/config
-    sudo: no
-    file: path={{ ansible_env['PWD'] }}/.ssh/config state=touch
-
-  - name: Disable host key checking in SSH
-    sudo: no
-    lineinfile: dest={{ ansible_env['PWD'] }}/.ssh/config
-      line="StrictHostKeyChecking no"
-
-  - name: Test that we can log into every VM
-    sudo: no
-    shell: ansible services -m ping -u ubuntu
-
-  handlers:
-  - name: Reload dnsmasq
-    shell: killall -HUP dnsmasq
-
-# Play: Install services using Juju
-- hosts: head
-  user: ubuntu
-  vars:
-    charm_src: /usr/local/src/charms/trusty
-  tasks:
-  - name: Initialize Juju
-    sudo: no
-    shell: juju generate-config
-      creates={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - shell: uvt-kvm ip juju
-    register: juju_ip
-
-  - name: Juju config file
-    sudo: no
-    template: src=templates/environments.yaml.j2
-      dest={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - name: Bootstrap Juju
-    sudo: no
-    shell: juju bootstrap
-      creates={{ ansible_env['PWD'] }}/.juju/environments/manual.jenv
-
-  - name: Copy openstack.cfg for Juju
-    sudo: yes
-    copy: src=files/openstack.cfg
-      dest=/usr/local/src/openstack.cfg
-
-  - name: Check for /usr/local/src/charms/trusty
-    sudo: yes
-    file: path={{ charm_src }}
-       state=directory
-       mode=0755
-
-  - name: Deploy OpenStack services with Juju
-    script: scripts/juju-setup.py
-
-  - name: Add all Juju relations between services
-    script: scripts/juju-relations.py
-
-  - name: Wait for relations to be fully added
-    script: scripts/wait-for-services.sh
-
-# Play: Use libvirt hooks to set up iptables
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  tasks:
-  - name: Enable port forwarding for services
-    copy: src=files/{{ item }}
-      dest={{ item }}
-      mode=0755
-    notify:
-    - reload libvirt config
-    - run qemu hook
-    with_items:
-    - /etc/libvirt/hooks/daemon
-    - /etc/libvirt/hooks/qemu
-
-  handlers:
-  - name: reload libvirt config
-    shell: killall -HUP libvirtd
-
-  - name: run qemu hook
-    shell: /etc/libvirt/hooks/qemu start start
-
-# Play: Create credentials, set up some basic OpenStack
-- hosts: head
-  user: ubuntu
-  sudo: no
-  tasks:
-
-  - name: Get keystone admin password
-    shell: juju run --unit=keystone/0 "sudo cat /var/lib/keystone/keystone.passwd"
-    register: keystone_password
-
-  - shell: uvt-kvm ip keystone
-    register: keystone_ip
-
-  - name: Create credentials
-    template: src=templates/admin-openrc.sh.j2
-     dest={{ ansible_env['PWD'] }}/admin-openrc.sh
-
-  - name: (CloudLab) Make sure that /root/setup exists
-    file: path=/root/setup state=directory
-    sudo: yes
-    when: cloudlab
-
-  - name: (CloudLab) Copy credentials to /root/setup
-    shell: scp admin-openrc.sh /root/setup
-    sudo: yes
-    when: cloudlab
-
-  - name: Copy credentials to nova-cloud-controller
-    shell: "scp admin-openrc.sh ubuntu@nova-cloud-controller:"
-
-  - name: Copy network setup script
-    sudo: yes
-    copy: src=scripts/network-setup.sh
-      dest=/usr/local/src/network-setup.sh
-      mode=0644
-
-  - name: Run network setup script
-    shell: ansible nova-cloud-controller -m script -u ubuntu -a "/usr/local/src/network-setup.sh"
-
-  - name: Get public key
-    shell: cat {{ ansible_env['PWD'] }}/.ssh/id_rsa.pub
-    register: sshkey
-
-  - name: Copy CA certificate
-    shell: sudo juju scp nova-cloud-controller/0:/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt /usr/local/share/ca-certificates
-      creates=/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
-
-  - name: Update CA ca-certificates
-    shell: update-ca-certificates
-    sudo: yes
-
-- hosts: compute
-  user: ubuntu
-  sudo: yes
-  vars:
-    control_net: "{{ hostvars['head']['ansible_virbr0']['ipv4']['network'] }}/24"
-    gateway: "{{ hostvars['head']['ansible_default_ipv4']['address'] }}"
-  tasks:
-  - name: Install package needed by Juju
-    apt: name=python-yaml state=present
-
-  - name: Add key
-    authorized_key: user="{{ ansible_env['SUDO_USER'] }}"
-      key="{{ hostvars['head']['sshkey']['stdout'] }}"
-
-  - name: Add route via /etc/rc.local
-    template: src=templates/etc/rc.local.cloudlab
-      dest=/etc/rc.local
-      mode=0755
-    when: cloudlab
-    notify:
-    - run /etc/rc.local
-
-  - name: Add route via /etc/rc.local
-    template: src=templates/etc/rc.local
-      dest=/etc/rc.local
-      mode=0755
-    when: not cloudlab
-    notify:
-    - run /etc/rc.local
-
-  - name: Touch ~/.ssh/config
-    file: path=/var/lib/nova state=directory
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs /var/lib/nova
-      creates=/var/lib/nova/lost+found
-    when: cloudlab
-
-  handlers:
-  - name: run /etc/rc.local
-    shell: /etc/rc.local
diff --git a/legacy/stanford-compute.yml b/legacy/stanford-compute.yml
deleted file mode 100644
index c342483..0000000
--- a/legacy/stanford-compute.yml
+++ /dev/null
@@ -1,72 +0,0 @@
----
-- hosts: head
-  sudo: no
-  user: ubuntu
-  tasks:
-  - name: Remind user what is going to happen
-    pause: prompt="Install nova-compute on all unused machines managed by Juju"
-
-  - name: Deploy nova-compute
-    script: scripts/juju-compute-setup.py
-
-  - name: Try to avoid race condition
-    pause: seconds=5
-
-  - name: Wait until nova-compute is deployed
-    script: scripts/wait-for-services.sh
-
-  - name: Make sure we're using KVM
-    shell: juju set nova-compute virt-type=kvm
-
-  - name: Add nova-compute relations
-    script: scripts/juju-compute-relations.py
-
-  - name: Try to avoid race condition
-    pause: seconds=5
-
-  - name: Wait until relations are added
-    script: scripts/wait-for-services.sh
-
-# Play: set up ansible-pull for OpenCloud-specific files on nova-compute nodes
-- hosts: compute
-  sudo: yes
-  user: ubuntu
-  vars:
-
-    # schedule is fed directly to cron
-    schedule: '*/15 * * * *'
-
-    # User to run ansible-pull as from cron
-    cron_user: root
-
-    # File that ansible will use for logs
-    logfile: /var/log/ansible-pull.log
-
-    # Directory to where repository will be cloned
-    workdir: /var/lib/ansible/local
-
-    # Repository to check out
-    # repo must contain a local.yml file at top level
-    #repo_url: git://github.com/sfromm/ansible-playbooks.git
-    repo_url: git://github.com/andybavier/opencloud-nova-compute-ansible.git
-
-    # Branch or tag to checkout
-    repo_version: kilo
-
-  tasks:
-
-  - name: Install ansible
-    apt: name=ansible state=installed
-
-  - name: Basic ansible inventory
-    template: src=templates/etc/ansible/hosts.j2
-      dest=/etc/ansible/hosts
-
-  - name: Create local directory to work from
-    file: path={{workdir}} state=directory owner=root group=root mode=0751
-
-  - name: Create crontab entry to clone/pull git repository
-    template: src=templates/etc/cron.d/ansible-pull.j2 dest=/etc/cron.d/ansible-pull owner=root group=root mode=0644
-
-  - name: Create logrotate entry for ansible-pull.log
-    template: src=templates/etc/logrotate.d/ansible-pull.j2 dest=/etc/logrotate.d/ansible-pull owner=root group=root mode=0644
diff --git a/legacy/stanford-hosts b/legacy/stanford-hosts
deleted file mode 100644
index 6a0a986..0000000
--- a/legacy/stanford-hosts
+++ /dev/null
@@ -1,66 +0,0 @@
-head    ansible_ssh_host=node1.stanford.vicci.org
-
-[compute]
-node2.stanford.vicci.org 
-node3.stanford.vicci.org 
-node5.stanford.vicci.org 
-node6.stanford.vicci.org 
-node7.stanford.vicci.org 
-node8.stanford.vicci.org 
-node9.stanford.vicci.org 
-node10.stanford.vicci.org 
-node11.stanford.vicci.org 
-node12.stanford.vicci.org 
-node13.stanford.vicci.org
-node14.stanford.vicci.org 
-node15.stanford.vicci.org 
-node16.stanford.vicci.org
-node17.stanford.vicci.org
-node18.stanford.vicci.org
-node19.stanford.vicci.org
-node20.stanford.vicci.org
-node21.stanford.vicci.org
-node22.stanford.vicci.org
-node23.stanford.vicci.org
-node24.stanford.vicci.org
-node25.stanford.vicci.org
-node26.stanford.vicci.org 
-node27.stanford.vicci.org
-node28.stanford.vicci.org
-node29.stanford.vicci.org
-node30.stanford.vicci.org
-node31.stanford.vicci.org
-node32.stanford.vicci.org
-node33.stanford.vicci.org
-node34.stanford.vicci.org
-node35.stanford.vicci.org
-node37.stanford.vicci.org
-node38.stanford.vicci.org
-node39.stanford.vicci.org
-node40.stanford.vicci.org
-node41.stanford.vicci.org
-node42.stanford.vicci.org
-node43.stanford.vicci.org
-node44.stanford.vicci.org
-node45.stanford.vicci.org
-node46.stanford.vicci.org
-node47.stanford.vicci.org
-node48.stanford.vicci.org
-node49.stanford.vicci.org
-node50.stanford.vicci.org
-node52.stanford.vicci.org 
-node54.stanford.vicci.org
-node55.stanford.vicci.org
-node57.stanford.vicci.org
-node58.stanford.vicci.org
-node59.stanford.vicci.org
-node60.stanford.vicci.org
-node61.stanford.vicci.org
-node62.stanford.vicci.org
-node63.stanford.vicci.org
-node64.stanford.vicci.org
-node67.stanford.vicci.org
-node68.stanford.vicci.org
-node69.stanford.vicci.org
-node70.stanford.vicci.org
-
diff --git a/legacy/stanford-setup.yml b/legacy/stanford-setup.yml
deleted file mode 100644
index c9513b1..0000000
--- a/legacy/stanford-setup.yml
+++ /dev/null
@@ -1,280 +0,0 @@
----
-# Play: set up head node
-# Assumes basic /etc/ansible/hosts file
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  vars:
-    # Each OpenCloud cluster needs a unique mgmt_net_prefix
-    mgmt_net_prefix: 192.168.101
-  tasks:
-
-  - apt: name={{ item }} update_cache=yes
-    with_items:
-    - python-pycurl
-    - software-properties-common
-
-  - name: Add Juju repository
-    apt_repository: repo="ppa:juju/stable"
-
-  - name: Add Ansible repository
-    apt_repository: repo="ppa:ansible/ansible"
-
-  - name: Update Ansible cache
-    apt: update_cache=yes
-
-  - name: Install packages
-    apt: name={{ item }} state=latest
-    with_items:
-    - ansible
-    - uvtool
-    - git
-    - bzr
-    - juju-core
-    - python-novaclient
-    - python-neutronclient
-    - python-keystoneclient
-    - python-glanceclient
-
-  - name: Get juju-ansible git repo
-    git: repo=https://github.com/cmars/juju-ansible.git
-      dest=/usr/local/src/juju-ansible
-
-  - name: Set up juju-ansible symlink
-    file: dest=/usr/local/bin/juju-ansible
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Set up juju-ansible-playbook symlink
-    file: dest=/usr/local/bin/juju-ansible-playbook
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Generate key to use in VMs
-    user: name={{ ansible_env['SUDO_USER'] }} generate_ssh_key=yes
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs /var/lib/uvtool/libvirt/images
-      creates=/var/lib/uvtool/libvirt/images/lost+found
-    when: cloudlab
-
-  - name: Add myself to libvirtd group
-    user: name={{ ansible_env['SUDO_USER'] }}
-      groups=libvirtd
-      append=yes
-
-  - name: Get trusty image for uvtool
-    shell: uvt-simplestreams-libvirt sync --source http://cloud-images.ubuntu.com/daily release=trusty arch=amd64
-
-  - name: Change the virbr0 subnet to {{ mgmt_net_prefix }}.0/24
-    template: src=templates/etc/libvirt/qemu/networks/default.xml.j2
-      dest=/etc/libvirt/qemu/networks/default.xml
-    notify:
-    - recreate default net
-
-  handlers:
-  - name: recreate default net
-    script: scripts/recreate-virbr0.sh
-
-# Play: create VMs to host OpenStack services
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  tasks:
-  - name: Create VMs to host OpenCloud services
-    sudo: no
-    script: scripts/create-vms.sh
-
-  - include: tasks/vm-ips.yml
-
-  - name: Add VMs to /etc/hosts
-    template: src=templates/etc/hosts.j2
-      dest=/etc/hosts
-    notify:
-    - Reload dnsmasq
-
-  - name: Set up /etc/ansible/hosts
-    template: src=templates/etc/ansible/hosts.j2
-      dest=/etc/ansible/hosts
-
-  - name: Copy ansible.cfg to disable host key checking
-    sudo: no
-    copy: src=files/ansible.cfg
-      dest={{ ansible_env['PWD'] }}/.ansible.cfg
-
-  - name: Touch ~/.ssh/config
-    sudo: no
-    file: path={{ ansible_env['PWD'] }}/.ssh/config state=touch
-
-  - name: Disable host key checking in SSH
-    sudo: no
-    lineinfile: dest={{ ansible_env['PWD'] }}/.ssh/config
-      line="StrictHostKeyChecking no"
-
-  - name: Test that we can log into every VM
-    sudo: no
-    shell: ansible services -m ping -u ubuntu
-
-  handlers:
-  - name: Reload dnsmasq
-    shell: killall -HUP dnsmasq
-
-# Play: Install services using Juju
-- hosts: head
-  user: ubuntu
-  vars:
-    charm_src: /usr/local/src/charms/trusty
-  tasks:
-  - name: Initialize Juju
-    sudo: no
-    shell: juju generate-config
-      creates={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - shell: uvt-kvm ip juju
-    register: juju_ip
-
-  - name: Juju config file
-    sudo: no
-    template: src=templates/environments.yaml.j2
-      dest={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - name: Bootstrap Juju
-    sudo: no
-    shell: juju bootstrap
-      creates={{ ansible_env['PWD'] }}/.juju/environments/manual.jenv
-
-  - name: Copy openstack.cfg for Juju
-    sudo: yes
-    copy: src=files/openstack.cfg
-      dest=/usr/local/src/openstack.cfg
-
-  - name: Check for /usr/local/src/charms/trusty
-    sudo: yes
-    file: path={{ charm_src }}
-       state=directory
-       mode=0755
-
-  - name: Deploy OpenStack services with Juju
-    script: scripts/juju-setup.py
-
-  - name: Add all Juju relations between services
-    script: scripts/juju-relations.py
-
-  - name: Wait for relations to be fully added
-    script: scripts/wait-for-services.sh
-
-# Play: Use libvirt hooks to set up iptables
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  tasks:
-  - name: Enable port forwarding for services
-    copy: src=files/{{ item }}
-      dest={{ item }}
-      mode=0755
-    notify:
-    - reload libvirt config
-    - run qemu hook
-    with_items:
-    - /etc/libvirt/hooks/daemon
-    - /etc/libvirt/hooks/qemu
-
-  handlers:
-  - name: reload libvirt config
-    shell: killall -HUP libvirtd
-
-  - name: run qemu hook
-    shell: /etc/libvirt/hooks/qemu start start
-
-# Play: Create credentials, set up some basic OpenStack
-- hosts: head
-  user: ubuntu
-  sudo: no
-  tasks:
-
-  - name: Get keystone admin password
-    shell: juju run --unit=keystone/0 "sudo cat /var/lib/keystone/keystone.passwd"
-    register: keystone_password
-
-  - shell: uvt-kvm ip keystone
-    register: keystone_ip
-
-  - name: Create credentials
-    template: src=templates/admin-openrc.sh.j2
-     dest={{ ansible_env['PWD'] }}/admin-openrc.sh
-
-  - name: (CloudLab) Make sure that /root/setup exists
-    file: path=/root/setup state=directory
-    sudo: yes
-    when: cloudlab
-
-  - name: (CloudLab) Copy credentials to /root/setup
-    shell: scp admin-openrc.sh /root/setup
-    sudo: yes
-    when: cloudlab
-
-  - name: Copy credentials to nova-cloud-controller
-    shell: "scp admin-openrc.sh ubuntu@nova-cloud-controller:"
-
-  - name: Copy network setup script
-    sudo: yes
-    copy: src=scripts/network-setup.sh
-      dest=/usr/local/src/network-setup.sh
-      mode=0644
-
-  - name: Run network setup script
-    shell: ansible nova-cloud-controller -m script -u ubuntu -a "/usr/local/src/network-setup.sh"
-
-  - name: Get public key
-    shell: cat {{ ansible_env['PWD'] }}/.ssh/id_rsa.pub
-    register: sshkey
-
-  - name: Copy CA certificate
-    shell: sudo juju scp nova-cloud-controller/0:/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt /usr/local/share/ca-certificates
-      creates=/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
-
-  - name: Update CA ca-certificates
-    shell: update-ca-certificates
-    sudo: yes
-
-- hosts: compute
-  user: ubuntu
-  sudo: yes
-  vars:
-    control_net: "{{ hostvars['head']['ansible_virbr0']['ipv4']['network'] }}/24"
-    gateway: "{{ hostvars['head']['ansible_default_ipv4']['address'] }}"
-  tasks:
-  - name: Install package needed by Juju
-    apt: name=python-yaml state=present
-
-  - name: Add key
-    authorized_key: user="{{ ansible_env['SUDO_USER'] }}"
-      key="{{ hostvars['head']['sshkey']['stdout'] }}"
-
-  - name: Add route via /etc/rc.local
-    template: src=templates/etc/rc.local.cloudlab
-      dest=/etc/rc.local
-      mode=0755
-    when: cloudlab
-    notify:
-    - run /etc/rc.local
-
-  - name: Add route via /etc/rc.local
-    template: src=templates/etc/rc.local
-      dest=/etc/rc.local
-      mode=0755
-    when: not cloudlab
-    notify:
-    - run /etc/rc.local
-
-  - name: Touch ~/.ssh/config
-    file: path=/var/lib/nova state=directory
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs /var/lib/nova
-      creates=/var/lib/nova/lost+found
-    when: cloudlab
-
-  handlers:
-  - name: run /etc/rc.local
-    shell: /etc/rc.local
diff --git a/legacy/tasks/vm-ips.yml b/legacy/tasks/vm-ips.yml
deleted file mode 100644
index 8406a7a..0000000
--- a/legacy/tasks/vm-ips.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- shell: uvt-kvm ip juju
-  register: juju_ip
-
-- shell: uvt-kvm ip mysql
-  register: mysql_ip
-
-- shell: uvt-kvm ip rabbitmq-server
-  register: rabbitmq_ip
-
-- shell: uvt-kvm ip keystone
-  register: keystone_ip
-
-- shell: uvt-kvm ip glance
-  register: glance_ip
-
-- shell: uvt-kvm ip nova-cloud-controller
-  register: novacc_ip
-
-- shell: uvt-kvm ip neutron-gateway
-  register: neutron_ip
-
-- shell: uvt-kvm ip openstack-dashboard
-  register: horizon_ip
-
-- shell: uvt-kvm ip nagios
-  register: nagios_ip
-
-- shell: uvt-kvm ip ceilometer
-  register: ceilometer_ip
-
-- shell: uvt-kvm ip neutron-api
-  register: neutron_api_ip
diff --git a/legacy/unc-compute.yml b/legacy/unc-compute.yml
deleted file mode 100644
index c342483..0000000
--- a/legacy/unc-compute.yml
+++ /dev/null
@@ -1,72 +0,0 @@
----
-- hosts: head
-  sudo: no
-  user: ubuntu
-  tasks:
-  - name: Remind user what is going to happen
-    pause: prompt="Install nova-compute on all unused machines managed by Juju"
-
-  - name: Deploy nova-compute
-    script: scripts/juju-compute-setup.py
-
-  - name: Try to avoid race condition
-    pause: seconds=5
-
-  - name: Wait until nova-compute is deployed
-    script: scripts/wait-for-services.sh
-
-  - name: Make sure we're using KVM
-    shell: juju set nova-compute virt-type=kvm
-
-  - name: Add nova-compute relations
-    script: scripts/juju-compute-relations.py
-
-  - name: Try to avoid race condition
-    pause: seconds=5
-
-  - name: Wait until relations are added
-    script: scripts/wait-for-services.sh
-
-# Play: set up ansible-pull for OpenCloud-specific files on nova-compute nodes
-- hosts: compute
-  sudo: yes
-  user: ubuntu
-  vars:
-
-    # schedule is fed directly to cron
-    schedule: '*/15 * * * *'
-
-    # User to run ansible-pull as from cron
-    cron_user: root
-
-    # File that ansible will use for logs
-    logfile: /var/log/ansible-pull.log
-
-    # Directory to where repository will be cloned
-    workdir: /var/lib/ansible/local
-
-    # Repository to check out
-    # repo must contain a local.yml file at top level
-    #repo_url: git://github.com/sfromm/ansible-playbooks.git
-    repo_url: git://github.com/andybavier/opencloud-nova-compute-ansible.git
-
-    # Branch or tag to checkout
-    repo_version: kilo
-
-  tasks:
-
-  - name: Install ansible
-    apt: name=ansible state=installed
-
-  - name: Basic ansible inventory
-    template: src=templates/etc/ansible/hosts.j2
-      dest=/etc/ansible/hosts
-
-  - name: Create local directory to work from
-    file: path={{workdir}} state=directory owner=root group=root mode=0751
-
-  - name: Create crontab entry to clone/pull git repository
-    template: src=templates/etc/cron.d/ansible-pull.j2 dest=/etc/cron.d/ansible-pull owner=root group=root mode=0644
-
-  - name: Create logrotate entry for ansible-pull.log
-    template: src=templates/etc/logrotate.d/ansible-pull.j2 dest=/etc/logrotate.d/ansible-pull owner=root group=root mode=0644
diff --git a/legacy/unc-setup.yml b/legacy/unc-setup.yml
deleted file mode 100644
index 463a6e5..0000000
--- a/legacy/unc-setup.yml
+++ /dev/null
@@ -1,280 +0,0 @@
----
-# Play: set up head node
-# Assumes basic /etc/ansible/hosts file
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  vars:
-    # Each OpenCloud cluster needs a unique mgmt_net_prefix
-    mgmt_net_prefix: 192.168.105
-  tasks:
-
-  - apt: name={{ item }} update_cache=yes
-    with_items:
-    - python-pycurl
-    - software-properties-common
-
-  - name: Add Juju repository
-    apt_repository: repo="ppa:juju/stable"
-
-  - name: Add Ansible repository
-    apt_repository: repo="ppa:ansible/ansible"
-
-  - name: Update Ansible cache
-    apt: update_cache=yes
-
-  - name: Install packages
-    apt: name={{ item }} state=latest
-    with_items:
-    - ansible
-    - uvtool
-    - git
-    - bzr
-    - juju-core
-    - python-novaclient
-    - python-neutronclient
-    - python-keystoneclient
-    - python-glanceclient
-
-  - name: Get juju-ansible git repo
-    git: repo=https://github.com/cmars/juju-ansible.git
-      dest=/usr/local/src/juju-ansible
-
-  - name: Set up juju-ansible symlink
-    file: dest=/usr/local/bin/juju-ansible
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Set up juju-ansible-playbook symlink
-    file: dest=/usr/local/bin/juju-ansible-playbook
-      src=/usr/local/src/juju-ansible/juju-ansible
-      state=link
-
-  - name: Generate key to use in VMs
-    user: name={{ ansible_env['SUDO_USER'] }} generate_ssh_key=yes
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs /var/lib/uvtool/libvirt/images
-      creates=/var/lib/uvtool/libvirt/images/lost+found
-    when: cloudlab
-
-  - name: Add myself to libvirtd group
-    user: name={{ ansible_env['SUDO_USER'] }}
-      groups=libvirtd
-      append=yes
-
-  - name: Get trusty image for uvtool
-    shell: uvt-simplestreams-libvirt sync --source http://cloud-images.ubuntu.com/daily release=trusty arch=amd64
-
-  - name: Change the virbr0 subnet to {{ mgmt_net_prefix }}.0/24
-    template: src=templates/etc/libvirt/qemu/networks/default.xml.j2
-      dest=/etc/libvirt/qemu/networks/default.xml
-    notify:
-    - recreate default net
-
-  handlers:
-  - name: recreate default net
-    script: scripts/recreate-virbr0.sh
-
-# Play: create VMs to host OpenStack services
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  tasks:
-  - name: Create VMs to host OpenCloud services
-    sudo: no
-    script: scripts/create-vms.sh
-
-  - include: tasks/vm-ips.yml
-
-  - name: Add VMs to /etc/hosts
-    template: src=templates/etc/hosts.j2
-      dest=/etc/hosts
-    notify:
-    - Reload dnsmasq
-
-  - name: Set up /etc/ansible/hosts
-    template: src=templates/etc/ansible/hosts.j2
-      dest=/etc/ansible/hosts
-
-  - name: Copy ansible.cfg to disable host key checking
-    sudo: no
-    copy: src=files/ansible.cfg
-      dest={{ ansible_env['PWD'] }}/.ansible.cfg
-
-  - name: Touch ~/.ssh/config
-    sudo: no
-    file: path={{ ansible_env['PWD'] }}/.ssh/config state=touch
-
-  - name: Disable host key checking in SSH
-    sudo: no
-    lineinfile: dest={{ ansible_env['PWD'] }}/.ssh/config
-      line="StrictHostKeyChecking no"
-
-  - name: Test that we can log into every VM
-    sudo: no
-    shell: ansible services -m ping -u ubuntu
-
-  handlers:
-  - name: Reload dnsmasq
-    shell: killall -HUP dnsmasq
-
-# Play: Install services using Juju
-- hosts: head
-  user: ubuntu
-  vars:
-    charm_src: /usr/local/src/charms/trusty
-  tasks:
-  - name: Initialize Juju
-    sudo: no
-    shell: juju generate-config
-      creates={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - shell: uvt-kvm ip juju
-    register: juju_ip
-
-  - name: Juju config file
-    sudo: no
-    template: src=templates/environments.yaml.j2
-      dest={{ ansible_env['PWD'] }}/.juju/environments.yaml
-
-  - name: Bootstrap Juju
-    sudo: no
-    shell: juju bootstrap
-      creates={{ ansible_env['PWD'] }}/.juju/environments/manual.jenv
-
-  - name: Copy openstack.cfg for Juju
-    sudo: yes
-    copy: src=files/openstack.cfg
-      dest=/usr/local/src/openstack.cfg
-
-  - name: Check for /usr/local/src/charms/trusty
-    sudo: yes
-    file: path={{ charm_src }}
-       state=directory
-       mode=0755
-
-  - name: Deploy OpenStack services with Juju
-    script: scripts/juju-setup.py
-
-  - name: Add all Juju relations between services
-    script: scripts/juju-relations.py
-
-  - name: Wait for relations to be fully added
-    script: scripts/wait-for-services.sh
-
-# Play: Use libvirt hooks to set up iptables
-- hosts: head
-  user: ubuntu
-  sudo: yes
-  tasks:
-  - name: Enable port forwarding for services
-    copy: src=files/{{ item }}
-      dest={{ item }}
-      mode=0755
-    notify:
-    - reload libvirt config
-    - run qemu hook
-    with_items:
-    - /etc/libvirt/hooks/daemon
-    - /etc/libvirt/hooks/qemu
-
-  handlers:
-  - name: reload libvirt config
-    shell: killall -HUP libvirtd
-
-  - name: run qemu hook
-    shell: /etc/libvirt/hooks/qemu start start
-
-# Play: Create credentials, set up some basic OpenStack
-- hosts: head
-  user: ubuntu
-  sudo: no
-  tasks:
-
-  - name: Get keystone admin password
-    shell: juju run --unit=keystone/0 "sudo cat /var/lib/keystone/keystone.passwd"
-    register: keystone_password
-
-  - shell: uvt-kvm ip keystone
-    register: keystone_ip
-
-  - name: Create credentials
-    template: src=templates/admin-openrc.sh.j2
-     dest={{ ansible_env['PWD'] }}/admin-openrc.sh
-
-  - name: (CloudLab) Make sure that /root/setup exists
-    file: path=/root/setup state=directory
-    sudo: yes
-    when: cloudlab
-
-  - name: (CloudLab) Copy credentials to /root/setup
-    shell: scp admin-openrc.sh /root/setup
-    sudo: yes
-    when: cloudlab
-
-  - name: Copy credentials to nova-cloud-controller
-    shell: "scp admin-openrc.sh ubuntu@nova-cloud-controller:"
-
-  - name: Copy network setup script
-    sudo: yes
-    copy: src=scripts/network-setup.sh
-      dest=/usr/local/src/network-setup.sh
-      mode=0755
-
-  - name: Run network setup script
-    shell: ansible nova-cloud-controller -m script -u ubuntu -a "/usr/local/src/network-setup.sh"
-
-  - name: Get public key
-    shell: cat {{ ansible_env['PWD'] }}/.ssh/id_rsa.pub
-    register: sshkey
-
-  - name: Copy CA certificate
-    shell: sudo juju scp nova-cloud-controller/0:/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt /usr/local/share/ca-certificates
-      creates=/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
-
-  - name: Update CA ca-certificates
-    shell: update-ca-certificates
-    sudo: yes
-
-- hosts: compute
-  user: ubuntu
-  sudo: yes
-  vars:
-    control_net: "{{ hostvars['head']['ansible_virbr0']['ipv4']['network'] }}/24"
-    gateway: "{{ hostvars['head']['ansible_default_ipv4']['address'] }}"
-  tasks:
-  - name: Install package needed by Juju
-    apt: name=python-yaml state=present
-
-  - name: Add key
-    authorized_key: user="{{ ansible_env['SUDO_USER'] }}"
-      key="{{ hostvars['head']['sshkey']['stdout'] }}"
-
-  - name: Add route via /etc/rc.local
-    template: src=templates/etc/rc.local.cloudlab
-      dest=/etc/rc.local
-      mode=0755
-    when: cloudlab
-    notify:
-    - run /etc/rc.local
-
-  - name: Add route via /etc/rc.local
-    template: src=templates/etc/rc.local
-      dest=/etc/rc.local
-      mode=0755
-    when: not cloudlab
-    notify:
-    - run /etc/rc.local
-
-  - name: Touch ~/.ssh/config
-    file: path=/var/lib/nova state=directory
-
-  - name: (CloudLab) Set up extra disk space
-    shell: /usr/testbed/bin/mkextrafs /var/lib/nova
-      creates=/var/lib/nova/lost+found
-    when: cloudlab
-
-  handlers:
-  - name: run /etc/rc.local
-    shell: /etc/rc.local
diff --git a/roles/common-prep/defaults/main.yml b/roles/common-prep/defaults/main.yml
new file mode 100644
index 0000000..0bcd73c
--- /dev/null
+++ b/roles/common-prep/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+
+run_dist_upgrade: true
diff --git a/roles/common-prep/tasks/main.yml b/roles/common-prep/tasks/main.yml
index e2368e9..39bced7 100644
--- a/roles/common-prep/tasks/main.yml
+++ b/roles/common-prep/tasks/main.yml
@@ -8,14 +8,12 @@
     update_cache: yes
     cache_valid_time: 3600
 
-- name: Determine if a reboot if required
+- name: Reboot if required after dist_upgrade
   when: run_dist_upgrade
-  stat: path=/var/run/reboot-required
-  register: reboot-required
-
-- name: Perform a reboot if required
-  when: run_dist_upgrade and reboot-required.exists is defined
-  debug: msg="System will reboot"
+  stat:
+    path: /var/run/reboot-required
+  register: reboot_required
+  changed_when: reboot_required.stat.exists
   notify:
     - restart host
     - wait for host
diff --git a/roles/compute-diag/tasks/main.yml b/roles/compute-diag/tasks/main.yml
index 1f25508..1bbf755 100644
--- a/roles/compute-diag/tasks/main.yml
+++ b/roles/compute-diag/tasks/main.yml
@@ -1,13 +1,16 @@
 ---
 # compute-diag/tasks/main.yml
-#
+
 - name: Create diag_dir subdir
   file:
-    dest="/tmp/{{ hostvars[groups['head'][0]]['diag_dir'] }}/{{ inventory_hostname }}"
-    state=directory
+    dest: "/tmp/{{ hostvars[groups['head'][0]]['diag_dir'] }}/{{ inventory_hostname }}"
+    state: directory
+  register: compute_diag_dir
 
 - name: Compute node diag collection
   shell: "{{ item }} > /tmp/{{ hostvars[groups['head'][0]]['diag_dir'] }}/{{ inventory_hostname }}/{{ item | regex_replace('[^\\w-]', '_')}}"
+  args:
+    creates: "compute_diag_dir.stat.path/{{ item | regex_replace('[^\\w-]', '_')}}"
   with_items:
    - "date"
    - "arp -n"
diff --git a/roles/config-virt/tasks/main.yml b/roles/config-virt/tasks/main.yml
index 89c0c7d..8937824 100644
--- a/roles/config-virt/tasks/main.yml
+++ b/roles/config-virt/tasks/main.yml
@@ -1,9 +1,18 @@
 ---
 # roles/config-virt/tasks/main.yml
 
+- name: Check to see if we already have a uvtool image
+  find:
+    path: "/var/lib/uvtool/libvirt/images"
+    patterns: "x-uvt-b64-*"
+  register: uvtool_image
+
 - name: Get ubuntu image for uvtool
+  when: "{{ uvtool_image.matched < 1 }}"
   command: uvt-simplestreams-libvirt sync --source http://cloud-images.ubuntu.com/daily \
     release={{ ansible_distribution_release }} arch=amd64
+  args:
+    creates: "/var/lib/uvtool/libvirt/images/x-uvt-b64-*"
   async: 1200
   poll: 0
   register: uvt_sync
@@ -50,6 +59,7 @@
   with_items: '{{ virt_nets }}'
 
 - name: Wait for uvt-kvm image to be available
+  when: "{{ uvtool_image.matched < 1 }}"
   async_status: jid={{ uvt_sync.ansible_job_id }}
   register: uvt_sync_result
   until: uvt_sync_result.finished
diff --git a/roles/create-vms/tasks/main.yml b/roles/create-vms/tasks/main.yml
index 266bad8..77244f0 100644
--- a/roles/create-vms/tasks/main.yml
+++ b/roles/create-vms/tasks/main.yml
@@ -2,9 +2,10 @@
 # file: create-vms/tasks/main.yml
 
 - name: create Virtual Machines with uvt-kvm
-  shell: uvt-kvm create {{ item.name }} release={{ ansible_distribution_release }} \
+  command: uvt-kvm create {{ item.name }} release={{ ansible_distribution_release }} \
     --cpu={{ item.cpu }} --memory={{ item.memMB }} --disk={{ item.diskGB }} --bridge="mgmtbr"
-    creates=/var/lib/uvtool/libvirt/images/{{ item.name }}.qcow
+  args:
+    creates: "/var/lib/uvtool/libvirt/images/{{ item.name }}.qcow"
   with_items: "{{ head_vm_list }}"
 
 - name: Have VMs autostart on reboot
@@ -51,16 +52,24 @@
 
 - name: Verify that we can log into every VM
   command: ansible services -m ping -u ubuntu
+  tags:
+    - skip_ansible_lint # connectivity check
 
 - name: Have VM's use the apt-cache
   command: ansible services -b -u ubuntu -m lineinfile -a "dest=/etc/apt/apt.conf.d/02apt-cacher-ng create=yes mode=0644 owner=root group=root regexp='^Acquire' line='Acquire::http { Proxy \"http://{{ apt_cacher_name }}:{{ apt_cacher_port | default('3142') }}\"; };'"
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Update apt cache
   command: ansible services -m apt -b -u ubuntu -a "update_cache=yes cache_valid_time=3600"
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Update software in all the VMs
   when: run_dist_upgrade
   command: ansible services -m apt -b -u ubuntu -a "upgrade=dist"
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Create VM's eth0 interface config file for DNS config via resolvconf program
   when: not on_maas
@@ -84,6 +93,8 @@
 - name: Enable root ssh login on VM's that require it
   command: ansible {{ item.name }} -b -u ubuntu -m authorized_key -a "user='root' key='{{ sshkey.stdout }}'"
   with_items: "{{ head_vm_list | selectattr('root_ssh_login', 'defined') | list }}"
+  tags:
+    - skip_ansible_lint # FIXME, ssh key mangling
 
 - name: Copy over docker installation playbook and docker apt-key
   copy:
@@ -95,4 +106,7 @@
 
 - name: Install docker in VM's that require it
   command: ansible-playbook "{{ ansible_user_dir }}/docker-install-playbook.yml"
+  tags:
+    - skip_ansible_lint # running a sub job
+
 
diff --git a/roles/dell-virt/tasks/main.yml b/roles/dell-virt/tasks/main.yml
index cfc60a6..1d153f3 100644
--- a/roles/dell-virt/tasks/main.yml
+++ b/roles/dell-virt/tasks/main.yml
@@ -21,6 +21,8 @@
   shell: lsmod | grep kvm_
   ignore_errors: true
   register: virtualization_enabled
+  tags:
+    - skip_ansible_lint # just used to register result
 
 - name: Enable virtualization in BIOS
   command: /opt/dell/toolkit/bin/syscfg --virtualization=enable
diff --git a/roles/dns-configure/tasks/main.yml b/roles/dns-configure/tasks/main.yml
index 1481bbe..07b0d5d 100644
--- a/roles/dns-configure/tasks/main.yml
+++ b/roles/dns-configure/tasks/main.yml
@@ -10,4 +10,6 @@
 - name: Check that VM's can be found in DNS
   shell: "dig +short {{ item.name }}.{{ site_suffix }} | grep {{ item.ipv4_last_octet }}"
   with_items: "{{ head_vm_list }}"
+  tags:
+   - skip_ansible_lint # purely a way to pass/fail config done so far. Ansible needs a "dns_query" module
 
diff --git a/roles/head-diag/tasks/main.yml b/roles/head-diag/tasks/main.yml
index 1dd4a9b..8a8c750 100644
--- a/roles/head-diag/tasks/main.yml
+++ b/roles/head-diag/tasks/main.yml
@@ -14,6 +14,8 @@
 
 - name: Head node diag collection
   shell: "{{ item }} > ~/{{ diag_dir }}/head/{{ item | regex_replace('[^\\w-]', '_')}}"
+  args:
+    creates: "~/{{ diag_dir }}/head/{{ item | regex_replace('[^\\w-]', '_')}}"
   with_items:
    - "ifconfig -a"
    - "route -n"
@@ -26,6 +28,8 @@
 
 - name: Juju diag collection
   shell: "{{ item }} > ~/{{ diag_dir }}/juju/{{ item | regex_replace('[^\\w-]', '_')}}"
+  args:
+    creates: "~/{{ diag_dir }}/juju/{{ item | regex_replace('[^\\w-]', '_')}}"
   with_items:
    - "juju status --format=summary"
    - "juju status --format=json"
@@ -34,6 +38,7 @@
   shell: "source ~/admin-openrc.sh && {{ item }} > ~/{{ diag_dir }}/openstack/{{ item | regex_replace('[^\\w-]', '_')}}"
   args:
     executable: "/bin/bash"
+    creates: "~/{{ diag_dir }}/openstack/{{ item | regex_replace('[^\\w-]', '_')}}"
   with_items:
    - "glance image-list"
    - "nova list --all-tenants"
@@ -46,11 +51,16 @@
 
 - name: ONOS diag collection - REST API
   shell: "curl -X GET -u karaf:karaf http://onos-cord-1:8181/onos/v1/{{ item }} | python -m json.tool > ~/{{ diag_dir }}/onos/rest_{{ item | regex_replace('[^\\w-]', '_') }}"
+  args:
+    creates: "~/{{ diag_dir }}/onos/rest_{{ item | regex_replace('[^\\w-]', '_')}}"
+    warn: False # get_url or uri can't easily redirect to a file
   with_items:
    - "hosts"
 
 - name: ONOS diag collection - ONOS CLI
   shell: "sshpass -p 'karaf' ssh -p 8101 karaf@onos-cord {{ item }} > ~/{{ diag_dir }}/onos/{{ item | regex_replace('[^\\w-]', '_') }}"
+  args:
+    creates: "~/{{ diag_dir }}/onos/{{ item | regex_replace('[^\\w-]', '_')}}"
   with_items:
    - "apps -s -a"
    - "bundle:list"
@@ -66,6 +76,8 @@
 
 - name: XOS diag collection
   shell: "ssh ubuntu@xos-1 \"{{ item }}\" > ~/{{ diag_dir }}/xos/{{ item | regex_replace('[^\\w-]', '_')}}"
+  args:
+    creates: "~/{{ diag_dir }}/xos/{{ item | regex_replace('[^\\w-]', '_')}}"
   with_items:
    - "docker ps"
    - "arp -n"
@@ -73,6 +85,8 @@
 
 - name: Copy/run/retrieve XOS docker logs
   command: "{{ item }}"
+  tags:
+   - skip_ansible_lint # don't know the name of docker containers for all configurations
   with_items:
    - "scp {{ role_path }}/files/docker_logs.sh ubuntu@xos-1:~/docker_logs.sh"
    - "ssh ubuntu@xos-1 'bash ~/docker_logs.sh'"
diff --git a/roles/head-prep/defaults/main.yml b/roles/head-prep/defaults/main.yml
new file mode 100644
index 0000000..8e379dd
--- /dev/null
+++ b/roles/head-prep/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+
+on_maas: false
+
diff --git a/roles/head-prep/tasks/main.yml b/roles/head-prep/tasks/main.yml
index b2b300e..0fd1a7b 100644
--- a/roles/head-prep/tasks/main.yml
+++ b/roles/head-prep/tasks/main.yml
@@ -45,6 +45,8 @@
 - name: Register public key in variable
   shell: cat {{ ansible_user_dir }}/.ssh/id_rsa.pub
   register: sshkey
+  tags:
+    - skip_ansible_lint # FIXME: this should be done a different way
 
 - name: Add public key to this user account
   authorized_key:
@@ -64,19 +66,19 @@
     dest={{ ansible_user_dir }}/.ansible.cfg
     owner={{ ansible_user_id }} mode=0644
 
-- name: Copy node key (MaaS)
-  when: on_maas
+- name: Copy node key
+  when: not on_maas
   copy:
-    src={{ maas_node_key }}
+    src={{ ansible_user_dir }}/.ssh/id_rsa
     dest={{ ansible_user_dir }}/node_key
     owner={{ ansible_user }}
     mode=0600
     remote_src=True
 
-- name: Copy node key (without MaaS)
-  when: not on_maas
+- name: Copy node key (MaaS)
+  when: on_maas
   copy:
-    src={{ ansible_user_dir }}/.ssh/id_rsa
+    src={{ maas_node_key }}
     dest={{ ansible_user_dir }}/node_key
     owner={{ ansible_user }}
     mode=0600
diff --git a/roles/juju-compute-setup/defaults/main.yml b/roles/juju-compute-setup/defaults/main.yml
index 2c0208b..f6cabc4 100644
--- a/roles/juju-compute-setup/defaults/main.yml
+++ b/roles/juju-compute-setup/defaults/main.yml
@@ -6,4 +6,4 @@
 
 juju_config_path: /usr/local/src/juju_config.yml
 charm_versions: {}
-  
+
diff --git a/roles/juju-compute-setup/tasks/main.yml b/roles/juju-compute-setup/tasks/main.yml
index 0222775..7473a06 100644
--- a/roles/juju-compute-setup/tasks/main.yml
+++ b/roles/juju-compute-setup/tasks/main.yml
@@ -16,6 +16,7 @@
 # iptables -A FORWARD -i mgmtbr -o eth0 -s <vmnet> -d <extnet> -j ACCEPT
 
 - name: Add machines to Juju
+  when: "{{ groups['compute'] | difference( juju_machines.keys() ) | length }}"
   command: "juju add-machine ssh:{{ item }}"
   with_items: "{{ groups['compute'] | difference( juju_machines.keys() ) }}"
 
@@ -24,10 +25,10 @@
   juju_facts:
 
 - name: Deploy nova-compute service if needed
-  command: "juju deploy {{ charm_versions[item] | default(item) }} --to {{ juju_machines[groups['compute'][0]]['machine_id'] }} --config={{ juju_config_path }}"
-  with_items: 
-    - "nova-compute"
   when: '"nova-compute" not in juju_services.keys()'
+  command: "juju deploy {{ charm_versions[item] | default(item) }} --to {{ juju_machines[groups['compute'][0]]['machine_id'] }} --config={{ juju_config_path }}"
+  with_items:
+    - "nova-compute"
 
 - name: Create relations between nova-compute and other services if needed
   command: "juju add-relation '{{ item.0.name }}' '{{ item.1 }}'"
@@ -36,15 +37,19 @@
   with_subelements:
     - "{{ compute_relations }}"
     - relations
+  tags:
+   - skip_ansible_lint # benign to do this more than once, hard to check for
 
 # run another time
 - name: Obtain Juju Facts after deploying nova-compute
-  juju_facts:
   when: '"nova-compute" not in juju_services.keys()'
+  juju_facts:
 
 - name: Add more nova-compute units
   command: "juju add-unit nova-compute --to {{ juju_machines[item]['machine_id'] }}"
   with_items: "{{ groups['compute'] | difference( juju_compute_nodes.keys() ) }}"
+  tags:
+   - skip_ansible_lint # benign to do this more than once, hard to check for
 
 - name: Pause to let Juju settle
   pause:
@@ -66,3 +71,6 @@
   retries: 5
   delay: 5
   with_items: "{{ groups['compute'] }}"
+  tags:
+   - skip_ansible_lint # this really should be the os_server module, but ansible doesn't know about juju created openstack
+
diff --git a/roles/juju-setup/tasks/main.yml b/roles/juju-setup/tasks/main.yml
index 6bd790d..2646ec0 100644
--- a/roles/juju-setup/tasks/main.yml
+++ b/roles/juju-setup/tasks/main.yml
@@ -34,6 +34,7 @@
 # list of active juju_services names: juju_services.keys()
 
 - name: Add machines to Juju
+  when: "{{ head_vm_list | map(attribute='service') | list | reject('undefined') | map('format_string', '%s.'~site_suffix ) | difference( juju_machines.keys() ) | length }}"
   command: "juju add-machine ssh:{{ item }}"
   with_items: "{{ head_vm_list | map(attribute='service') | list | reject('undefined') | map('format_string', '%s.'~site_suffix ) | difference( juju_machines.keys() ) }}"
 
@@ -42,14 +43,16 @@
   juju_facts:
 
 - name: Deploy services that are hosted in their own VM
+  when: "{{ vm_service_list | difference( juju_services.keys() ) | length }}"
   command: "juju deploy {{ charm_versions[item] | default(item) }} --to {{ juju_machines[item~'.'~site_suffix]['machine_id'] }} --config={{ juju_config_path }}"
   with_items: "{{ vm_service_list | difference( juju_services.keys() ) }}"
 
 - name: Deploy mongodb to ceilometer VM
-  command: "juju deploy {{ charm_versions['mongodb'] | default('mongodb') }} --to {{ juju_machines['ceilometer.'~site_suffix]['machine_id'] }} --config={{ juju_config_path }}"
   when: juju_services['mongodb'] is undefined
+  command: "juju deploy {{ charm_versions['mongodb'] | default('mongodb') }} --to {{ juju_machines['ceilometer.'~site_suffix]['machine_id'] }} --config={{ juju_config_path }}"
 
 - name: Deploy services that don't have their own VM
+  when: "{{ standalone_service_list | difference( juju_services.keys() ) | length }}"
   command: "juju deploy {{ charm_versions[item] | default(item) }} --config={{ juju_config_path }}"
   with_items: "{{ standalone_service_list | difference( juju_services.keys() ) }}"
 
@@ -60,6 +63,8 @@
   with_subelements:
     - "{{ service_relations }}"
     - relations
+  tags:
+   - skip_ansible_lint # benign to do this more than once, hard to check for
 
 # run another time, so services will be in juju_services list
 - name: Obtain Juju Facts after service creation
@@ -77,36 +82,46 @@
 # secondary wait, as waiting on ports isn't enough. Probably only need one of these...
 # 160*15s = 2400s = 40m max wait
 - name: Wait for juju services to start
-  action: command juju status --format=summary
+  command: juju status --format=summary
   register: juju_summary
   until: juju_summary.stdout.find("pending:") == -1
   retries: 160
   delay: 15
+  tags:
+   - skip_ansible_lint # checking/waiting on a system to be up
 
 - name: Create admin-openrc.sh credentials file
   template:
    src=admin-openrc.sh.j2
    dest={{ ansible_user_dir }}/admin-openrc.sh
 
-
 - name: Copy nova-cloud-controller CA certificate to head
   command: juju scp {{ juju_services['nova-cloud-controller']['units'].keys()[0] }}:/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt {{ ansible_user_dir }}
   register: result
   until: result | success
   retries: 40
   delay: 15
+  tags:
+   - skip_ansible_lint # checking/waiting on file availibilty
 
 - name: Copy cert to system location
   become: yes
-  command: cp {{ ansible_user_dir }}/keystone_juju_ca_cert.crt /usr/local/share/ca-certificates
+  copy:
+    src: "{{ ansible_user_dir }}/keystone_juju_ca_cert.crt"
+    dest: "/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt"
+    remote_src: true
+  register: copied_cert
 
 - name: update-ca-certificates
+  when: copied_cert.changed
   become: yes
   command: update-ca-certificates
 
 - name: Move cert to all service VM's
+  when: copied_cert.changed
   command: ansible services -b -u ubuntu -m copy -a "src={{ ansible_user_dir }}/keystone_juju_ca_cert.crt dest=/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt owner=root group=root mode=0644"
 
 - name: update-ca-certificates in service VM's
+  when: copied_cert.changed
   command: ansible services -b -u ubuntu -m command -a "update-ca-certificates"
 
diff --git a/roles/onos-load-apps/tasks/main.yml b/roles/onos-load-apps/tasks/main.yml
index 32a8a21..515c3c4 100644
--- a/roles/onos-load-apps/tasks/main.yml
+++ b/roles/onos-load-apps/tasks/main.yml
@@ -4,6 +4,8 @@
 - name: Disable loading of CORD apps from Maven repo
   command: ansible xos-1 -u ubuntu -m lineinfile \
     -a "dest=~/service-profile/{{ xos_configuration }}/make-vtn-external-yaml.sh state=absent regexp='install_dependencies'"
+  tags:
+    - skip_ansible_lint # running a sub-job
 
 - name: Create directory for CORD apps and load script
   file:
@@ -26,6 +28,11 @@
   with_items:
    - onos-app
 
+- name: Wait for ONOS to be ready
+  wait_for:
+    host: "{{ onos_cord_vm_hostname }}"
+    port: 8181
+
 - name: Download CORD apps from maven repo
   maven_artifact:
     repository_url: "{{ cord_apps_repo_url }}"
@@ -35,12 +42,10 @@
     extension: "oar"
     dest: "{{ ansible_user_dir }}/cord_apps/{{ item.name }}.oar"
   with_items: "{{ cord_apps }}"
+  register: maven_artifact_dl
 
-- name: Wait for ONOS to be ready
-  wait_for:
-    host: "{{ onos_cord_vm_hostname }}"
-    port: 8181
-
+# assumes no interruption between this and previous steps...
 - name: Install CORD apps
+  when: maven_artifact_dl.changed
   command: "{{ ansible_user_dir }}/cord_apps/cord_app_loader.sh"
 
diff --git a/roles/onos-vm-install/files/onos-setup-playbook.yml b/roles/onos-vm-install/files/onos-setup-playbook.yml
index fe33054..56bf06e 100644
--- a/roles/onos-vm-install/files/onos-setup-playbook.yml
+++ b/roles/onos-vm-install/files/onos-setup-playbook.yml
@@ -12,10 +12,11 @@
         path: "{{ ansible_user_dir }}/cord"
         state: directory
 
-# Should replace with http://docs.ansible.com/ansible/docker_module.html, when replacements are stable
     - name: Pull docker image for ONOS
       become: yes
       command: "docker pull {{ onos_docker_image }}"
+      tags:
+        - skip_ansible_lint # Should replace with http://docs.ansible.com/ansible/docker_module.html, when replacements are stable
 
 # Setup specific for onos-cord VM
 - hosts: onos-cord-1
diff --git a/roles/onos-vm-install/tasks/main.yml b/roles/onos-vm-install/tasks/main.yml
index bd3d073..1f2eedc 100644
--- a/roles/onos-vm-install/tasks/main.yml
+++ b/roles/onos-vm-install/tasks/main.yml
@@ -26,4 +26,6 @@
   async: 1800
   poll: 0
   register: onos_setup_playbook
+  tags:
+    - skip_ansible_lint # running a sub-job
 
diff --git a/roles/prereqs-common/tasks/main.yml b/roles/prereqs-common/tasks/main.yml
index aecde2c..642a1b4 100644
--- a/roles/prereqs-common/tasks/main.yml
+++ b/roles/prereqs-common/tasks/main.yml
@@ -23,6 +23,10 @@
   until: dns_lookup_check_result.rc == 0
   retries: 3
   delay: 1
+  tags:
+   - skip_ansible_lint # tried assert + dig (below), but it fails quickly and won't loop
+  #  assert:
+  #    that: "{{ lookup('dig', dns_check_domain ) == dns_check_ipv4 }}"
 
 - name: DNS Global Root Connectivity Check
   shell: "dig @{{ item }} +trace +short {{ dns_check_domain }} | grep {{ dns_check_ipv4 }}"
@@ -31,6 +35,8 @@
   until: dns_global_check_result.rc == 0
   retries: 3
   delay: 1
+  tags:
+   - skip_ansible_lint # too complex for lookup('dig', ...) to handle
 
 - name: HTTP Download Check
   get_url:
diff --git a/roles/simulate-fabric/files/simulate-fabric-playbook.yml b/roles/simulate-fabric/files/simulate-fabric-playbook.yml
index da1e876..ad326e1 100644
--- a/roles/simulate-fabric/files/simulate-fabric-playbook.yml
+++ b/roles/simulate-fabric/files/simulate-fabric-playbook.yml
@@ -67,6 +67,8 @@
       command: "iptables -t nat -C POSTROUTING -s 10.168.0.0/16 ! -d 10.168.0.0/16 -j MASQUERADE"
       register: iptables_check
       failed_when: "iptables_check|failed and 'No chain/target/match by that name' not in iptables_check.stderr"
+      tags:
+        - skip_ansible_lint # FIXME: should use iptables module when it supports inversion of ranges
 
     - name: Create iptables rule
       when: "iptables_check.rc != 0"
diff --git a/roles/simulate-fabric/tasks/main.yml b/roles/simulate-fabric/tasks/main.yml
index c9e834b..3f919ff 100644
--- a/roles/simulate-fabric/tasks/main.yml
+++ b/roles/simulate-fabric/tasks/main.yml
@@ -13,4 +13,6 @@
 
 - name: Setup simulated fabric on nova-compute-1 using playbook
   command: ansible-playbook {{ ansible_user_dir }}/simulate-fabric-playbook.yml
+  tags:
+    - skip_ansible_lint # running a sub-job
 
diff --git a/roles/test-client-install/files/test-client-playbook.yml b/roles/test-client-install/files/test-client-playbook.yml
index 7526cf4..c802a83 100644
--- a/roles/test-client-install/files/test-client-playbook.yml
+++ b/roles/test-client-install/files/test-client-playbook.yml
@@ -27,10 +27,14 @@
     - name: Create testclient
       become: yes
       shell: lxc-ls | grep testclient || lxc-create -t ubuntu -n testclient
+      tags:
+        - skip_ansible_lint # FIXME: should used lxc_container module
 
     - name: Start testclient
       become: yes
       shell: lxc-info -n testclient -s | grep RUNNING || lxc-start -n testclient
+      tags:
+        - skip_ansible_lint # FIXME: should used lxc_container module
 
     - name: Set up networking inside the testclient for testing sample CORD subscriber
       become: yes
@@ -40,3 +44,5 @@
       - "lxc-attach -n testclient -- bash -c 'ip link show eth0.222.111 || ip link add link eth0.222 name eth0.222.111 type vlan id 111'"
       - "lxc-attach -n testclient -- ifconfig eth0.222 up"
       - "lxc-attach -n testclient -- ifconfig eth0.222.111 up"
+      tags:
+        - skip_ansible_lint # non-trivial use case
diff --git a/roles/test-client-install/tasks/main.yml b/roles/test-client-install/tasks/main.yml
index d10512d..fdf4eaf 100644
--- a/roles/test-client-install/tasks/main.yml
+++ b/roles/test-client-install/tasks/main.yml
@@ -11,4 +11,6 @@
   async: 3600
   poll: 0
   register: test_client_playbook
+  tags:
+    - skip_ansible_lint # running a sub-job
 
diff --git a/roles/test-exampleservice/tasks/main.yml b/roles/test-exampleservice/tasks/main.yml
index 2ae3813..92bf70b 100644
--- a/roles/test-exampleservice/tasks/main.yml
+++ b/roles/test-exampleservice/tasks/main.yml
@@ -6,6 +6,8 @@
 - name: Onboard ExampleService and instantiate a VM
   command: ansible xos-1 -u ubuntu -m shell \
     -a "cd ~/service-profile/cord-pod; make exampleservice"
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Pause 60 seconds (work around bug in synchronizer)
   pause: seconds=60
@@ -13,6 +15,8 @@
 - name: Re-run 'make vtn' (work around bug in synchronizer)
   command: ansible xos-1 -u ubuntu -m shell \
     -a "cd ~/service-profile/cord-pod; make vtn"
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Wait for ExampleService VM to come up
   shell: bash -c "source ~/admin-openrc.sh; nova list --all-tenants|grep 'exampleservice.*ACTIVE' > /dev/null"
@@ -20,18 +24,26 @@
   until: result | success
   retries: 10
   delay: 60
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Get ID of VM
   shell: bash -c "source ~/admin-openrc.sh; nova list --all-tenants|grep mysite_exampleservice|cut -d '|' -f 2"
   register: nova_id
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Get mgmt IP of VM
   shell: bash -c "source ~/admin-openrc.sh; nova interface-list {{ nova_id.stdout }}|grep -o -m 1 172.27.[[:digit:]]*.[[:digit:]]*"
   register: mgmt_ip
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Get public IP of VM
   shell: bash -c "source ~/admin-openrc.sh; nova interface-list {{ nova_id.stdout }}|grep -o -m 1 10.168.[[:digit:]]*.[[:digit:]]*"
   register: public_ip
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Wait for Apache to come up inside VM
   shell: ssh -o ProxyCommand="ssh -W %h:%p ubuntu@nova-compute-1" ubuntu@{{ mgmt_ip.stdout }} "ls /var/run/apache2/apache2.pid" > /dev/null
@@ -39,15 +51,21 @@
   until: result | success
   retries: 20
   delay: 60
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Install curl in testclient
   command: ansible nova-compute-1 -u ubuntu -m shell \
     -s -a "lxc-attach -n testclient -- apt-get -y install curl"
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Test connectivity to ExampleService from test client
   command: ansible nova-compute-1 -u ubuntu -m shell \
     -s -a "lxc-attach -n testclient -- curl -s http://{{ public_ip.stdout }}"
   register: curltest
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Output from curl test
   debug: var=curltest.stdout_lines
diff --git a/roles/test-prep/tasks/main.yml b/roles/test-prep/tasks/main.yml
deleted file mode 100644
index 1ebf604..0000000
--- a/roles/test-prep/tasks/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# roles/test-prep/tasks/main.yml
-
-- name: Add local resolver to /etc/resolv.conf
-  lineinfile:
-    dest=/etc/resolv.conf
-    insertafter=".*DO NOT EDIT THIS FILE.*" 
-    line="nameserver 192.168.122.1"
-
diff --git a/roles/test-vsg/tasks/main.yml b/roles/test-vsg/tasks/main.yml
index 14ed325..eaad0a5 100644
--- a/roles/test-vsg/tasks/main.yml
+++ b/roles/test-vsg/tasks/main.yml
@@ -6,6 +6,8 @@
 - name: Create a sample CORD subscriber
   command: ansible xos-1 -u ubuntu -m shell \
     -a "cd ~/service-profile/cord-pod; make cord-subscriber"
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Pause 60 seconds (work around bug in synchronizer)
   pause: seconds=60
@@ -13,6 +15,8 @@
 - name: Re-run 'make vtn' (work around bug in synchronizer)
   command: ansible xos-1 -u ubuntu -m shell \
     -a "cd ~/service-profile/cord-pod; make vtn"
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Wait for vSG VM to come up
   shell: bash -c "source ~/admin-openrc.sh; nova list --all-tenants|grep 'vsg.*ACTIVE' > /dev/null"
@@ -20,14 +24,20 @@
   until: result | success
   retries: 10
   delay: 60
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Get ID of VM
   shell: bash -c "source ~/admin-openrc.sh; nova list --all-tenants|grep mysite_vsg|cut -d '|' -f 2"
   register: nova_id
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Get mgmt IP of VM
   shell: bash -c "source ~/admin-openrc.sh; nova interface-list {{ nova_id.stdout }}|grep -o -m 1 172.27.[[:digit:]]*.[[:digit:]]*"
   register: mgmt_ip
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Wait for Docker container inside VM to come up
   shell: ssh -o ProxyCommand="ssh -W %h:%p ubuntu@nova-compute-1" ubuntu@{{ mgmt_ip.stdout }} "sudo docker ps|grep vcpe" > /dev/null
@@ -35,15 +45,21 @@
   until: result | success
   retries: 20
   delay: 60
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Run dhclient inside testclient to get IP address from vSG
   command: ansible nova-compute-1 -u ubuntu -m shell \
     -s -a "lxc-attach -n testclient -- dhclient eth0.222.111"
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Test external connectivity in test client
   command: ansible nova-compute-1 -u ubuntu -m shell \
     -s -a "lxc-attach -n testclient -- ping -c 3 8.8.8.8"
   register: pingtest
+  tags:
+    - skip_ansible_lint # running a sub job
 
 - name: Output from ping test
-  debug: var=pingtest.stdout_lines
\ No newline at end of file
+  debug: var=pingtest.stdout_lines
diff --git a/roles/xos-compute-setup/tasks/main.yml b/roles/xos-compute-setup/tasks/main.yml
index b2689a7..a7b8414 100644
--- a/roles/xos-compute-setup/tasks/main.yml
+++ b/roles/xos-compute-setup/tasks/main.yml
@@ -5,3 +5,5 @@
 
 - name: ssh to XOS VM and run 'make new-nodes'
   command: ssh ubuntu@xos "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}/; make new-nodes"
+  tags:
+    - skip_ansible_lint # running a sub-job
diff --git a/roles/xos-install/tasks/main.yml b/roles/xos-install/tasks/main.yml
index 22a045a..7008edb 100644
--- a/roles/xos-install/tasks/main.yml
+++ b/roles/xos-install/tasks/main.yml
@@ -2,24 +2,28 @@
 # tasks for xos-install role
 
 - name: checkout XOS repo
-  git: repo={{ xos_repo_url }}
-       dest={{ xos_repo_dest }}
-       version={{ xos_repo_branch }}
+  git:
+   repo: "{{ xos_repo_url }}"
+   dest: "{{ xos_repo_dest }}"
+   version: "{{ xos_repo_branch }}"
 
 - name: checkout service-profile repo
-  git: repo={{ service_profile_repo_url }}
-       dest={{ service_profile_repo_dest }}
-       version={{ service_profile_repo_branch }}
+  git:
+    repo: "{{ service_profile_repo_url }}"
+    dest: "{{ service_profile_repo_dest }}"
+    version: "{{ service_profile_repo_branch }}"
 
 - name: Rebuild XOS containers
   when: xos_container_rebuild
-  command: make {{ item }}
-    chdir="{{ service_profile_repo_dest }}/{{ xos_configuration }}/"
+  make:
+    target: "{{ item }}"
+    chdir: "{{ service_profile_repo_dest }}/{{ xos_configuration }}/"
   with_items:
     - common_cloudlab
     - base
 
 - name: Initial build of XOS
-  command: make
-    chdir="{{ service_profile_repo_dest }}/{{ xos_configuration }}/"
+  make:
+    target: "{{ item }}"
+    chdir: "{{ service_profile_repo_dest }}/{{ xos_configuration }}/"
 
diff --git a/roles/xos-start/tasks/main.yml b/roles/xos-start/tasks/main.yml
index 0c98c67..6c7ebb1 100644
--- a/roles/xos-start/tasks/main.yml
+++ b/roles/xos-start/tasks/main.yml
@@ -4,10 +4,14 @@
 - name: Build XOS containers
   command: ansible xos-1 -u ubuntu -m shell \
     -a "bash -c \"set -o pipefail; cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make local_containers |& tee xos-build.out\""
+  tags:
+    - skip_ansible_lint
 
 - name: Onboard services and start XOS
   command: ansible xos-1 -u ubuntu -m shell \
     -a "bash -c \"set -o pipefail; cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make xos |& tee xos-onboard.out\""
+  tags:
+    - skip_ansible_lint
 
 - name: Pause to let XOS initialize
   pause: seconds=120
@@ -15,14 +19,23 @@
 - name: Initial VTN configuration
   command: ansible xos-1 -u ubuntu -m shell \
     -a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make vtn"
+  tags:
+    - skip_ansible_lint
 
 - name: Initial fabric configuration
   command: ansible xos-1 -u ubuntu -m shell \
     -a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make fabric"
+  tags:
+    - skip_ansible_lint
 
 - name: Pause to let ONOS initialize
   pause: seconds=20
+  tags:
+    - skip_ansible_lint
 
 - name: Configure CORD services
   command: ansible xos-1 -u ubuntu -m shell \
     -a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make cord"
+  tags:
+    - skip_ansible_lint
+
diff --git a/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml b/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml
index d258d54..5b646a5 100644
--- a/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml
+++ b/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml
@@ -63,6 +63,8 @@
       with_items:
         - xosproject/xos-postgres
         - xosproject/cord-app-build
+      tags:
+        - skip_ansible_lint # FIXME: use new 2.2 docker modules when available
 
     - name: Pull docker images for XOS
       when: not xos_container_rebuild
@@ -77,3 +79,5 @@
         chdir="{{ xos_repo_dest }}/containers/xos/"
       with_items:
        - base
+
+
diff --git a/roles/xos-vm-install/files/xos-setup-devel-playbook.yml b/roles/xos-vm-install/files/xos-setup-devel-playbook.yml
index 517f77d..f7d3851 100644
--- a/roles/xos-vm-install/files/xos-setup-devel-playbook.yml
+++ b/roles/xos-vm-install/files/xos-setup-devel-playbook.yml
@@ -62,6 +62,8 @@
       command: docker pull {{ item }}
       with_items:
         - xosproject/xos-postgres
+      tags:
+        - skip_ansible_lint # Should replace with http://docs.ansible.com/ansible/docker_module.html, when replacements are stable
 
     - name: Pull docker images for XOS
       when: not xos_container_rebuild
diff --git a/roles/xos-vm-install/tasks/main.yml b/roles/xos-vm-install/tasks/main.yml
index a4fc803..5ee7905 100644
--- a/roles/xos-vm-install/tasks/main.yml
+++ b/roles/xos-vm-install/tasks/main.yml
@@ -18,4 +18,6 @@
   async: 4800
   poll: 0
   register: xos_setup_playbook
+  tags:
+    - skip_ansible_lint # running a sub-job
 
diff --git a/scripts/compute-ext-net-tutorial.sh b/scripts/compute-ext-net-tutorial.sh
deleted file mode 100755
index 50090a9..0000000
--- a/scripts/compute-ext-net-tutorial.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-
-apt-get update
-apt-get install bridge-utils
-brctl addbr databr
-ifconfig databr 10.168.0.1/24 up
-ip link add address 02:42:0a:a8:00:01 type veth
-ifconfig veth0 up
-ifconfig veth1 up
-brctl addif databr veth0
-ip addr add 10.168.1.1/24 dev databr
-iptables -t nat -A POSTROUTING -s 10.168.0.0/16 \! -d 10.168.0.0/16 -j MASQUERADE
-sysctl -w net.ipv4.ip_forward=1
-sysctl -w net.ipv4.conf.all.send_redirects=0
-sysctl -w net.ipv4.conf.default.send_redirects=0
-sysctl -w net.ipv4.conf.eth0.send_redirects=0
-sysctl -w net.ipv4.conf.databr.send_redirects=0
\ No newline at end of file
diff --git a/scripts/compute-ext-net.sh b/scripts/compute-ext-net.sh
deleted file mode 100755
index 762d37f..0000000
--- a/scripts/compute-ext-net.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-
-apt-get update
-apt-get install bridge-utils
-brctl addbr databr
-ifconfig databr 10.168.0.1/24 up
-ip link add address 02:42:0a:a8:00:01 type veth
-ifconfig veth0 up
-ifconfig veth1 up
-brctl addif databr veth0
-iptables -t nat -A POSTROUTING -s 10.168.0.0/24 \! -d 10.168.0.0/24 -j MASQUERADE
-sysctl -w net.ipv4.ip_forward=1
diff --git a/scripts/create-vms-cord.sh b/scripts/create-vms-cord.sh
deleted file mode 100755
index a0af3ed..0000000
--- a/scripts/create-vms-cord.sh
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/bin/bash
-
-TESTING=false
-
-while [[ $# > 0 ]]
-do
-key="$1"
-
-case $key in
-    --testing)
-    TESTING=true
-    ;;
-    *)
-    ;;
-esac
-shift
-done
-
-function create-vm {
-	NAME=$1
-	CPU=$2
-	MEM_MB=$3
-	DISK_GB=$4
-	uvt-kvm list | grep $1
-	if [ "$?" -ne "0" ]
-	then
-		if $TESTING
-		then
-			# Don't use mgmtbr for testing
-			uvt-kvm create $NAME release=trusty --cpu=$CPU --memory=$MEM_MB --disk=$DISK_GB
-		else
-			uvt-kvm create $NAME release=trusty --cpu=$CPU --memory=$MEM_MB --disk=$DISK_GB --bridge mgmtbr
-		fi
-		# uvt-kvm wait --insecure $NAME
-	fi
-}
-
-function wait-for-vm {
-  NAME=$1
-  until dig $NAME && ssh ubuntu@$NAME "ls"
-  do
-    sleep 1
-  done
-}
-
-create-vm juju 1 2048 20
-create-vm mysql 2 4096 40
-create-vm rabbitmq-server 2 4096 40
-create-vm keystone 2 4096 40
-create-vm glance 2 4096 160
-create-vm nova-cloud-controller 2 4096 40
-create-vm neutron-api 2 4096 40
-create-vm openstack-dashboard 1 2048 20
-create-vm ceilometer 1 2048 20
-create-vm nagios 1 2048 20
-
-create-vm xos 2 4096 40
-create-vm onos-cord 2 4096 40
-create-vm onos-fabric 2 4096 40
-if $TESTING
-then
-	create-vm nova-compute 6 16384 240
-fi
-
-# Wait for everything to get set up
-wait-for-vm juju
-wait-for-vm mysql
-wait-for-vm rabbitmq-server
-wait-for-vm keystone
-wait-for-vm glance
-wait-for-vm nova-cloud-controller
-wait-for-vm neutron-api
-wait-for-vm openstack-dashboard
-wait-for-vm ceilometer
-wait-for-vm nagios
-
-wait-for-vm xos
-wait-for-vm onos-cord
-wait-for-vm onos-fabric
-if $TESTING
-then
-	wait-for-vm nova-compute
-fi
diff --git a/scripts/create-vms.sh b/scripts/create-vms.sh
deleted file mode 100755
index 1f1c789..0000000
--- a/scripts/create-vms.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-function create-vm {
-	NAME=$1
-	CPU=$2
-	MEM_MB=$3
-	DISK_GB=$4
-	uvt-kvm list | grep $1
-	if [ "$?" -ne "0" ]
-	then
-		uvt-kvm create $NAME release=trusty --cpu=$CPU --memory=$MEM_MB --disk=$DISK_GB
-		uvt-kvm wait --insecure $NAME
-	fi
-}
-
-create-vm juju 1 2048 20
-create-vm mysql 2 4096 40
-create-vm rabbitmq-server 2 4096 40
-create-vm keystone 2 4096 40
-create-vm glance 2 4096 160
-create-vm nova-cloud-controller 2 4096 40
-create-vm neutron-gateway 2 4096 40
-create-vm neutron-api 2 4096 40
-create-vm openstack-dashboard 1 2048 20
-create-vm ceilometer 1 2048 20
-create-vm nagios 1 2048 20
diff --git a/scripts/juju-compute-relations.py b/scripts/juju-compute-relations.py
deleted file mode 100755
index 535b5d4..0000000
--- a/scripts/juju-compute-relations.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/python
-
-import subprocess
-import time
-import argparse
-
-sleep_interval = 1
-
-relations = [
-    "nova-compute:shared-db mysql:shared-db",
-    "nova-compute:amqp rabbitmq-server:amqp",
-    "nova-compute glance",
-    "nova-compute nova-cloud-controller",
-    "nova-compute neutron-openvswitch",
-    "ntp nova-compute",
-    "nova-compute nagios",
-    "nova-compute nrpe",
-    "nova-compute:nova-ceilometer ceilometer-agent:nova-ceilometer",
-    ]
-
-def addrelation(relation):
-    subprocess.check_call("juju add-relation %s" % relation, shell=True)
-
-def destroyrelation(relation):
-    subprocess.check_call("juju destroy-relation %s" % relation, shell=True)
-
-def addrelations():
-    for relation in relations:
-        print "Adding relation %s" % relation
-        try:
-            addrelation(relation)
-            time.sleep(sleep_interval)
-        except:
-            pass
-
-def destroyrelations():
-    for relation in relations:
-        print "Destroying relation %s" % relation
-        try:
-            destroyrelation(relation)
-            time.sleep(sleep_interval)
-        except:
-            pass
-
-def main():
-    parser = argparse.ArgumentParser(description='Deploy OpenStack controller services')
-    parser.add_argument('--destroy', action='store_true',
-                       help='Destroy the relations instead of adding them')
-
-    args = parser.parse_args()
-    if args.destroy:
-        destroyrelations()
-    else:
-        addrelations()
-
-if  __name__ =='__main__':
-    main()
diff --git a/scripts/juju-compute-setup.py b/scripts/juju-compute-setup.py
deleted file mode 100755
index 326eaf3..0000000
--- a/scripts/juju-compute-setup.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/python
-
-import subprocess
-import json
-import time
-
-jujuconfig="/usr/local/src/openstack.cfg"
-
-services = {
-#    "nova-compute" : "--config=%s cs:~andybavier/trusty/nova-compute" % jujuconfig,
-    "nova-compute" : "--config=%s nova-compute" % jujuconfig,
-}
-
-def get_free_machines(status):
-    for (service, svcinfo) in status['services'].iteritems():
-        if 'units' in svcinfo:
-            for (unit, unitinfo) in svcinfo['units'].iteritems():
-                if 'machine' in unitinfo:
-                    machine = unitinfo['machine']
-                    status['machines'][machine]['unit'] = unit
-
-    free = {}
-    for (machine, mchinfo) in status['machines'].iteritems():
-        if machine == "0":
-            continue
-
-        if 'unit' not in mchinfo:
-            # print "%s: %s" % (machine, mchinfo['dns-name'])
-            free[machine] = mchinfo
-
-    return free
-
-
-def deploy(status, service, cmd):
-    # Deploy nova-compute to all free machines
-    machines = get_free_machines(status)
-
-    for (machine, mchinfo) in machines.iteritems():
-        if service in status['services']:
-            print "Adding unit %s on %s" % (service, mchinfo['dns-name'])
-            subprocess.check_call("juju add-unit --to=%s %s" % (machine, service), shell=True)
-        else:
-            print "Deploying service %s on %s" % (service, mchinfo['dns-name'])
-            subprocess.check_call("juju deploy --to=%s %s" % (machine, cmd), shell=True)
-            status['services'][service] = "installed"
-            time.sleep(10)
-
-def get_juju_status():
-    output = subprocess.check_output("juju status --format=json", shell=True)
-    status = json.loads(output)
-    return status
-
-def addservices():
-    status = get_juju_status()
-
-    for service, cmd in services.iteritems():
-        try:
-            deploy(status, service, cmd)
-        except:
-            pass
-
-def main():
-    addservices()
-
-if  __name__ =='__main__':
-    main()
diff --git a/scripts/juju-relations.py b/scripts/juju-relations.py
deleted file mode 100755
index 9c4b1f7..0000000
--- a/scripts/juju-relations.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/usr/bin/python
-
-import subprocess
-import time
-import argparse
-
-sleep_interval = 1
-
-relations = ["keystone mysql",
-             "nova-cloud-controller mysql",
-             "nova-cloud-controller rabbitmq-server",
-             "nova-cloud-controller glance",
-             "nova-cloud-controller keystone",
-             "glance mysql",
-             "glance keystone",
-             "neutron-gateway neutron-api",
-             "neutron-gateway:amqp rabbitmq-server:amqp",
-             "neutron-gateway nova-cloud-controller",
-             "neutron-gateway mysql",
-             "neutron-api keystone",
-             "neutron-api neutron-openvswitch",
-             "neutron-api mysql",
-             "neutron-api rabbitmq-server",
-             "neutron-api nova-cloud-controller",
-             "neutron-openvswitch rabbitmq-server",
-             "openstack-dashboard keystone",
-#             "mysql nagios",
-#             "rabbitmq-server nagios",
-#             "keystone nagios",
-#             "glance nagios",
-#             "nova-cloud-controller nagios",
-#             "neutron-gateway nagios",
-#             "openstack-dashboard nagios",
-#             "neutron-api nagios",
-             "nagios nrpe",
-             "mysql:juju-info nrpe:general-info",
-             "rabbitmq-server nrpe",
-             "keystone nrpe",
-             "glance nrpe",
-             "nova-cloud-controller nrpe",
-             "neutron-gateway nrpe",
-             "openstack-dashboard nrpe",
-             "neutron-api nrpe",
-             "ceilometer mongodb",
-             "ceilometer rabbitmq-server",
-             "ceilometer:identity-service keystone:identity-service",
-             "ceilometer:ceilometer-service ceilometer-agent:ceilometer-service",
-             "ceilometer nagios",
-             "ceilometer nrpe",
-             ]
-
-def addrelation(relation):
-    subprocess.check_call("juju add-relation %s" % relation, shell=True)
-
-def destroyrelation(relation):
-    subprocess.check_call("juju destroy-relation %s" % relation, shell=True)
-
-def addrelations():
-    for relation in relations:
-        print "Adding relation %s" % relation
-        try:
-            addrelation(relation)
-            time.sleep(sleep_interval)
-        except:
-            pass
-
-def destroyrelations():
-    for relation in relations:
-        print "Destroying relation %s" % relation
-        try:
-            destroyrelation(relation)
-            time.sleep(sleep_interval)
-        except:
-            pass
-
-def main():
-    parser = argparse.ArgumentParser(description='Deploy OpenStack controller services')
-    parser.add_argument('--destroy', action='store_true',
-                       help='Destroy the relations instead of adding them')
-
-    args = parser.parse_args()
-    if args.destroy:
-        destroyrelations()
-    else:
-        addrelations()
-
-if  __name__ =='__main__':
-    main()
diff --git a/scripts/juju-setup.py b/scripts/juju-setup.py
deleted file mode 100755
index 3a75b07..0000000
--- a/scripts/juju-setup.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/python
-
-import subprocess
-import json
-import socket
-
-jujuconfig="/usr/local/src/openstack.cfg"
-
-# Assumption: VMs have same hostname as service that runs inside
-machines = ["mysql", "rabbitmq-server", "keystone", "glance", "nova-cloud-controller",
-            "neutron-gateway", "openstack-dashboard", "ceilometer", "nagios", "neutron-api"]
-
-services = {
-    "mysql" : "mysql",
-    "rabbitmq-server" : "rabbitmq-server",
-    "keystone" : "--config=%s keystone" % jujuconfig,
-    "glance" : "--config=%s glance" % jujuconfig,
-#    "nova-cloud-controller" : "--config=%s cs:~andybavier/trusty/nova-cloud-controller" % jujuconfig,
-    "nova-cloud-controller" : "--config=%s nova-cloud-controller" % jujuconfig,
-    "neutron-gateway" : "--config=%s cs:~andybavier/trusty/neutron-gateway" % jujuconfig,
-#    "neutron-gateway" : "--config=%s neutron-gateway" % jujuconfig,
-    "neutron-api" : "--config=%s neutron-api" % jujuconfig,
-    "neutron-openvswitch" : "--config=%s neutron-openvswitch" % jujuconfig,
-    "openstack-dashboard" : "--config=%s openstack-dashboard" % jujuconfig,
-    "nagios" : "nagios",
-    "mongodb" : "mongodb",   # deploy to ceilometer machine
-    "ceilometer" : "ceilometer",
-    "nrpe" : "nrpe",
-    "ntp" : "ntp",
-    "ceilometer-agent" : "ceilometer-agent"
-}
-
-# Figure out Juju ID of machine we should install on
-def get_machine(status, service):
-    if service == "mongodb":
-        service = "ceilometer"
-    for key, value in status['machines'].iteritems():
-        (hostname, aliaslist, ipaddrlist) = socket.gethostbyaddr(value['dns-name'])
-        if hostname == service:
-            return key
-    return None
-
-def deploy(status, service, cmd):
-    if service in status['services']:
-        return
-
-    print "Installing %s" % service
-    machine = get_machine(status, service)
-    if machine:
-        subprocess.check_call("juju deploy --to=%s %s" % (machine, cmd), shell=True)
-    else:
-        subprocess.check_call("juju deploy %s" % cmd, shell=True)
-
-def get_juju_status():
-    output = subprocess.check_output("juju status --format=json", shell=True)
-    status = json.loads(output)
-    return status
-
-def addservices():
-    status = get_juju_status()
-
-    for service, cmd in services.iteritems():
-        try:
-            deploy(status, service, cmd)
-        except:
-            pass
-
-def addmachines():
-    status = get_juju_status()
-
-    for machine in machines:
-        if get_machine(status, machine) == None:
-            ipaddr = socket.gethostbyname(machine)
-            subprocess.check_call("juju add-machine ssh:%s" % ipaddr, shell=True)
-
-def main():
-    addmachines()
-    addservices()
-
-if  __name__ =='__main__':
-    main()
diff --git a/scripts/network-setup.sh b/scripts/network-setup.sh
deleted file mode 100755
index 05e4c12..0000000
--- a/scripts/network-setup.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-source ~/admin-openrc.sh
-
-function create-flat-net {
-    NAME=$1
-    neutron net-show $NAME-net 2>&1 > /dev/null
-    if [ "$?" -ne 0 ]
-    then
-	neutron net-create --provider:physical_network=$NAME --provider:network_type=flat --shared $NAME-net
-    fi
-}
-
-function create-subnet {
-    NAME=$1
-    CIDR=$2
-    GW=$3
-
-    neutron subnet-show $NAME-net 2>&1 > /dev/null
-    if [ "$?" -ne 0 ]
-    then
-	neutron subnet-create $NAME-net --name $NAME-net $CIDR --gateway=$GW --disable-dhcp
-    fi
-}
-
-function create-subnet-no-gateway {
-    NAME=$1
-    CIDR=$2
-
-    neutron subnet-show $NAME-net 2>&1 > /dev/null
-    if [ "$?" -ne 0 ]
-    then
-	neutron subnet-create $NAME-net --name $NAME-net $CIDR --no-gateway --disable-dhcp
-    fi
-}
-
-create-flat-net nat
-create-subnet nat 172.16.0.0/16 172.16.0.1
-
-create-flat-net ext
diff --git a/scripts/recreate-virbr0.sh b/scripts/recreate-virbr0.sh
deleted file mode 100644
index d38a6d5..0000000
--- a/scripts/recreate-virbr0.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-FILE=/etc/libvirt/qemu/networks/default.xml
-
-cp $FILE $FILE.tmp
-virsh net-destroy default
-virsh net-undefine default
-
-cp $FILE.tmp $FILE
-virsh net-create $FILE
diff --git a/scripts/single-node-pod.sh b/scripts/single-node-pod.sh
index 70a56aa..3f24801 100755
--- a/scripts/single-node-pod.sh
+++ b/scripts/single-node-pod.sh
@@ -27,7 +27,7 @@
 function bootstrap() {
     cd ~
     sudo apt-get update
-    sudo apt-get -y install software-properties-common curl git mosh tmux dnsutils python-netaddr
+    sudo apt-get -y install software-properties-common curl git tmux dnsutils python-netaddr python-dnspython
     sudo add-apt-repository -y ppa:ansible/ansible
     sudo apt-get update
     sudo apt-get install -y ansible
@@ -52,7 +52,7 @@
     if [[ "$XOS_REPO_URL" != "" ]]; then
         extra_vars="$extra_vars xos_repo_url=$XOS_REPO_URL"
     fi
-    if [[ "$XOS_BRANCH" != "" ]]; then 
+    if [[ "$XOS_BRANCH" != "" ]]; then
         extra_vars="$extra_vars xos_repo_branch=$XOS_BRANCH"
     fi
 
diff --git a/scripts/wait-for-services.sh b/scripts/wait-for-services.sh
deleted file mode 100755
index da4ef1f..0000000
--- a/scripts/wait-for-services.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-# Wait for there to be no services in pending state
-while $( juju status --format=summary|grep -q pending )
-do
-  sleep 10
-done