CORD-1346: remove unused roles

Remove roles that are no longer used by any playbook: config-virt,
create-vms, docker-compose, exampleservice-onboard, onos-vm-install,
xos-bootstrap, xos-build, xos-container-test-{onboarding,postgres,volt},
xos-head-start, xos-onboarding, and xos-uninstall.

Change-Id: Ica4d44d074ab143fbce83a66603d7add66845ee8
diff --git a/roles/config-virt/defaults/main.yml b/roles/config-virt/defaults/main.yml
deleted file mode 100644
index a134d20..0000000
--- a/roles/config-virt/defaults/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# roles/config-virt/defaults/main.yml
-
-virt_nets:
-  - mgmtbr
-
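Note: the default above only names the bridge; the config-virt templates also
read ipv4_prefix and head_vms from each virt_nets entry, so a complete
override (values hypothetical) would have looked roughly like:

    virt_nets:
      - name: mgmtbr
        ipv4_prefix: 192.168.250
        head_vms: true
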
diff --git a/roles/config-virt/handlers/main.yml b/roles/config-virt/handlers/main.yml
deleted file mode 100644
index 3761533..0000000
--- a/roles/config-virt/handlers/main.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-# roles/config-virt/handlers/main.yml
-
-- name: reload libvirt-bin
-  service:
-    name=libvirt-bin
-    state=restarted
-
-- name: run qemu hook
-  command: /etc/libvirt/hooks/qemu start start
diff --git a/roles/config-virt/tasks/main.yml b/roles/config-virt/tasks/main.yml
deleted file mode 100644
index f81de29..0000000
--- a/roles/config-virt/tasks/main.yml
+++ /dev/null
@@ -1,82 +0,0 @@
----
-# roles/config-virt/tasks/main.yml
-
-- name: Check to see if we already have a uvtool image
-  find:
-    path: "/var/lib/uvtool/libvirt/images"
-    patterns: "x-uvt-b64-*"
-  register: uvtool_image
-
-- name: Get ubuntu image for uvtool
-  when: "{{ uvtool_image.matched < 1 }}"
-  command: uvt-simplestreams-libvirt sync --source http://cloud-images.ubuntu.com/daily \
-    release={{ ansible_distribution_release }} arch=amd64
-  args:
-    creates: "/var/lib/uvtool/libvirt/images/x-uvt-b64-*"
-  async: 1200
-  poll: 0
-  register: uvt_sync
-
-- name: collect libvirt network facts
-  virt_net:
-    command=facts
-
-- name: Tear down libvirt's default network
-  when: not on_maas and ansible_libvirt_networks["default"] is defined
-  virt_net:
-    command={{ item }}
-    name=default
-  with_items:
-    - destroy
-    - undefine
-
-# note: this isn't idempotent, so it may need manual fixing if the network definition changes
-- name: define libvirt networks IP/DHCP/DNS settings
-  when: not on_maas
-  virt_net:
-    name=xos-{{ item.name }}
-    command=define
-    xml='{{ lookup("template", "virt_net.xml.j2") }}'
-  with_items: '{{ virt_nets }}'
-
-- name: collect libvirt network facts after defining new network
-  when: not on_maas
-  virt_net:
-    command=facts
-
-- name: start libvirt networks
-  when: not on_maas and ansible_libvirt_networks["xos-{{ item.name }}"].state != "active"
-  virt_net:
-    name=xos-{{ item.name }}
-    command=create
-  with_items: '{{ virt_nets }}'
-
-- name: have libvirt networks autostart
-  when: not on_maas and ansible_libvirt_networks["xos-{{ item.name }}"].autostart != "yes"
-  virt_net:
-    name=xos-{{ item.name }}
-    autostart=yes
-  with_items: '{{ virt_nets }}'
-
-
-- name: Have libvirt enable port forwarding to VMs
-  become: yes
-  template:
-    src={{ item }}.j2
-    dest=/etc/libvirt/hooks/{{ item }}
-    mode=0755 owner=root
-  with_items:
-    - daemon
-    - qemu
-  notify:
-    - reload libvirt-bin
-    - run qemu hook
-  when: not on_maas
-
-- name: Wait for uvt-kvm image to be available
-  when: "{{ uvtool_image.matched < 1 }}"
-  async_status: jid={{ uvt_sync.ansible_job_id }}
-  register: uvt_sync_result
-  until: uvt_sync_result.finished
-  delay: 10
-  retries: 120
diff --git a/roles/config-virt/templates/daemon.j2 b/roles/config-virt/templates/daemon.j2
deleted file mode 100644
index 852aef6..0000000
--- a/roles/config-virt/templates/daemon.j2
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/sh
-
-SHELL="/bin/bash"
-
-NIC=$( route|grep default|awk '{print $NF}' )
-
-NAME="${1}"
-OP="${2}"
-SUBOP="${3}"
-ARGS="${4}"
-
-add_port_fwd_rule() {
-    DPORT=$1
-    VMIP=$2
-    TOPORT=$3
-
-    iptables -t nat -C PREROUTING -p tcp -i $NIC --dport $DPORT -j DNAT --to-destination $VMIP:$TOPORT
-    if [ "$?" -ne 0 ]
-    then
-        iptables -t nat -A PREROUTING -p tcp -i $NIC --dport $DPORT -j DNAT --to-destination $VMIP:$TOPORT
-    fi
-}
-
-if [ "$OP" = "start" ] || [ "$OP" = "reload" ]
-then
-    iptables -t nat -F
-
-{% for vm in head_vm_list -%}
-{% if vm.forwarded_ports is defined -%}
-{% set vm_net = ( virt_nets | selectattr("head_vms", "defined") | first ) %}
-{% for port in vm.forwarded_ports -%}
-    add_port_fwd_rule {{ port.ext }} "{{ vm_net.ipv4_prefix }}.{{ vm.ipv4_last_octet }}" {{ port.int }}
-{% endfor -%}
-{% endif -%}
-{% endfor -%}
-
-    # Also flush the filter table before rules are re-added
-    iptables -F
-fi
-
diff --git a/roles/config-virt/templates/qemu.j2 b/roles/config-virt/templates/qemu.j2
deleted file mode 100644
index 9d20379..0000000
--- a/roles/config-virt/templates/qemu.j2
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/sh
-
-SHELL="/bin/bash"
-
-NIC=$( route|grep default|awk '{print $NF}' )
-PORTAL=$( dig +short portal.opencloud.us | tail -1 )
-
-SUBNET=$( ip addr show $NIC|grep "inet "|awk '{print $2}' )
-{% set vm_net = ( virt_nets | selectattr("head_vms", "defined") | first ) %}
-PRIVATENET=$( ip addr show {{ vm_net.name }} |grep "inet "|awk '{print $2}' )
-
-NAME="${1}"
-OP="${2}"
-SUBOP="${3}"
-ARGS="${4}"
-
-add_rule() {
-    CHAIN=$1
-    ARGS=$2
-    iptables -C $CHAIN $ARGS
-    if [ "$?" -ne 0 ]
-    then
-        iptables -I $CHAIN 1 $ARGS
-    fi
-}
-
-add_local_access_rules() {
-    add_rule "FORWARD" "-s $SUBNET -j ACCEPT"
-    # Don't NAT traffic from service VMs destined to the local subnet
-    add_rule "POSTROUTING" "-t nat -s $PRIVATENET -d $SUBNET -j RETURN"
-}
-
-add_portal_access_rules() {
-    add_rule "FORWARD" "-s $PORTAL -j ACCEPT"
-}
-
-add_web_access_rules() {
-    add_rule "FORWARD" "-p tcp --dport 80 -j ACCEPT"
-}
-
-if [ "$OP" = "start" ]
-then
-	add_local_access_rules
-	add_portal_access_rules
-	add_web_access_rules
-fi
diff --git a/roles/config-virt/templates/virt_net.xml.j2 b/roles/config-virt/templates/virt_net.xml.j2
deleted file mode 100644
index ad043e9..0000000
--- a/roles/config-virt/templates/virt_net.xml.j2
+++ /dev/null
@@ -1,28 +0,0 @@
-<network>
-  <name>xos-{{ item.name }}</name>
-  <bridge name="{{ item.name }}"/>
-  <forward/>
-  <domain name="{{ site_suffix }}" localonly="no"/>
-  <dns>
-{% if unbound_listen_on_default %}
-{% for host in groups['head'] %}
-  <forwarder addr="{{ hostvars[host].ansible_default_ipv4.address }}"/>
-{% endfor %}
-{% endif %}
-{% if dns_servers is defined %}
-{% for ns in dns_servers %}
-  <forwarder addr="{{ ns }}"/>
-{% endfor %}
-{% endif %}
-  </dns>
-  <ip address="{{ item.ipv4_prefix }}.1" netmask="255.255.255.0">
-    <dhcp>
-      <range start="{{ item.ipv4_prefix }}.2" end="{{ item.ipv4_prefix }}.254"/>
-{% if item.head_vms %}
-{% for vm in head_vm_list %}
-      <host name='{{ vm.name }}' ip='{{ item.ipv4_prefix }}.{{ vm.ipv4_last_octet }}'/>
-{% endfor %}
-{% endif %}
-    </dhcp>
-  </ip>
-</network>
diff --git a/roles/create-vms/files/docker-install-playbook.yml b/roles/create-vms/files/docker-install-playbook.yml
deleted file mode 100644
index 484f196..0000000
--- a/roles/create-vms/files/docker-install-playbook.yml
+++ /dev/null
@@ -1,59 +0,0 @@
----
-# Installs docker with apt, docker-compose with pip, adds user to group
-# Must be run as root
-
-- hosts: docker
-  remote_user: ubuntu
-  become: yes
-
-  tasks:
-
-    # https://docs.docker.com/engine/installation/linux/ubuntulinux/
-    - name: Prereqs and SSL support for apt
-      apt:
-        name={{ item }}
-        update_cache=yes
-        cache_valid_time=3600
-      with_items:
-        - apt-transport-https
-        - ca-certificates
-        - python-pip
-
-    - name: Trust docker apt key
-      apt_key:
-        data="{{ lookup('file', 'docker_apt_key.gpg') }}"
-
-    - name: Add docker apt repo
-      apt_repository:
-        repo: "{{ docker_apt_repo | default('deb https://apt.dockerproject.org/repo ubuntu-trusty main') }}"
-
-    - name: Install docker
-      apt:
-        update_cache=yes
-        cache_valid_time=3600
-        name=docker-engine
-
-    - name: Install docker-compose from web
-      get_url:
-        url=https://github.com/docker/compose/releases/download/1.7.1/docker-compose-Linux-x86_64
-        checksum=sha256:3166bb74bc648e68c3154bc704fddf6bccf59f03a0c90fc48aefac034535e4ae
-        dest=/usr/local/bin/docker-compose
-        owner=root mode=0755
-
-    # This installs a bunch of prereqs that currently break SSL and CAs
-    # https://docs.docker.com/compose/install/#install-using-pip
-    #- name: Install docker-compose from PyPi
-    #  pip:
-    #    name=docker-compose
-
-    - name: Make ubuntu user part of the Docker group
-      user:
-        name="ubuntu"
-        groups="docker"
-        append=yes
-
-    - name: restart Docker daemon to get new group membership
-      service:
-        name=docker
-        state=restarted
-
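
Note: the playbook above lets docker_apt_repo override the default trusty
repo; a hypothetical group_vars entry for a xenial VM would have looked like:

    docker_apt_repo: "deb https://apt.dockerproject.org/repo ubuntu-xenial main"
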
diff --git a/roles/create-vms/files/docker_apt_key.gpg b/roles/create-vms/files/docker_apt_key.gpg
deleted file mode 100644
index f63466b..0000000
--- a/roles/create-vms/files/docker_apt_key.gpg
+++ /dev/null
@@ -1,47 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: SKS 1.1.5
-
-mQINBFWln24BEADrBl5p99uKh8+rpvqJ48u4eTtjeXAWbslJotmC/CakbNSqOb9oddfzRvGV
-eJVERt/Q/mlvEqgnyTQy+e6oEYN2Y2kqXceUhXagThnqCoxcEJ3+KM4RmYdoe/BJ/J/6rHOj
-q7Omk24z2qB3RU1uAv57iY5VGw5p45uZB4C4pNNsBJXoCvPnTGAs/7IrekFZDDgVraPx/hdi
-wopQ8NltSfZCyu/jPpWFK28TR8yfVlzYFwibj5WKdHM7ZTqlA1tHIG+agyPf3Rae0jPMsHR6
-q+arXVwMccyOi+ULU0z8mHUJ3iEMIrpTX+80KaN/ZjibfsBOCjcfiJSB/acn4nxQQgNZigna
-32velafhQivsNREFeJpzENiGHOoyC6qVeOgKrRiKxzymj0FIMLru/iFF5pSWcBQB7PYlt8J0
-G80lAcPr6VCiN+4cNKv03SdvA69dCOj79PuO9IIvQsJXsSq96HB+TeEmmL+xSdpGtGdCJHHM
-1fDeCqkZhT+RtBGQL2SEdWjxbF43oQopocT8cHvyX6Zaltn0svoGs+wX3Z/H6/8P5anog43U
-65c0A+64Jj00rNDr8j31izhtQMRo892kGeQAaaxg4Pz6HnS7hRC+cOMHUU4HA7iMzHrouAdY
-eTZeZEQOA7SxtCME9ZnGwe2grxPXh/U/80WJGkzLFNcTKdv+rwARAQABtDdEb2NrZXIgUmVs
-ZWFzZSBUb29sIChyZWxlYXNlZG9ja2VyKSA8ZG9ja2VyQGRvY2tlci5jb20+iQIcBBABCgAG
-BQJWw7vdAAoJEFyzYeVS+w0QHysP/i37m4SyoOCVcnybl18vzwBEcp4VCRbXvHvOXty1gccV
-IV8/aJqNKgBV97lY3vrpOyiIeB8ETQegsrxFE7t/Gz0rsLObqfLEHdmn5iBJRkhLfCpzjeOn
-yB3Z0IJB6UogO/msQVYe5CXJl6uwr0AmoiCBLrVlDAktxVh9RWch0l0KZRX2FpHu8h+uM0/z
-ySqIidlYfLa3y5oHscU+nGU1i6ImwDTD3ysZC5jp9aVfvUmcESyAb4vvdcAHR+bXhA/RW8QH
-eeMFliWw7Z2jYHyuHmDnWG2yUrnCqAJTrWV+OfKRIzzJFBs4e88ru5h2ZIXdRepw/+COYj34
-LyzxR2cxr2u/xvxwXCkSMe7F4KZAphD+1ws61FhnUMi/PERMYfTFuvPrCkq4gyBjt3fFpZ2N
-R/fKW87QOeVcn1ivXl9id3MMs9KXJsg7QasT7mCsee2VIFsxrkFQ2jNpD+JAERRn9Fj4ArHL
-5TbwkkFbZZvSi6fr5h2GbCAXIGhIXKnjjorPY/YDX6X8AaHOW1zblWy/CFr6VFl963jrjJga
-g0G6tNtBZLrclZgWhOQpeZZ5Lbvz2ZA5CqRrfAVcwPNW1fObFIRtqV6vuVluFOPCMAAnOnqR
-02w9t17iVQjO3oVN0mbQi9vjuExXh1YoScVetiO6LSmlQfVEVRTqHLMgXyR/EMo7iQIcBBAB
-CgAGBQJXSWBlAAoJEFyzYeVS+w0QeH0QAI6btAfYwYPuAjfRUy9qlnPhZ+xt1rnwsUzsbmo8
-K3XTNh+l/R08nu0dsczw30Q1wju28fh1N8ay223+69f0+yICaXqR18AbGgFGKX7vo0gfEVax
-dItUN3eHNydGFzmeOKbAlrxIMECnSTG/TkFVYO9Ntlv9vSN2BupmTagTRErxLZKnVsWRzp+X
-elwlgU5BCZ6U6Ze8+bIc6F1bZstf17X8i6XNV/rOCLx2yP0hn1osoljoLPpW8nzkwvqYsYbC
-A28lMt1aqe0UWvRCqR0zxlKn17NZQqjbxcajEMCajoQ01MshmO5GWePViv2abCZ/iaC5zKqV
-T3deMJHLq7lum6qhA41E9gJH9QoqT+qgadheeFfoC1QP7cke+tXmYg2R39p3l5Hmm+JQbP4f
-9V5mpWExvHGCSbcatr35tnakIJZugq2ogzsm1djCSz9222RXl9OoFqsm1bNzA78+/cOt5N2c
-yhU0bM2T/zgh42YbDD+JDU/HSmxUIpU+wrGvZGM2FU/up0DRxOC4U1fL6HHlj8liNJWfEg3v
-hougOh66gGF9ik5j4eIlNoz6lst+gmvlZQ9/9hRDeoG+AbhZeIlQ4CCw+Y1j/+fUxIzKHPVK
-+aFJd+oJVNvbojJW/SgDdSMtFwqOvXyYcHl30Ws0gZUeDyAmNGZeJ3kFklnApDmeKK+OiQI4
-BBMBAgAiBQJVpZ9uAhsvBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRD3YiFXLFJgnbRf
-EAC9Uai7Rv20QIDlDogRzd+Vebg4ahyoUdj0CH+nAk40RIoq6G26u1e+sdgjpCa8jF6vrx+s
-mpgd1HeJdmpahUX0XN3X9f9qU9oj9A4I1WDalRWJh+tP5WNv2ySy6AwcP9QnjuBMRTnTK27p
-k1sEMg9oJHK5p+ts8hlSC4SluyMKH5NMVy9c+A9yqq9NF6M6d6/ehKfBFFLG9BX+XLBATvf1
-ZemGVHQusCQebTGv0C0V9yqtdPdRWVIEhHxyNHATaVYOafTj/EF0lDxLl6zDT6trRV5n9F1V
-CEh4Aal8L5MxVPcIZVO7NHT2EkQgn8CvWjV3oKl2GopZF8V4XdJRl90U/WDv/6cmfI08GkzD
-YBHhS8ULWRFwGKobsSTyIvnbk4NtKdnTGyTJCQ8+6i52s+C54PiNgfj2ieNn6oOR7d+bNCcG
-1CdOYY+ZXVOcsjl73UYvtJrO0Rl/NpYERkZ5d/tzw4jZ6FCXgggA/Zxcjk6Y1ZvIm8Mt8wLR
-FH9Nww+FVsCtaCXJLP8DlJLASMD9rl5QS9Ku3u7ZNrr5HWXPHXITX660jglyshch6CWeiUAT
-qjIAzkEQom/kEnOrvJAtkypRJ59vYQOedZ1sFVELMXg2UCkD/FwojfnVtjzYaTCeGwFQeqzH
-mM241iuOmBYPeyTY5veF49aBJA1gEJOQTvBR8Q==
-=74V2
------END PGP PUBLIC KEY BLOCK-----
diff --git a/roles/create-vms/library/host_dns_check.py b/roles/create-vms/library/host_dns_check.py
deleted file mode 100755
index cbd39ae..0000000
--- a/roles/create-vms/library/host_dns_check.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env python
-
-import sys
-import json
-import shlex
-import subprocess
-
-# Assume nothing has changed
-result = {
-    "changed" : False,
-    "everyone" : "OK"
-}
-
-# read the argument string from the arguments file
-args_file = sys.argv[1]
-args_data = file(args_file).read()
-
-# Variables for the task options
-host_list = []
-command_on_fail = None
-
-# parse the task options
-arguments = shlex.split(args_data)
-
-for arg in arguments:
-    # ignore any arguments without an equals sign in them
-    if "=" in arg:
-        (key, value) = arg.split("=")
-
-    if key == "hosts":
-        # The list of hosts comes in as a string that looks sort of like a python
-        # list, so do some replace magic so we can parse it into a real list
-        try:
-            value = value.replace("u'", "").replace("'", "")
-            value = json.loads(value)
-            host_list = value
-        except Exception as e:
-            result["everyone"] = "Not OK"
-            result["failed"] = True
-            result["msg"] = "Unable to parse 'hosts' argument to module : '%s'" % (e)
-            print json.dumps(result)
-            sys.stdout.flush()
-            sys.exit(1)
-    if key == "command_on_fail":
-        command_on_fail = value
-
-for host in host_list:
-    # Attempt to resolve each hostname; if a host can't be resolved, fail the task
-    try:
-        if subprocess.check_output(["dig", "+short", "+search", host]) == '':
-            result["everyone"] = "Not OK"
-            result["failed"] = True
-            result["msg"] = "Unable to resolve host '%s'" % (host)
-    except Exception as e:
-        result["everyone"] = "Not OK"
-        result["failed"] = True
-        result["msg"] = "Error encountered while resolving '%s' : '%s'" % (host, e)
-        print json.dumps(result)
-        sys.stdout.flush()
-        sys.exit(1)
-
-# If not all hosts were resolved and a failure command was specified then call that
-# command and capture the results.
-if command_on_fail != None:
-    result["command_on_fail"] = {}
-    result["command_on_fail"]["command"] = command_on_fail
-    try:
-        cmd_out = subprocess.check_output(shlex.split(command_on_fail), stderr=subprocess.STDOUT)
-        result["command_on_fail"]["retcode"] = 0
-        result["command_on_fail"]["out"] = cmd_out
-    except subprocess.CalledProcessError as e:
-        result["command_on_fail"]["retcode"] = e.returncode
-        result["command_on_fail"]["out"] = e.output
-
-# Output the results
-print json.dumps(result)
-
-if result["failed"]:
-    sys.exit(1)
diff --git a/roles/create-vms/tasks/main.yml b/roles/create-vms/tasks/main.yml
deleted file mode 100644
index a070aa4..0000000
--- a/roles/create-vms/tasks/main.yml
+++ /dev/null
@@ -1,110 +0,0 @@
----
-# file: create-vms/tasks/main.yml
-- name: Ensure dig is installed
-  become: yes
-  apt:
-    name: dnsutils=1:9*
-    state: present
-
-- name: create Virtual Machines with uvt-kvm
-  command: uvt-kvm create {{ item.name }} release={{ ansible_distribution_release }} \
-    --cpu={{ item.cpu }} --memory={{ item.memMB }} --disk={{ item.diskGB }} --bridge="mgmtbr"
-  args:
-    creates: "/var/lib/uvtool/libvirt/images/{{ item.name }}.qcow"
-  with_items: "{{ head_vm_list }}"
-
-- name: Have VMs autostart on reboot
-  become: yes
-  virt:
-    name: "{{ item.name }}"
-    command: autostart
-  with_items: "{{ head_vm_list }}"
-
-- name: fetch IP of DHCP harvester
-  when: on_maas
-  command: docker-ip harvester
-  register: harvester_ip
-  changed_when: False
-
-- name: force a harvest to get VM name resolution
-  when: on_maas
-  uri:
-    url: http://{{ harvester_ip.stdout }}:8954/harvest
-    method: POST
-
-- name: wait for VM name resolution
-  when: on_maas
-  host_dns_check:
-    hosts: "{{ head_vm_list | map(attribute='name') | list | to_json }}"
-    command_on_fail: "curl -sS --connect-timeout 3 -XPOST http://{{ harvester_ip.stdout }}:8954/harvest"
-  register: all_resolved
-  until: all_resolved.everyone == "OK"
-  retries: 5
-  delay: 10
-  failed_when: all_resolved.everyone != "OK"
-
-- name: wait for VMs to come up
-  wait_for:
-    host={{ item.name }}
-    port=22
-  with_items: "{{ head_vm_list }}"
-
-- name: Verify that we can log into every VM
-  command: ansible vms -m ping -u ubuntu
-  tags:
-    - skip_ansible_lint # connectivity check
-
-- name: Have VMs use the apt-cache
-  command: ansible vms -b -u ubuntu -m lineinfile -a "dest=/etc/apt/apt.conf.d/02apt-cacher-ng create=yes mode=0644 owner=root group=root regexp='^Acquire' line='Acquire::http { Proxy \"http://{{ apt_cacher_name }}:{{ apt_cacher_port | default('3142') }}\"; };'"
-  tags:
-    - skip_ansible_lint # running a sub job
-
-- name: Update apt cache
-  command: ansible vms -m apt -b -u ubuntu -a "update_cache=yes cache_valid_time=3600"
-  tags:
-    - skip_ansible_lint # running a sub job
-
-- name: Update software in all the VMs
-  when: run_dist_upgrade
-  command: ansible vms -m apt -b -u ubuntu -a "upgrade=dist"
-  tags:
-    - skip_ansible_lint # running a sub job
-
-- name: Create VMs' eth0 interface config file for DNS config via the resolvconf program
-  when: not on_maas
-  template:
-    src=eth0.cfg.j2
-    dest={{ ansible_user_dir }}/eth0.cfg
-
-- name: Copy eth0 interface config file to all VMs
-  when: not on_maas
-  command: ansible vms -b -u ubuntu -m copy -a "src={{ ansible_user_dir }}/eth0.cfg dest=/etc/network/interfaces.d/eth0.cfg owner=root group=root mode=0644"
-
-- name: Restart eth0 interface on all VMs
-  when: not on_maas
-  command: ansible vms -b -u ubuntu -m shell -a "ifdown eth0 ; ifup eth0"
-
-- name: Verify that we can log into every VM after restarting network interfaces
-  when: not on_maas
-  command: ansible vms -m ping -u ubuntu
-
-# sshkey is registered in head-prep task
-- name: Enable root ssh login on VMs that require it
-  command: ansible {{ item.name }} -b -u ubuntu -m authorized_key -a "user='root' key='{{ sshkey.stdout }}'"
-  with_items: "{{ head_vm_list | selectattr('root_ssh_login', 'defined') | list }}"
-  tags:
-    - skip_ansible_lint # FIXME, ssh key mangling
-
-- name: Copy over docker installation playbook and docker apt-key
-  copy:
-    src="{{ item }}"
-    dest="{{ ansible_user_dir }}/{{ item }}"
-  with_items:
-    - "docker-install-playbook.yml"
-    - "docker_apt_key.gpg"
-
-- name: Install docker in VMs that require it
-  command: ansible-playbook "{{ ansible_user_dir }}/docker-install-playbook.yml"
-  tags:
-    - skip_ansible_lint # running a sub job
-
diff --git a/roles/create-vms/templates/eth0.cfg.j2 b/roles/create-vms/templates/eth0.cfg.j2
deleted file mode 100644
index 94c1062..0000000
--- a/roles/create-vms/templates/eth0.cfg.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-# The primary network interface
-auto eth0
-iface eth0 inet dhcp
-{% if unbound_listen_on_default %}
-    dns-nameservers{% for host in groups['head'] %} {{ hostvars[host].ansible_default_ipv4.address }}{% endfor %} 
-{% endif %}
-{% if dns_servers is defined %}
-    dns-nameservers{% for ns in dns_servers %} {{ ns }}{% endfor %} 
-{% endif %}
-{% if dns_search is defined %}
-    dns-search{% for searchdom in dns_search %} {{ searchdom }}{% endfor %}
-{% endif %}
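
Note: the template above fills in resolvconf options from optional inventory
vars; hypothetical values for illustration:

    dns_servers:
      - 8.8.8.8
      - 8.8.4.4
    dns_search:
      - example.com
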
diff --git a/roles/docker-compose/tasks/main.yml b/roles/docker-compose/tasks/main.yml
deleted file mode 100644
index 1c1b0d0..0000000
--- a/roles/docker-compose/tasks/main.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-# docker-compose/tasks/main.yml
-
-# The following two tests are equivalent and both evaluate to 0 when
-# `onos-cord-1` VM isn't set to be created, but the `equalto` test only exists
-# in Jinja v2.7.4, which is later than what's in Ubuntu 14.04 (v2.7.2).
-#
-# {{ head_vm_list | selectattr('name', 'equalto', 'onos-cord-1') | list | length }}
-# {{ head_vm_list | map(attribute='name') | list | intersect(['onos-cord-1']) | list | length }}
-
-#- name: Wait for onos_setup_playbook to complete
-#  when: "{{ head_vm_list | map(attribute='name') | list | intersect(['onos-cord-1']) | list | length }}"
-#  async_status: jid={{ onos_setup_playbook.ansible_job_id }}
-#  register: onos_setup_playbook_result
-#  until: onos_setup_playbook_result.finished
-#  delay: 10
-#  retries: 120
-
-#- name: Copy SSL Certs to ONOS so docker-compose can find it
-#  when: "{{ head_vm_list | map(attribute='name') | list | intersect(['onos-cord-1']) | list | length }}"
-#  command: ansible onos-cord-1 -u ubuntu -m copy \
-#    -a "src=/usr/local/share/ca-certificates/cord_ca_cert.pem dest=~/cord/xos-certs.crt"
-
-#- name: Build ONOS image with docker-compose
-#  when: "{{ head_vm_list | map(attribute='name') | list | intersect(['onos-cord-1']) | list | length }}"
-#  command: ansible onos-cord-1 -u ubuntu -m command \
-#    -a "docker-compose build chdir=cord"
-
-#- name: Start ONOS
-#  when: "{{ head_vm_list | map(attribute='name') | list | intersect(['onos-cord-1']) | list | length }}"
-#  command: ansible onos-cord-1:onos-fabric-1 -u ubuntu -m command \
-#    -a "docker-compose up -d chdir=cord"
-
-- name: Wait for test client to complete installation
-  when: test_client_install is defined and test_client_install
-  async_status: jid={{ test_client_playbook.ansible_job_id }}
-  register: test_client_playbook_result
-  until: test_client_playbook_result.finished
-  delay: 10
-  retries: 120
-
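Note: the async_status poll above assumes an earlier role started the
test-client install with async and registered test_client_playbook, mirroring
the pattern in onos-vm-install below; a sketch (playbook name hypothetical):

    - name: Install test client
      command: ansible-playbook {{ ansible_user_dir }}/test-client-playbook.yml
      async: 1800
      poll: 0
      register: test_client_playbook
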
diff --git a/roles/exampleservice-onboard/defaults/main.yml b/roles/exampleservice-onboard/defaults/main.yml
deleted file mode 100644
index 463326e..0000000
--- a/roles/exampleservice-onboard/defaults/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-# exampleservice-onboard/defaults/main.yml
-
-cord_dir: "{{ ansible_user_dir + '/cord' }}"
-cord_profile_dir: "{{ ansible_user_dir + '/cord_profile' }}"
-
-xos_bootstrap_ui_port: 9001
-
diff --git a/roles/exampleservice-onboard/tasks/main.yml b/roles/exampleservice-onboard/tasks/main.yml
deleted file mode 100644
index 44f9e90..0000000
--- a/roles/exampleservice-onboard/tasks/main.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-# exampleservice-onboard/tasks/main.yml
-
-- name: Disable onboarding
-  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/cord_profile/disable-onboarding.yaml"
-  tags:
-    - skip_ansible_lint # TOSCA loading should be idempotent
-
-- name: Have XOS container mount exampleservice volume
-  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/cord_profile/xos-exampleservice.yaml"
-  tags:
-    - skip_ansible_lint # TOSCA loading should be idempotent
-
-- name: Onboard exampleservice
-  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/cord_profile/exampleservice-onboard.yaml"
-  tags:
-    - skip_ansible_lint # TOSCA loading should be idempotent
-
-- name: Enable onboarding
-  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/cord_profile/enable-onboarding.yaml"
-  tags:
-    - skip_ansible_lint # TOSCA loading should be idempotent
-
-- name: Wait for exampleservice to be onboarded
-  uri:
-    url: "http://localhost:{{ xos_bootstrap_ui_port }}/api/utility/onboarding/services/exampleservice/ready/"
-    method: GET
-    return_content: yes
-  register: xos_onboard_status
-  until: '"true" in xos_onboard_status.content'
-  retries: 60
-  delay: 10
-
-- name: Wait for XOS to be onboarded after exampleservice onboarding
-  uri:
-    url: "http://localhost:{{ xos_bootstrap_ui_port }}/api/utility/onboarding/xos/ready/"
-    method: GET
-    return_content: yes
-  register: xos_onboard_status
-  until: '"true" in xos_onboard_status.content'
-  retries: 60
-  delay: 10
-
diff --git a/roles/onos-vm-install/defaults/main.yml b/roles/onos-vm-install/defaults/main.yml
deleted file mode 100644
index 4516334..0000000
--- a/roles/onos-vm-install/defaults/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# onos-vm-install/defaults/main.yml
-
-trust_store_pw: 222222
-
-# ONOS 1.7 isn't tagged yet, but 'latest' currently points at 1.7
-onos_docker_image: "onosproject/onos:latest"
-
-
diff --git a/roles/onos-vm-install/files/fabric-docker-compose.yml b/roles/onos-vm-install/files/fabric-docker-compose.yml
deleted file mode 100644
index a7713bf..0000000
--- a/roles/onos-vm-install/files/fabric-docker-compose.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-# Basic docker-compose file for ONOS
-version: '2'
-
-services:
-
-   xos-onos:
-      image: onosproject/onos:latest
-      expose:
-      - "6653"
-      - "8101"
-      - "8181"
-      - "9876"
-      network_mode: host
-
diff --git a/roles/onos-vm-install/files/onos-docker-compose.yml b/roles/onos-vm-install/files/onos-docker-compose.yml
deleted file mode 100644
index c57afcb..0000000
--- a/roles/onos-vm-install/files/onos-docker-compose.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# ONOS with XOS features for docker-compose
-version: '2'
-
-services:
-
-   xos-onos:
-      build:
-       context: .
-       dockerfile: Dockerfile.xos-onos
-      image: xos/onos
-      expose:
-      - "6653"
-      - "8101"
-      - "8181"
-      - "9876"
-      network_mode: host
-      volumes:
-      - ./node_key:/root/node_key:ro
diff --git a/roles/onos-vm-install/files/onos-setup-playbook.yml b/roles/onos-vm-install/files/onos-setup-playbook.yml
deleted file mode 100644
index 56bf06e..0000000
--- a/roles/onos-vm-install/files/onos-setup-playbook.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-# Common ONOS setup
-- hosts: onos-cord-1:onos-fabric-1
-  remote_user: ubuntu
-
-  tasks:
-    - name: Include configuration vars
-      include_vars: onos-setup-vars.yml
-
-    - name: Create CORD directory
-      file:
-        path: "{{ ansible_user_dir }}/cord"
-        state: directory
-
-    - name: Pull docker image for ONOS
-      become: yes
-      command: "docker pull {{ onos_docker_image }}"
-      tags:
-        - skip_ansible_lint # Should replace with http://docs.ansible.com/ansible/docker_module.html, when replacements are stable
-
-# Setup specific for onos-cord VM
-- hosts: onos-cord-1
-  remote_user: ubuntu
-
-  tasks:
-    - name: Copy over SSH key
-      copy:
-        src: "{{ node_private_key }}"
-        dest: "{{ ansible_user_dir }}/cord/node_key"
-        owner: "{{ ansible_user_id }}"
-        mode: 0600
-
-    - name: Copy over files to build XOS variant of ONOS
-      copy:
-        src: "~/{{ item }}"
-        dest: "{{ ansible_user_dir }}/cord/{{ item }}"
-      with_items:
-       - Dockerfile.xos-onos
-       - onos-service
-
-    - name: Copy over & rename docker-compose file
-      copy:
-        src: "~/onos-docker-compose.yml"
-        dest: "{{ ansible_user_dir }}/cord/docker-compose.yml"
-
-- hosts: onos-fabric-1
-  remote_user: ubuntu
-
-  tasks:
-    - name: Copy over & rename docker-compose file
-      copy:
-        src: "~/fabric-docker-compose.yml"
-        dest: "{{ ansible_user_dir }}/cord/docker-compose.yml"
diff --git a/roles/onos-vm-install/tasks/main.yml b/roles/onos-vm-install/tasks/main.yml
deleted file mode 100644
index 1f2eedc..0000000
--- a/roles/onos-vm-install/tasks/main.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-# onos-vm-install/tasks/main.yml
-#
-# Install ONOS on a sub-VM by calling ansible
-
-- name: Create templated ONOS files
-  template:
-    src: "{{ item }}.j2"
-    dest: "{{ ansible_user_dir }}/{{ item }}"
-  with_items:
-    - onos-setup-vars.yml
-    - Dockerfile.xos-onos
-    - onos-service
-
-- name: Copy over ONOS playbook and other files
-  copy:
-    src: "{{ item }}"
-    dest: "{{ ansible_user_dir }}/{{ item }}"
-  with_items:
-    - onos-setup-playbook.yml
-    - onos-docker-compose.yml
-    - fabric-docker-compose.yml
-
-- name: Run the ONOS ansible playbook
-  command: ansible-playbook {{ ansible_user_dir }}/onos-setup-playbook.yml
-  async: 1800
-  poll: 0
-  register: onos_setup_playbook
-  tags:
-    - skip_ansible_lint # running a sub-job
-
diff --git a/roles/onos-vm-install/templates/Dockerfile.xos-onos.j2 b/roles/onos-vm-install/templates/Dockerfile.xos-onos.j2
deleted file mode 100644
index a9973be..0000000
--- a/roles/onos-vm-install/templates/Dockerfile.xos-onos.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-# ONOS dockerfile with XOS/CORD additions
-
-FROM {{ onos_docker_image }}
-MAINTAINER Zack Williams <zdw@cs.arizona.edu>
-
-# Add SSL certs
-COPY xos-certs.crt /usr/local/share/ca-certificates/xos-certs.crt
-RUN update-ca-certificates
-
-# Create Java KeyStore from certs
-RUN openssl x509 -in /usr/local/share/ca-certificates/xos-certs.crt \
-      -outform der -out /usr/local/share/ca-certificates/xos-certs.der && \
-    keytool -import -noprompt -storepass {{ trust_store_pw }} -alias xos-certs \
-      -file /usr/local/share/ca-certificates/xos-certs.der \
-      -keystore /usr/local/share/ca-certificates/xos-certs.jks
-
-# Copy in updated onos-service that uses the JKS
-COPY onos-service /root/onos/bin/onos-service
-RUN chmod 755 /root/onos/bin/onos-service
-
diff --git a/roles/onos-vm-install/templates/onos-service.j2 b/roles/onos-vm-install/templates/onos-service.j2
deleted file mode 100644
index 7eef6f5..0000000
--- a/roles/onos-vm-install/templates/onos-service.j2
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/bash
-# -----------------------------------------------------------------------------
-# Starts ONOS Apache Karaf container
-# -----------------------------------------------------------------------------
-
-# uncomment the following line for performance testing
-#export JAVA_OPTS="${JAVA_OPTS:--Xms8G -Xmx8G -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:+PrintGCDetails -XX:+PrintGCTimeStamps}"
-
-# uncomment the following line for Netty TLS encryption
-# Do modify the keystore location/password and truststore location/password accordingly
-#export JAVA_OPTS="${JAVA_OPTS:--DenableNettyTLS=true -Djavax.net.ssl.keyStore=/home/ubuntu/onos.jks -Djavax.net.ssl.keyStorePassword=222222 -Djavax.net.ssl.trustStore=/home/ubuntu/onos.jks -Djavax.net.ssl.trustStorePassword=222222}"
-
-export JAVA_OPTS="-Djavax.net.ssl.trustStore=/usr/local/share/ca-certificates/xos-certs.jks -Djavax.net.ssl.trustStorePassword={{ trust_store_pw }}" 
-
-set -e  # exit on error
-set -u  # exit on undefined variable
-
-# If ONOS_HOME is set, respect its value.
-# If ONOS_HOME is not set (e.g. in the init or service environment),
-# set it based on this script's path.
-ONOS_HOME=${ONOS_HOME:-$(cd $(dirname $0)/.. >/dev/null 2>&1 && pwd)}
-KARAF_ARGS=
-SYS_APPS=drivers
-ONOS_APPS=${ONOS_APPS:-}  # Empty means don't activate any new apps
-
-cd $ONOS_HOME
-
-# Parse out arguments destined for karaf invocation vs. arguments that
-# will be processed inline
-while [ $# -gt 0 ]; do
-  case $1 in
-    apps-clean)
-      # Deactivate all applications
-      find ${ONOS_HOME}/apps -name "active" -exec rm \{\} \;
-      ;;
-    *)
-      KARAF_ARGS+=" $1"
-      ;;
-  esac
-  shift
-done
-
-# Activate the system-required applications (SYS_APPS) as well as any
-# specified applications in the var ONOS_APPS
-for app in ${SYS_APPS//,/ } ${ONOS_APPS//,/ }; do
-  if [[ "$app" =~ \. ]]; then
-    touch ${ONOS_HOME}/apps/$app/active
-  else
-    touch ${ONOS_HOME}/apps/org.onosproject.$app/active
-  fi
-done
-
-exec ${ONOS_HOME}/apache-karaf-3.0.5/bin/karaf $KARAF_ARGS
diff --git a/roles/onos-vm-install/templates/onos-setup-vars.yml.j2 b/roles/onos-vm-install/templates/onos-setup-vars.yml.j2
deleted file mode 100644
index 0d870f2..0000000
--- a/roles/onos-vm-install/templates/onos-setup-vars.yml.j2
+++ /dev/null
@@ -1,7 +0,0 @@
----
-# onos-vm-install/templates/onos-setup-vars.yml.j2
-
-node_private_key: "{{ ansible_user_dir }}/node_key"
-
-onos_docker_image: "{{ onos_docker_image }}"
-
diff --git a/roles/xos-bootstrap/defaults/main.yml b/roles/xos-bootstrap/defaults/main.yml
deleted file mode 100644
index ada7671..0000000
--- a/roles/xos-bootstrap/defaults/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-# xos-bootstrap/defaults/main.yml
-
-cord_profile_dir: "{{ ansible_user_dir + '/cord_profile' }}"
-
-xos_docker_networks:
-  - "xos"
-
diff --git a/roles/xos-bootstrap/tasks/main.yml b/roles/xos-bootstrap/tasks/main.yml
deleted file mode 100644
index d962165..0000000
--- a/roles/xos-bootstrap/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# xos-bootstrap/tasks/main.yml
-
-- name: Create docker networks
-  docker_network:
-    name: "{{ item }}"
-  with_items: "{{ xos_docker_networks }}"
-
-- name: Start XOS bootstrap containers
-  docker_service:
-    project_name: "{{ cord_profile | regex_replace('\\W','') }}bs"
-    project_src: "{{ cord_profile_dir }}"
-    files: "xos-bootstrap-docker-compose.yaml"
-  register: xos_bootstrap_out
diff --git a/roles/xos-build/defaults/main.yml b/roles/xos-build/defaults/main.yml
deleted file mode 100644
index 4467e45..0000000
--- a/roles/xos-build/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-
-xos_repo_dir: "{{ playbook_dir }}/../../orchestration/xos/containers/xos"
diff --git a/roles/xos-build/tasks/main.yml b/roles/xos-build/tasks/main.yml
deleted file mode 100644
index 847a4e7..0000000
--- a/roles/xos-build/tasks/main.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-# tasks for xos-build role
-
-- name: Check to see if orchestration/xos directory exists
-  local_action: stat path={{ xos_repo_dir }}
-  register: xosdir
-
-- name: build XOS base container image
-  local_action: shell cd {{ xos_repo_dir }} && make base
-  when:
-      xosdir.stat.exists == True
-
-- name: tag xos base container image
-  local_action: command docker tag xosproject/xos-base {{ deploy_docker_registry }}/xosproject/xos-base:{{ deploy_docker_tag }}
-  when:
-      xosdir.stat.exists == True
-
-- name: publish XOS base container image to local repo
-  local_action: shell cd {{ xos_repo_dir }} && docker push {{ deploy_docker_registry }}/xosproject/xos-base:{{ deploy_docker_tag }}
-  when:
-      xosdir.stat.exists == True
diff --git a/roles/xos-container-test-onboarding/tasks/main.yml b/roles/xos-container-test-onboarding/tasks/main.yml
deleted file mode 100644
index 456dd90..0000000
--- a/roles/xos-container-test-onboarding/tasks/main.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-# xos-onboarding-sync tests
-
-- name: Check XOS Onboarding Process
-  shell: ps -f | grep "python onboarding-synchronizer.py -C /opt/xos/synchronizers/onboarding/onboarding_synchronizer_config"
-  register: result
-  until: result | success
-  retries: 10
-  delay: 5
-  tags:
-    - skip_ansible_lint # running a sub job
-
-- name: Get Volume Mounts on XOS Onboarding Container
-  shell: mount -l #| grep xos_services
-  register: volumes
-  tags:
-    - skip_ansible_lint # running a sub job
-
-- name: Validate Volume Mounts on XOS Onboarding Container
-  assert:
-    that: "'/opt/xos_services/{{ item.name }}' in volumes.stdout|lower"
-  with_items: "{{ xos_services }}"
-  when:
-    - "'{{ item.name }}' != 'vnodlocal'"
-    - "'{{ item.name }}' != 'volt'"
\ No newline at end of file
diff --git a/roles/xos-container-test-postgres/tasks/main.yml b/roles/xos-container-test-postgres/tasks/main.yml
deleted file mode 100644
index f024261..0000000
--- a/roles/xos-container-test-postgres/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-# xos-postgres tests
-
-- name: Check PSQL Service
-  shell: service postgresql status | grep "{{ item }}"
-  register: result
-  until: result | success
-  retries: 10
-  delay: 5
-  with_items:
-    - 9.3/main
-    - port 5432
-    - online
-  tags:
-    - skip_ansible_lint # running a sub job
diff --git a/roles/xos-container-test-volt/tasks/main.yml b/roles/xos-container-test-volt/tasks/main.yml
deleted file mode 100644
index 1d9c672..0000000
--- a/roles/xos-container-test-volt/tasks/main.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-# xos-volt-sync tests
-
-- name: Check XOS VOLT Synchronizer Process
-  shell: ps -f | grep "python volt-synchronizer.py -C /opt/xos/synchronizers/volt/volt_synchronizer_config"
-  register: result
-  until: result | success
-  retries: 10
-  delay: 5
-  tags:
-    - skip_ansible_lint # running a sub job
-
-- name: Get Volume Mounts on XOS VOLT Synchronizer Container
-  shell: mount -l | grep xos_services
-  register: volumes
-  tags:
-    - skip_ansible_lint # running a sub job
-
-- name: Validate Volume Mounts on XOS VOLT Synchronizer Container
-  assert:
-    that: "'/opt/xos_services/{{ item.name }}' in volumes.stdout|lower"
-  with_items: "{{ xos_services }}"
-  when:
-    - "'{{ item.name }}' != 'vnodlocal'"
-    - "'{{ item.name }}' != 'volt'"
\ No newline at end of file
diff --git a/roles/xos-head-start/tasks/main.yml b/roles/xos-head-start/tasks/main.yml
deleted file mode 100644
index 09d0e1a..0000000
--- a/roles/xos-head-start/tasks/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# xos-head-start/tasks/main.yml
-
-- name: Run service-profile make targets
-  make:
-    chdir: "{{ service_profile_repo_dest }}/{{ xos_configuration }}"
-    target: "{{ item }}"
-  with_items: "{{ xos_config_targets }}"
-
diff --git a/roles/xos-onboarding/defaults/main.yml b/roles/xos-onboarding/defaults/main.yml
deleted file mode 100644
index fa6ce10..0000000
--- a/roles/xos-onboarding/defaults/main.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-# xos-onboarding/defaults/main.yml
-
-cord_dir: "{{ ansible_user_dir + '/cord' }}"
-
-xos_bootstrap_ui_port: 9001
-
-xos_libraries:
-  - "ng-xos-lib"
-
-xos_components:
-  - name: gui-extensions-store
-    recipe_filename: onboard-gui-extensions-store.yaml
-  - name: Chameleon
-    recipe_filename: onboard-chameleon.yaml
-  - name: xos-ws
-    recipe_filename: onboard-xos-ws.yaml
-  - name: xos-gui
-    recipe_filename: onboard-xos-gui.yaml
-
-xos_services: []
-
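Note: xos_services defaults to an empty list here; the onboarding tasks below
expect each entry to carry a name, a path, and an optional operation, e.g.
(path hypothetical):

    xos_services:
      - name: exampleservice
        path: orchestration/xos_services/exampleservice
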
diff --git a/roles/xos-onboarding/tasks/main.yml b/roles/xos-onboarding/tasks/main.yml
deleted file mode 100644
index f1980fa..0000000
--- a/roles/xos-onboarding/tasks/main.yml
+++ /dev/null
@@ -1,92 +0,0 @@
----
-# xos-onboarding/tasks/main.yml
-
-- name: Wait for XOS to be ready
-  wait_for:
-    host: localhost
-    port: "{{ xos_bootstrap_ui_port }}"
-    timeout: 240
-
-- name: Bootstrap XOS database - create site, deployment, admin user
-  command: "python /opt/xos/tosca/run.py none /opt/cord_profile/{{ item }}"
-  with_items:
-    - "fixtures.yaml"
-    - "deployment.yaml"
-  tags:
-    - skip_ansible_lint # TOSCA loading should be idempotent
-
-- name: Configure XOS with xos.yaml TOSCA
-  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/cord_profile/xos.yaml"
-  tags:
-    - skip_ansible_lint # TOSCA loading should be idempotent
-
-- name: Wait for XOS to be onboarded
-  uri:
-    url: "http://localhost:{{ xos_bootstrap_ui_port }}/api/utility/onboarding/xos/ready/"
-    method: GET
-    return_content: yes
-  register: xos_onboard_status
-  until: '"true" in xos_onboard_status.content'
-  retries: 60
-  delay: 10
-
-- name: Disable onboarding
-  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/cord_profile/disable-onboarding.yaml"
-  tags:
-    - skip_ansible_lint # TOSCA loading should be idempotent
-
-- name: Onboard libraries
-  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/xos_libraries/{{ item }}/{{ item }}-onboard.yaml"
-  with_items: "{{ xos_libraries }}"
-  tags:
-    - skip_ansible_lint # TOSCA loading should be idempotent
-
-- name: Onboard services
-  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/xos_services/{{ item.path | basename }}/xos/{{ item.name }}-{{ item.operation | default('onboard') }}.yaml"
-  with_items: "{{ xos_services }}"
-  tags:
-    - skip_ansible_lint # TOSCA loading should be idempotent
-
-- name: Onboard components
-  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/cord_profile/{{ item.recipe_filename }}"
-  with_items: "{{ xos_components }}"
-  tags:
-    - skip_ansible_lint # TOSCA loading should be idempotent
-
-- name: Enable onboarding
-  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/cord_profile/enable-onboarding.yaml"
-  tags:
-    - skip_ansible_lint # TOSCA loading should be idempotent
-
-- name: Wait for libraries to be onboarded
-  uri:
-    url: "http://localhost:{{ xos_bootstrap_ui_port }}/api/utility/onboarding/services/{{ item }}/ready/"
-    method: GET
-    return_content: yes
-  register: xos_onboard_status
-  until: '"true" in xos_onboard_status.content'
-  retries: 60
-  delay: 10
-  with_items: "{{ xos_libraries }}"
-
-- name: Wait for services to be onboarded
-  uri:
-    url: "http://localhost:{{ xos_bootstrap_ui_port }}/api/utility/onboarding/services/{{ item.name }}/ready/"
-    method: GET
-    return_content: yes
-  register: xos_onboard_status
-  until: '"true" in xos_onboard_status.content'
-  retries: 60
-  delay: 10
-  with_items: "{{ xos_services }}"
-
-- name: Wait for XOS to be onboarded after service onboarding
-  uri:
-    url: "http://localhost:{{ xos_bootstrap_ui_port }}/api/utility/onboarding/xos/ready/"
-    method: GET
-    return_content: yes
-  register: xos_onboard_status
-  until: '"true" in xos_onboard_status.content'
-  retries: 60
-  delay: 10
-
diff --git a/roles/xos-uninstall/defaults/main.yml b/roles/xos-uninstall/defaults/main.yml
deleted file mode 100644
index 74d34c1..0000000
--- a/roles/xos-uninstall/defaults/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-# default variables for xos-uninstall role
-
-xos_repo_dest: "{{ ansible_user_dir }}/xos"
-service_profile_repo_dest: "{{ ansible_user_dir }}/service-profile"
-
-xos_libraries_dest: "{{ ansible_user_dir }}/xos_libraries"
-xos_services_dest: "{{ ansible_user_dir }}/xos_services"
diff --git a/roles/xos-uninstall/tasks/main.yml b/roles/xos-uninstall/tasks/main.yml
deleted file mode 100644
index c29c2fc..0000000
--- a/roles/xos-uninstall/tasks/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-# xos-uninstall/tasks/main.yml
-
-- name: Check to see if service-profile exists
-  stat: path="{{ service_profile_repo_dest }}"
-  register: service_profile_dir
-
-- name: Run make cleanup
-  make:
-    chdir: "{{ service_profile_repo_dest }}/{{ xos_configuration }}"
-    target: "cleanup"
-  when: service_profile_dir.stat.exists == True
-
-- name: Remove service-profile directory
-  file: path="{{ service_profile_repo_dest }}" state=absent
-
-- name: Remove xos directory
-  file: path="{{ xos_repo_dest }}" state=absent
-
-- name: Remove xos services
-  file: path="{{ xos_services_dest }}" state=absent
-
-- name: Remove xos libraries
-  file: path="{{ xos_libraries_dest }}" state=absent
-