VOL-241 VOL-239 VOL-257 VOL-258 This update fixes several installer
problems and cleans up the ansible tree.
VOL-241 Removes the error messages during dependent software installation
VOL-239 Removes the dependency on apt-get -y -f
VOL-257 Adds a config file to specify the docker containers required
        for a production deployment of vOLT-HA (see the second sketch
        below)
VOL-258 Adds error checking after the voltha VM executes the vOLT-HA
        build, stopping on errors rather than continuing only to have
        the installer fail much later when docker containers are
        missing (see the first sketch below).
- General cleanup of the ansible tree.
  - Removal of the ansible CentOS conditionals, since they're not required.
  - Removal of the check for puppet and chef, which is also not required.
- Adds a cleanup script that will remove temporary files added
  during an install so they don't get submitted to the repo by
  accident.
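
For the record, the VOL-258 check added to BuildVoltha.sh amounts to
the pattern below (a minimal sketch; the ssh options and variable
setup are elided, and the log file name is illustrative):

    # Run the build in the VM, teeing the output so it can be scanned.
    ssh vagrant@$ipAddr "cd /cord/incubator/voltha && . env.sh && \
        make fetch && make production" | tee voltha_build.tmp
    rtrn=${PIPESTATUS[0]}  # $? would be tee's status, not the build's
    if [ $rtrn -ne 0 ]; then
        rm -f voltha_build.tmp
        exit 1
    fi
    # make failures can also surface only in the log, so scan for them.
    if egrep -q 'Makefile:[0-9]+: recipe for target .* failed' voltha_build.tmp; then
        rm -f voltha_build.tmp
        exit 1
    fi
    rm -f voltha_build.tmp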

Note that there are many commented-out lines in the ansible scripts;
these will be removed in a subsequent update.
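
The VOL-257 container list is spliced into the ansible variables with
a simple delete-then-append idiom; a minimal sketch of what
CreateInstaller.sh does (cleanup.sh runs the same sed to restore the
file afterwards):

    # Drop any stale list (the marker line through end of file), then
    # append the current list from the config file.
    sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
    cat containers.cfg >> ansible/group_vars/all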

Change-Id: I92da352408dbfed1a05d13a1e10003f169be6a66
diff --git a/install/BuildVoltha.sh b/install/BuildVoltha.sh
index 7a08577..b37a998 100755
--- a/install/BuildVoltha.sh
+++ b/install/BuildVoltha.sh
@@ -47,4 +47,20 @@
 
 
 # Run all the build commands
-ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$ipAddr "cd /cord/incubator/voltha && . env.sh && make fetch && make build"
+ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$ipAddr "cd /cord/incubator/voltha && . env.sh && make fetch && make production" | tee voltha_build.tmp
+
+rtrn=${PIPESTATUS[0]}
+
+if [ $rtrn -ne 0 ]; then
+	rm -f voltha_build.tmp
+	exit 1
+fi
+
+egrep 'Makefile:[0-9]+: recipe for target .* failed' voltha_build.tmp
+
+rtrn=$?
+rm -f voltha_build.tmp
+if [ $rtrn -eq 0 ]; then
+	# An error occurred; notify the caller
+	exit 1
+fi
diff --git a/install/CreateInstaller.sh b/install/CreateInstaller.sh
index 8816db6..7f06c09 100755
--- a/install/CreateInstaller.sh
+++ b/install/CreateInstaller.sh
@@ -2,6 +2,7 @@
 
 
 iVmName="vInstaller"
+vVmName="voltha_voltha"
 baseImage="Ubuntu1604LTS"
 iVmNetwork="vagrant-libvirt"
 installerArchive="installer.tar.bz2"
@@ -73,6 +74,7 @@
 	sed -i -e '/^#/!d' install.cfg
 	# Set the insecure registry configuration based on the installer hostname
 	echo -e "${lBlue}Set up the insecure registry hostname ${lCyan}vinstall${uId}${NC}"
+	sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall:5000"/' ansible/group_vars/all
 	echo '{' > ansible/roles/voltha/templates/daemon.json
 	echo '"insecure-registries" : ["vinstall:5000"]' >> ansible/roles/voltha/templates/daemon.json
 	echo '}' >> ansible/roles/voltha/templates/daemon.json
@@ -184,12 +186,32 @@
 if [ -z "$vVM" ]; then
 	if [ $# -eq 1 -a "$1" == "test" ]; then
 		./BuildVoltha.sh $1
+		rtrn=$?
 	else
 		# Default to installer mode 
 		./BuildVoltha.sh install
+		rtrn=$?
+	fi
+	if [ $rtrn -ne 0 ]; then
+		echo -e "${red}Voltha build failed!! ${yellow}Please review the log and correct the problem${NC}"
+		exit 1
 	fi
 fi
 
+# Extract all the image names and tags from the running voltha VM
+# No, don't do this: it's too error-prone if the voltha VM is not
+# built correctly; going with a static list for now.
+#echo -e "${lBlue}Extracting the docker image list from the voltha VM${NC}"
+#volIpAddr=`virsh domifaddr $vVmName${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
+#ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ../.vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$volIpAddr "docker image ls" > images.tmp
+#cat images.tmp | grep -v 5000 | tail -n +2 | awk '{printf("  - %s:%s\n", $1, $2)}' > image-list.cfg
+#rm -f images.tmp
+#sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
+#echo "voltha_containers:" >> ansible/group_vars/all
+echo -e "${lBlue}Set up the docker image list from ${yellow}containers.cfg${NC}"
+sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
+cat containers.cfg >> ansible/group_vars/all
+
 # Install python which is required for ansible
 echo -e "${lBlue}Installing python${NC}"
 ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update 
diff --git a/install/ansible/group_vars/all b/install/ansible/group_vars/all
index 28cd368..d870fc1 100644
--- a/install/ansible/group_vars/all
+++ b/install/ansible/group_vars/all
@@ -10,29 +10,3 @@
 target_voltha_home: /home/voltha
 docker_daemon_json: daemon.json
 docker_daemon_json_dest: /etc/docker
-voltha_containers:
-  - voltha/nginx
-  - voltha/grafana
-  - voltha/portainer
-  - cord/vcli
-  - cord/dashd
-  - cord/config-push
-  - cord/tester
-  - cord/onos
-  - cord/shovel
-  - cord/netconf
-  - cord/podder
-  - cord/ofagent
-  - cord/chameleon
-  - cord/voltha
-  - cord/voltha-base
-  - nginx
-  - consul
-  - fluent/fluentd
-  - portainer/portainer
-  - wurstmeister/kafka
-  - wurstmeister/zookeeper
-  - kamon/grafana_graphite
-  - gliderlabs/registrator
-  - centurylink/ca-certs
-  - zookeeper
diff --git a/install/ansible/roles/cluster-host/tasks/cluster-host.yml b/install/ansible/roles/cluster-host/tasks/cluster-host.yml
deleted file mode 100644
index d1648f5..0000000
--- a/install/ansible/roles/cluster-host/tasks/cluster-host.yml
+++ /dev/null
@@ -1,103 +0,0 @@
-# Note: When the target == "cluster" the installer
-# is running to install voltha in the cluster hosts.
-# Whe the target == "installer" the installer is being
-# created.
-- name: A .ssh directory for the voltha user exists
-  file:
-    #path: "{{ ansible_env['HOME'] }}/.ssh"
-    path: "/home/voltha/.ssh"
-    state: directory
-    owner: voltha
-    group: voltha
-  tags: [cluster_host]
-
-- name: known_hosts file is absent for the voltha user
-  file:
-    path: "/home/voltha/.ssh/known_hosts"
-    state: absent
-  tags: [cluster_host]
-
-- name: Known host checking is disabled
-  copy:
-    src: files/ssh_config
-    dest: "/home/voltha/.ssh/config"
-    owner: voltha
-    group: voltha
-    mode: 0600
-  tags: [cluster_host]
-
-- name: Cluster host keys are propagated to all hosts in the cluster
-  copy:
-    src: files/.keys
-    dest: "/home/voltha"
-    owner: voltha
-    group: voltha
-    mode: 0600
-  tags: [cluster_host]
-
-#- name: Required configuration directories are copied
-#  copy:
-#    src: "/home/vinstall/{{ item }}"
-#    dest: "{{ target_voltha_home }}"
-#    owner: voltha
-#    group: voltha
-#  with_items:
-#    - docker-py
-#    - netifaces
-#    - deb_files
-#  when: target == "cluster"
-#  tags: [cluster_host]
-
-- name: Required configuration directories are copied
-  synchronize:
-    src: "/home/vinstall/{{ item }}"
-    dest: "{{ target_voltha_home }}"
-    archive: no
-    owner: no
-    perms: no
-    recursive: yes
-    links: yes
-  with_items:
-    - docker-py
-    - netifaces
-    - deb_files
-  tags: [cluster-host]
-
-- name: apt lists are up-to-date
-  copy:
-    src: "/var/lib/apt/lists"
-    dest: "/var/lib/apt"
-  tags: [cluster_host]
-
-- name: Dependent software is installed
-  command: dpkg -i "{{ target_voltha_home }}/deb_files/{{ item }}"
-  with_items: "{{ deb_files }}"
-  when: target == "cluster"
-  ignore_errors: true
-  when: target == "cluster"
-  tags: [cluster_host]
-
-- name: Dependent software is initialized
-  command: apt-get -f install
-  when: target == "cluster"
-  tags: [cluster_host]
-
-- name: Python packages are installed
-  command: pip install {{ item }} --no-index --find-links "file://{{ target_voltha_home }}/{{ item }}"
-  with_items:
-    - docker-py
-    - netifaces
-  when: target == "cluster"
-  tags: [cluster_host]
-
-- name: Configuration directories are deleted
-  file:
-    path: "{{ target_voltha_home }}/{{ item }}"
-    state: absent
-  with_items:
-    - docker-py
-    - netifaces
-    - deb_files
-  when: target == "cluster"
-  tags: [cluster_host]
-
diff --git a/install/ansible/roles/cluster-host/tasks/main.yml b/install/ansible/roles/cluster-host/tasks/main.yml
index 41acd90..76d4840 100644
--- a/install/ansible/roles/cluster-host/tasks/main.yml
+++ b/install/ansible/roles/cluster-host/tasks/main.yml
@@ -1,2 +1,101 @@
-- include: cluster-host.yml
-  when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
+# Note: When the target == "cluster" the installer
+# is running to install voltha in the cluster hosts.
+# When the target == "installer" the installer is being
+# created.
+- name: A .ssh directory for the voltha user exists
+  file:
+    #path: "{{ ansible_env['HOME'] }}/.ssh"
+    path: "/home/voltha/.ssh"
+    state: directory
+    owner: voltha
+    group: voltha
+  tags: [cluster_host]
+
+- name: known_hosts file is absent for the voltha user
+  file:
+    path: "/home/voltha/.ssh/known_hosts"
+    state: absent
+  tags: [cluster_host]
+
+- name: Known host checking is disabled
+  copy:
+    src: files/ssh_config
+    dest: "/home/voltha/.ssh/config"
+    owner: voltha
+    group: voltha
+    mode: 0600
+  tags: [cluster_host]
+
+- name: Cluster host keys are propagated to all hosts in the cluster
+  copy:
+    src: files/.keys
+    dest: "/home/voltha"
+    owner: voltha
+    group: voltha
+    mode: 0600
+  tags: [cluster_host]
+
+#- name: Required configuration directories are copied
+#  copy:
+#    src: "/home/vinstall/{{ item }}"
+#    dest: "{{ target_voltha_home }}"
+#    owner: voltha
+#    group: voltha
+#  with_items:
+#    - docker-py
+#    - netifaces
+#    - deb_files
+#  when: target == "cluster"
+#  tags: [cluster_host]
+
+- name: Required configuration directories are copied
+  synchronize:
+    src: "/home/vinstall/{{ item }}"
+    dest: "{{ target_voltha_home }}"
+    archive: no
+    owner: no
+    perms: no
+    recursive: yes
+    links: yes
+  with_items:
+    - docker-py
+    - netifaces
+    - deb_files
+  tags: [cluster-host]
+
+- name: apt lists are up-to-date
+  copy:
+    src: "/var/lib/apt/lists"
+    dest: "/var/lib/apt"
+  tags: [cluster_host]
+
+- name: Dependent software is installed (this takes about 10 minutes, DON'T PANIC, go for coffee instead)
+  command: dpkg -R -i "{{ target_voltha_home }}/deb_files"
+#  ignore_errors: true
+  when: target == "cluster"
+  tags: [cluster_host]
+
+#- name: Dependent software is initialized
+#  command: apt-get -y -f install
+#  when: target == "cluster"
+#  tags: [cluster_host]
+
+- name: Python packages are installed
+  command: pip install {{ item }} --no-index --find-links "file://{{ target_voltha_home }}/{{ item }}"
+  with_items:
+    - docker-py
+    - netifaces
+  when: target == "cluster"
+  tags: [cluster_host]
+
+- name: Configuration directories are deleted
+  file:
+    path: "{{ target_voltha_home }}/{{ item }}"
+    state: absent
+  with_items:
+    - docker-py
+    - netifaces
+    - deb_files
+  when: target == "cluster"
+  tags: [cluster_host]
+
diff --git a/install/ansible/roles/common/tasks/main.yml b/install/ansible/roles/common/tasks/main.yml
index c3bb649..f5904dc 100644
--- a/install/ansible/roles/common/tasks/main.yml
+++ b/install/ansible/roles/common/tasks/main.yml
@@ -20,13 +20,13 @@
   when: target != "cluster"
   tags: [common]
 
-- name: Services are not running
-  service:
-    name: "{{ item }}"
-    state: stopped
-  ignore_errors: yes
-  with_items: "{{ obsolete_services }}"
-  tags: [common]
+#- name: Services are not running
+#  service:
+#    name: "{{ item }}"
+#    state: stopped
+#  ignore_errors: yes
+#  with_items: "{{ obsolete_services }}"
+#  tags: [common]
 
 - name: Ensure there is a .ssh directory for /root
   file:
diff --git a/install/ansible/roles/docker-registry/tasks/debian.yml b/install/ansible/roles/docker-registry/tasks/debian.yml
deleted file mode 100644
index 788401f..0000000
--- a/install/ansible/roles/docker-registry/tasks/debian.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-- name: The insecure docker registry is started
-  command: docker run --restart=always -d -p 5000:5000 --name registry registry:2
-  register: result
-  ignore_errors: true
-  tags: [docker]
diff --git a/install/ansible/roles/docker-registry/tasks/main.yml b/install/ansible/roles/docker-registry/tasks/main.yml
index 1495847..788401f 100644
--- a/install/ansible/roles/docker-registry/tasks/main.yml
+++ b/install/ansible/roles/docker-registry/tasks/main.yml
@@ -1,5 +1,5 @@
-- include: debian.yml
-  when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
-
-- include: centos.yml
-  when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux'
+- name: The insecure docker registry is started
+  command: docker run --restart=always -d -p 5000:5000 --name registry registry:2
+  register: result
+  ignore_errors: true
+  tags: [docker]
diff --git a/install/ansible/roles/docker/tasks/centos.yml b/install/ansible/roles/docker/tasks/centos.yml
deleted file mode 100644
index a8910d4..0000000
--- a/install/ansible/roles/docker/tasks/centos.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-- name: CentOS files are copied
-  copy:
-    src: "{{ item.src }}"
-    dest: "{{ item.dest }}"
-  with_items: centos_files
-  tags: [docker]
-
-- name: CentOS package is installed
-  yum:
-    name: docker-engine
-    state: present
-  tags: [docker]
-
-- name: CentOS Daemon is reloaded
-  command: systemctl daemon-reload
-  tags: [docker]
-
-- name: CentOS service is running
-  service:
-    name: docker
-    state: running
-  tags: [docker]
-
diff --git a/install/ansible/roles/docker/tasks/debian.yml b/install/ansible/roles/docker/tasks/debian.yml
deleted file mode 100644
index d9f3f37..0000000
--- a/install/ansible/roles/docker/tasks/debian.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-- name: Debian add Docker repository and update apt cache
-  apt_repository:
-    repo: deb https://apt.dockerproject.org/repo ubuntu-{{ debian_version }} main
-    update_cache: yes
-    state: present
-  when: target == "installer"
-  tags: [docker]
-
-- name: Debian Docker is present
-  apt:
-    name: docker-engine
-    state: latest
-    force: yes
-  when: target == "installer"
-  tags: [docker]
-
-- name: Debian python-pip is present
-  apt:
-    name: python-pip
-    state: present
-  when: target == "installer"
-  tags: [docker]
-
-- name: Debian docker-py is present
-  pip:
-    name: docker-py
-    version: "{{ docker_py_version }}"
-    state: present
-  when: target == "installer"
-  tags: [docker]
-
-- name: netifaces pip package is present
-  pip:
-    name: netifaces
-    version: "{{ netifaces_version }}"
-    state: present
-  when: target == "installer"
-  tags: [docker]
-
-- name: Docker config files are present
-  template:
-    src: "{{ docker_cfg }}"
-    dest: "{{ docker_cfg_dest }}"
-  register: copy_result
-  tags: [docker]
-
-- name: Debian Daemon is reloaded
-  command: systemctl daemon-reload
-  when: copy_result|changed and is_systemd is defined
-  tags: [docker]
-
-- name: Sudo user is added to the docker group
-  user:
-    name: "{{ ansible_env['SUDO_USER'] }}"
-    group: docker
-  register: user_result
-  tags: [docker]
-
-- name: Debian Docker service is restarted
-  service:
-    name: docker
-    state: restarted
-  when: copy_result|changed or user_result|changed
-  tags: [docker]
diff --git a/install/ansible/roles/docker/tasks/main.yml b/install/ansible/roles/docker/tasks/main.yml
index 1495847..d9f3f37 100644
--- a/install/ansible/roles/docker/tasks/main.yml
+++ b/install/ansible/roles/docker/tasks/main.yml
@@ -1,5 +1,64 @@
-- include: debian.yml
-  when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
+- name: Debian add Docker repository and update apt cache
+  apt_repository:
+    repo: deb https://apt.dockerproject.org/repo ubuntu-{{ debian_version }} main
+    update_cache: yes
+    state: present
+  when: target == "installer"
+  tags: [docker]
 
-- include: centos.yml
-  when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux'
+- name: Debian Docker is present
+  apt:
+    name: docker-engine
+    state: latest
+    force: yes
+  when: target == "installer"
+  tags: [docker]
+
+- name: Debian python-pip is present
+  apt:
+    name: python-pip
+    state: present
+  when: target == "installer"
+  tags: [docker]
+
+- name: Debian docker-py is present
+  pip:
+    name: docker-py
+    version: "{{ docker_py_version }}"
+    state: present
+  when: target == "installer"
+  tags: [docker]
+
+- name: netifaces pip package is present
+  pip:
+    name: netifaces
+    version: "{{ netifaces_version }}"
+    state: present
+  when: target == "installer"
+  tags: [docker]
+
+- name: Docker config files are present
+  template:
+    src: "{{ docker_cfg }}"
+    dest: "{{ docker_cfg_dest }}"
+  register: copy_result
+  tags: [docker]
+
+- name: Debian Daemon is reloaded
+  command: systemctl daemon-reload
+  when: copy_result|changed and is_systemd is defined
+  tags: [docker]
+
+- name: Sudo user is added to the docker group
+  user:
+    name: "{{ ansible_env['SUDO_USER'] }}"
+    group: docker
+  register: user_result
+  tags: [docker]
+
+- name: Debian Docker service is restarted
+  service:
+    name: docker
+    state: restarted
+  when: copy_result|changed or user_result|changed
+  tags: [docker]
diff --git a/install/ansible/roles/docker/templates/docker.cfg b/install/ansible/roles/docker/templates/docker.cfg
index d59db12..a18aeb0 100644
--- a/install/ansible/roles/docker/templates/docker.cfg
+++ b/install/ansible/roles/docker/templates/docker.cfg
@@ -1 +1 @@
-DOCKER_OPTS="$DOCKER_OPTS --insecure-registry 192.168.121.91:5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=http://192.168.121.91:5001"
+DOCKER_OPTS="$DOCKER_OPTS --insecure-registry 192.168.121.92:5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=http://192.168.121.92:5001"
diff --git a/install/ansible/roles/installer/tasks/installer.yml b/install/ansible/roles/installer/tasks/installer.yml
deleted file mode 100644
index a958cb0..0000000
--- a/install/ansible/roles/installer/tasks/installer.yml
+++ /dev/null
@@ -1,94 +0,0 @@
-- name: Ansible repository is available
-  apt_repository:
-    repo: 'ppa:ansible/ansible'
-  tags: [installer]
-- name: Debian ansible is present
-  apt:
-    name: ansible
-    state: latest
-    force: yes
-  tags: [installer]
-#- name: Installer files and directories are copied
-#  copy:
-#    src: "{{ cord_home }}/incubator/voltha/{{ item }}"
-#    dest: /home/vinstall
-#    owner: vinstall
-#    group: vinstall
-#    follow: no
-#  with_items:
-#    - install/installer.sh
-#    - install/install.cfg
-#    - install/ansible
-#    - compose
-#    - nginx_config
-- name: Installer files and directories are copied
-  synchronize:
-    src: "{{ cord_home }}/incubator/voltha/{{ item }}"
-    dest: /home/vinstall
-    archive: no
-    owner: no
-    perms: no
-    recursive: yes
-    links: yes
-  with_items:
-    - install/installer.sh
-    - install/install.cfg
-    - install/ansible
-    - compose
-    - nginx_config
-  tags: [installer]
-- name: Installer directories are owned by vinstall
-  file:
-    path: /home/vinstall/{{ item }}
-    owner: vinstall
-    group: vinstall
-    recurse: yes
-    follow: no
-  with_items:
-    - ansible
-    - compose
-    - nginx_config
-  tags: [installer]
-- name: Installer files are owned by vinstall
-  file:
-    path: /home/vinstall/{{ item }}
-    owner: vinstall
-    group: vinstall
-    follow: no
-  with_items:
-    - installer.sh
-    - install.cfg
-  tags: [installer]
-
-- name: Determine if test mode is active
-  become: false
-  local_action: stat path="{{ cord_home }}/incubator/voltha/install/.test"
-  register: file
-  ignore_errors: true
-- name: Test mode file is copied
-  copy:
-    src: "{{ cord_home }}/incubator/voltha/install/.test"
-    dest: /home/vinstall
-  when: file.stat.exists
-- name: The installer is made executable
-  file:
-    path: /home/vinstall/installer.sh
-    mode: 0744
-  tags: [installer]
-- name: Python docker-py {{ docker_py_version }} package source is available
-  command: pip download -d /home/vinstall/docker-py "docker-py=={{ docker_py_version }}"
-  tags: [installer]
-- name: Python netifaces {{ netifaces_version }} package source is available
-  command: pip download -d /home/vinstall/netifaces "netifaces=={{ netifaces_version }}"
-  tags: [installer]
-- name: Deb file directory doesn't exist
-  file:
-    path: /home/vinstall/deb_files
-    state: absent
-  tags: [installer]
-- name: Deb files are saved.
-  command: cp -r /var/cache/apt/archives /home/vinstall
-  tags: [installer]
-- name: Deb file directory is renamed
-  command: mv /home/vinstall/archives /home/vinstall/deb_files
-  tags: [installer]
diff --git a/install/ansible/roles/installer/tasks/main.yml b/install/ansible/roles/installer/tasks/main.yml
index 005734b..57125df 100644
--- a/install/ansible/roles/installer/tasks/main.yml
+++ b/install/ansible/roles/installer/tasks/main.yml
@@ -1,2 +1,98 @@
-- include: installer.yml
-  when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
+- name: Ansible repository is available
+  apt_repository:
+    repo: 'ppa:ansible/ansible'
+  tags: [installer]
+- name: Debian ansible is present
+  apt:
+    name: ansible
+    state: latest
+    force: yes
+  tags: [installer]
+#- name: Installer files and directories are copied
+#  copy:
+#    src: "{{ cord_home }}/incubator/voltha/{{ item }}"
+#    dest: /home/vinstall
+#    owner: vinstall
+#    group: vinstall
+#    follow: no
+#  with_items:
+#    - install/installer.sh
+#    - install/install.cfg
+#    - install/ansible
+#    - compose
+#    - nginx_config
+- name: Installer files and directories are copied
+  synchronize:
+    src: "{{ cord_home }}/incubator/voltha/{{ item }}"
+    dest: /home/vinstall
+    archive: no
+    owner: no
+    perms: no
+    recursive: yes
+    links: yes
+  with_items:
+    - install/sort_packages.sh
+    - install/sort_packages.py
+    - install/installer.sh
+    - install/install.cfg
+    - install/ansible
+    - compose
+    - nginx_config
+  tags: [installer]
+- name: Installer directories are owned by vinstall
+  file:
+    path: /home/vinstall/{{ item }}
+    owner: vinstall
+    group: vinstall
+    recurse: yes
+    follow: no
+  with_items:
+    - ansible
+    - compose
+    - nginx_config
+  tags: [installer]
+- name: Installer files are owned by vinstall
+  file:
+    path: /home/vinstall/{{ item }}
+    owner: vinstall
+    group: vinstall
+    follow: no
+  with_items:
+    - sort_packages.sh
+    - sort_packages.py
+    - installer.sh
+    - install.cfg
+  tags: [installer]
+
+- name: Determine if test mode is active
+  become: false
+  local_action: stat path="{{ cord_home }}/incubator/voltha/install/.test"
+  register: file
+  ignore_errors: true
+- name: Test mode file is copied
+  copy:
+    src: "{{ cord_home }}/incubator/voltha/install/.test"
+    dest: /home/vinstall
+  when: file.stat.exists
+- name: The installer is made executable
+  file:
+    path: /home/vinstall/installer.sh
+    mode: 0744
+  tags: [installer]
+- name: Python docker-py {{ docker_py_version }} package source is available
+  command: pip download -d /home/vinstall/docker-py "docker-py=={{ docker_py_version }}"
+  tags: [installer]
+- name: Python netifaces {{ netifaces_version }} package source is available
+  command: pip download -d /home/vinstall/netifaces "netifaces=={{ netifaces_version }}"
+  tags: [installer]
+- name: Deb file directory doesn't exist
+  file:
+    path: /home/vinstall/deb_files
+    state: absent
+  tags: [installer]
+- name: Deb files are saved.
+  command: cp -r /var/cache/apt/archives /home/vinstall
+  tags: [installer]
+- name: Deb file directory is renamed
+  command: mv /home/vinstall/archives /home/vinstall/deb_files
+  tags: [installer]
diff --git a/install/ansible/roles/pull-images/tasks/main.yml b/install/ansible/roles/pull-images/tasks/main.yml
index dde3d78..9b2044d 100644
--- a/install/ansible/roles/pull-images/tasks/main.yml
+++ b/install/ansible/roles/pull-images/tasks/main.yml
@@ -1,2 +1,12 @@
-- include: pull.yml
-  when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
+- name: Docker containers for Voltha are pulled
+  command: docker pull {{ docker_registry }}/{{ item }}
+  with_items: "{{ voltha_containers }}"
+  tags: [pull]
+- name: Docker images are re-tagged to expected names
+  command: docker tag {{ docker_registry }}/{{ item }} {{ item }}
+  with_items: "{{ voltha_containers }}"
+  tags: [pull]
+- name: Old docker image tags are removed
+  command: docker rmi {{ docker_registry }}/{{ item }}
+  with_items: "{{ voltha_containers }}"
+  tags: [pull]
diff --git a/install/ansible/roles/pull-images/tasks/pull.yml b/install/ansible/roles/pull-images/tasks/pull.yml
deleted file mode 100644
index 9b2044d..0000000
--- a/install/ansible/roles/pull-images/tasks/pull.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-- name: Docker containers for Voltha are pulled
-  command: docker pull {{ docker_registry }}/{{ item }}
-  with_items: "{{ voltha_containers }}"
-  tags: [pull]
-- name: Docker images are re-tagged to expected names
-  command: docker tag {{ docker_registry }}/{{ item }} {{ item }}
-  with_items: "{{ voltha_containers }}"
-  tags: [pull]
-- name: Old docker image tags are removed
-  command: docker rmi {{ docker_registry }}/{{ item }}
-  with_items: "{{ voltha_containers }}"
-  tags: [pull]
diff --git a/install/ansible/roles/push-images/tasks/main.yml b/install/ansible/roles/push-images/tasks/main.yml
index 8c8d827..88dbc52 100644
--- a/install/ansible/roles/push-images/tasks/main.yml
+++ b/install/ansible/roles/push-images/tasks/main.yml
@@ -1,2 +1,12 @@
-- include: push.yml
-  when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
+- name: Docker images are re-tagged to registry for push
+  command: docker tag {{ item }} {{ docker_push_registry }}/{{ item }}
+  with_items: "{{ voltha_containers }}"
+  tags: [push]
+- name: Docker containers for Voltha are pushed
+  command: docker push {{ docker_push_registry }}/{{ item }}
+  with_items: "{{ voltha_containers }}"
+  tags: [push]
+- name: Temporary registry push tags are removed
+  command: docker rmi {{ docker_push_registry }}/{{ item }}
+  with_items: "{{ voltha_containers }}"
+  tags: [push]
diff --git a/install/ansible/roles/push-images/tasks/push.yml b/install/ansible/roles/push-images/tasks/push.yml
deleted file mode 100644
index 88dbc52..0000000
--- a/install/ansible/roles/push-images/tasks/push.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-- name: Docker images are re-tagged to registry for push
-  command: docker tag {{ item }} {{ docker_push_registry }}/{{ item }}
-  with_items: "{{ voltha_containers }}"
-  tags: [push]
-- name: Docker containers for Voltha are pushed
-  command: docker push {{ docker_push_registry }}/{{ item }}
-  with_items: "{{ voltha_containers }}"
-  tags: [push]
-- name: Temporary registry push tags are removed
-  command: docker rmi {{ docker_push_registry }}/{{ item }}
-  with_items: "{{ voltha_containers }}"
-  tags: [push]
diff --git a/install/ansible/roles/swarm/tasks/main.yml b/install/ansible/roles/swarm/tasks/main.yml
index 92e73c2..b0a7009 100644
--- a/install/ansible/roles/swarm/tasks/main.yml
+++ b/install/ansible/roles/swarm/tasks/main.yml
@@ -1,2 +1,24 @@
-- include: swarm.yml
-  when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
+---
+- name: Ensure Swarm Master Initialization
+  command: "docker swarm init --advertise-addr {{ swarm_master_addr }}"
+  when: target == "swarm-master"
+  tags: [swarm]
+
+- name: Capture Swarm Cluster Manager Token
+  become: voltha
+  shell: ssh -i /home/voltha/.keys/{{ swarm_master_addr }} voltha@{{ swarm_master_addr }} sudo docker swarm join-token -q manager 2>/dev/null
+  register: manager_token
+  changed_when: false
+  when: target == "swarm-master-backup"
+  tags: [swarm]
+
+- name: Debug
+  debug:
+    msg: "TOKEN: {{ manager_token.stdout }}"
+  when: target == "swarm-master-backup"
+  tags: [swarm]
+
+- name: Join Swarm Cluster
+  command: "docker swarm join --token {{ manager_token.stdout }} {{ swarm_master_addr }}:2377"
+  when: target == "swarm-master-backup"
+  tags: [swarm]
diff --git a/install/ansible/roles/swarm/tasks/swarm.yml b/install/ansible/roles/swarm/tasks/swarm.yml
deleted file mode 100644
index b0a7009..0000000
--- a/install/ansible/roles/swarm/tasks/swarm.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Ensure Swarm Master Initialization
-  command: "docker swarm init --advertise-addr {{ swarm_master_addr }}"
-  when: target == "swarm-master"
-  tags: [swarm]
-
-- name: Capture Swarm Cluster Manager Token
-  become: voltha
-  shell: ssh -i /home/voltha/.keys/{{ swarm_master_addr }} voltha@{{ swarm_master_addr }} sudo docker swarm join-token -q manager 2>/dev/null
-  register: manager_token
-  changed_when: false
-  when: target == "swarm-master-backup"
-  tags: [swarm]
-
-- name: Debug
-  debug:
-    msg: "TOKEN: {{ manager_token.stdout }}"
-  when: target == "swarm-master-backup"
-  tags: [swarm]
-
-- name: Join Swarm Cluster
-  command: "docker swarm join --token {{ manager_token.stdout }} {{ swarm_master_addr }}:2377"
-  when: target == "swarm-master-backup"
-  tags: [swarm]
diff --git a/install/ansible/roles/voltha/tasks/main.yml b/install/ansible/roles/voltha/tasks/main.yml
index 597ecd1..001c837 100644
--- a/install/ansible/roles/voltha/tasks/main.yml
+++ b/install/ansible/roles/voltha/tasks/main.yml
@@ -1,2 +1,203 @@
-- include: voltha.yml
-  when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
+# Note: When the target == "cluster" the installer
+# is running to install voltha in the cluster hosts.
+# When the target == "installer" the installer is being
+# created.
+- name: The environment is properly set on login
+  template:
+    src: bashrc.j2
+    dest: "{{ target_voltha_home }}/.bashrc"
+    owner: voltha
+    group: voltha
+    mode: "u=rw,g=r,o=r"
+  when: target == "cluster"
+  tags: [voltha]
+  
+- name: The .bashrc file is executed on ssh login
+  template:
+    src: bash_profile.j2
+    dest: "{{ target_voltha_home }}/.bash_profile"
+    owner: voltha
+    group: voltha
+    mode: "u=rw,g=r,o=r"
+  when: target == "cluster"
+  tags: [voltha]
+  
+- name: Required directory exists
+  file:
+    path: "{{ target_voltha_dir }}"
+    state: directory
+    owner: voltha
+    group: voltha
+  when: target == "cluster"
+  tags: [voltha]
+
+#- name: Required directories are copied
+#  copy:
+#    src: "/home/vinstall/{{ item }}"
+#    dest: "{{ target_voltha_dir }}"
+#    owner: voltha
+#    group: voltha
+#  with_items:
+#    - compose
+#    - nginx_config
+#  when: target == "cluster"
+#  tags: [voltha]
+
+- name: Installer files and directories are copied
+  synchronize:
+    src: "/home/vinstall/{{ item }}"
+    dest: "{{ target_voltha_dir }}"
+    archive: no
+    owner: no
+    perms: no
+    recursive: yes
+    links: yes
+  with_items:
+    - compose
+    - nginx_config
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: Installer directories are owned by voltha
+  file:
+    path: /home/vinstall/{{ item }}
+    owner: voltha
+    group: voltha
+    recurse: yes
+    follow: no
+  with_items:
+    - compose
+    - nginx_config
+  when: target == "cluster"
+  tags: [voltha]
+
+#- name: Nginx module symlink is present
+#  file:
+#    dest: "{{ target_voltha_dir }}/nginx_config/modules"
+#    src: ../../usr/lib/nginx/modules
+#    state: link
+#    follow: no
+#    force: yes
+#  when: target == "cluster"
+#  tags: [voltha]
+
+- name: Nginx startup script is executable
+  file:
+    path: "{{ target_voltha_dir }}/nginx_config/start_service.sh"
+    mode: 0755
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: Configuration files are on the cluster host
+  copy:
+    src: "files/consul_config"
+    dest: "{{ target_voltha_dir }}"
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: Docker containers for Voltha are pulled
+  command: docker pull {{ docker_registry }}/{{ item }}
+  with_items: "{{ voltha_containers }}"
+  when: target == "cluster"
+  tags: [voltha]
+- name: Docker images are re-tagged to expected names
+  command: docker tag {{ docker_registry }}/{{ item }} {{ item }}
+  with_items: "{{ voltha_containers }}"
+  when: target == "cluster"
+  tags: [voltha]
+#- name: Old docker image tags are removed
+#  command: docker rmi {{ docker_registry }}/{{ item }}
+#  with_items: "{{ voltha_containers }}"
+#  when: target == "cluster"
+#  tags: [voltha]
+
+
+# Update the insecure registry to reflect the current installer.
+# The installer name can change depending on whether test mode
+# is being used or not.
+- name: Enable insecure install registry
+  template:
+    src: "{{ docker_daemon_json }}"
+    dest: "{{ docker_daemon_json_dest }}"
+  register: copy_result
+  when: target == "installer"
+  tags: [voltha]
+
+- name: Debian Daemon is reloaded
+  command: systemctl daemon-reload
+  when: copy_result|changed and is_systemd is defined and target == "installer"
+  tags: [voltha]
+
+- name: Debian Docker service is restarted
+  service:
+    name: docker
+    state: restarted
+  when: (copy_result|changed or user_result|changed) and
+        target == "installer"
+  tags: [voltha]
+
+#- name: TEMPORARY RULE TO INSTALL ZOOKEEPER
+#  command: docker pull zookeeper
+#  when: target == "installer"
+#  tags: [voltha]
+
+- name: Docker images are re-tagged to registry for push
+  command: docker tag {{ item }} {{ docker_push_registry }}/{{ item }}
+  with_items: "{{ voltha_containers }}"
+  when: target == "installer"
+  tags: [voltha]
+- name: Docker containers for Voltha are pushed
+  command: docker push {{ docker_push_registry }}/{{ item }}
+  with_items: "{{ voltha_containers }}"
+  when: target == "installer"
+  tags: [voltha]
+- name: Temporary registry push tags are removed
+  command: docker rmi {{ docker_push_registry }}/{{ item }}
+  with_items: "{{ voltha_containers }}"
+  when: target == "installer"
+  tags: [voltha]
+
+- name: consul overlay network exists
+  command: docker network create --driver overlay --subnet 10.10.10.0/29 consul_net
+  when: target == "startup"
+  tags: [voltha]
+
+- name: kafka overlay network exists
+  command: docker network create --driver overlay --subnet 10.10.11.0/24 kafka_net
+  when: target == "startup"
+  tags: [voltha]
+
+- name: voltha overlay network exists
+  command: docker network create --driver overlay --subnet 10.10.12.0/24 voltha_net
+  when: target == "startup"
+  tags: [voltha]
+
+- name: consul cluster is running
+  command: docker service create --name consul --network consul_net --network voltha_net -e 'CONSUL_BIND_INTERFACE=eth0' --mode global --publish "8300:8300" --publish "8400:8400" --publish "8500:8500" --publish "8600:8600/udp" --mount type=bind,source=/cord/incubator/voltha/consul_config,destination=/consul/config consul agent -config-dir /consul/config
+  when: target == "startup"
+  tags: [voltha]
+
+- name: zookeeper node zk1 is running
+  command: docker service create --name zk1 --network kafka_net --network voltha_net -e 'ZOO_MY_ID=1' -e "ZOO_SERVERS=server.1=0.0.0.0:2888:3888 server.2=zk2:2888:3888 server.3=zk3:2888:3888" zookeeper
+  when: target == "startup"
+  tags: [voltha]
+
+- name: zookeeper node zk2 is running
+  command: docker service create --name zk2 --network kafka_net --network voltha_net -e 'ZOO_MY_ID=2' -e "ZOO_SERVERS=server.1=zk1:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zk3:2888:3888" zookeeper
+  when: target == "startup"
+  tags: [voltha]
+
+- name: zookeeper node zk3 is running
+  command: docker service create --name zk3 --network kafka_net --network voltha_net -e 'ZOO_MY_ID=3' -e "ZOO_SERVERS=server.1=zk1:2888:3888 server.2=zk2:2888:3888 server.3=0.0.0.0:2888:3888" zookeeper
+  when: target == "startup"
+  tags: [voltha]
+
+- name: kafka is running
+  command: docker service create --name kafka --network voltha_net  -e "KAFKA_ADVERTISED_PORT=9092" -e "KAFKA_ZOOKEEPER_CONNECT=zk1:2181,zk2:2181,zk3:2181" -e "KAFKA_HEAP_OPTS=-Xmx256M -Xms128M" --mode global --publish "9092:9092" wurstmeister/kafka
+  when: target == "startup"
+  tags: [voltha]
+
+- name: voltha is running on a single host for testing
+  command: docker service create --name voltha_core --network voltha_net cord/voltha voltha/voltha/main.py -v --consul=consul:8500 --kafka=kafka
+  when: target == "startup"
+  tags: [voltha]
diff --git a/install/cleanup.sh b/install/cleanup.sh
new file mode 100755
index 0000000..c3e545f
--- /dev/null
+++ b/install/cleanup.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+rm ansible/host_vars/*
+rm ansible/roles/voltha/templates/daemon.json
+rm -fr volthaInstaller-2/
+rm -fr volthaInstaller/
+rm ansible/volthainstall.retry
+rm key.pem
+sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
+git checkout ansible/hosts/voltha
+git checkout ansible/hosts/installer
+git checkout ../settings.vagrant.yaml
+
diff --git a/install/containers.cfg b/install/containers.cfg
new file mode 100644
index 0000000..a9ffbbb
--- /dev/null
+++ b/install/containers.cfg
@@ -0,0 +1,15 @@
+voltha_containers:
+  - voltha/grafana:latest
+  - cord/vcli:latest
+  - cord/dashd:latest
+  - cord/shovel:latest
+  - cord/netconf:latest
+  - cord/ofagent:latest
+  - cord/chameleon:latest
+  - cord/voltha:latest
+  - cord/voltha-base:latest
+  - fluent/fluentd:latest
+  - consul:latest
+  - wurstmeister/kafka:latest
+  - zookeeper:latest
+  - gliderlabs/registrator:master
diff --git a/install/image-list.cfg b/install/image-list.cfg
deleted file mode 100644
index 54570c6..0000000
--- a/install/image-list.cfg
+++ /dev/null
@@ -1,27 +0,0 @@
-voltha/nginx
-voltha/grafana
-voltha/portainer
-cord/vcli
-cord/dashd
-cord/config-push
-cord/tester
-cord/onos
-cord/shovel
-cord/netconf
-cord/podder
-cord/ofagent
-cord/chameleon
-cord/voltha
-cord/voltha-base
-nginx
-consul
-fluent/fluentd
-alpine
-portainer/portainer
-wurstmeister/kafka
-ubuntu
-onosproject/onos
-wurstmeister/zookeeper
-kamon/grafana_graphite
-gliderlabs/registrator
-centurylink/ca-certs
diff --git a/install/installer.sh b/install/installer.sh
index 702417a..4600efd 100755
--- a/install/installer.sh
+++ b/install/installer.sh
@@ -104,11 +104,15 @@
 # guaranteed not to be the same device as the installer.
 mkdir grub_updates
 sudo mv deb_files/*grub* grub_updates
-echo "deb_files:" >> ansible/group_vars/all
-for i in deb_files/*.deb
-do
-echo "  - `basename $i`" >> ansible/group_vars/all
-done
+# Sort the packages in dependency order to get rid of scary non-errors
+# that are issued by ansible.
+#echo -e "${lBlue}Dependency sorting dependent software${NC}"
+#./sort_packages.sh
+#echo "deb_files:" >> ansible/group_vars/all
+#for i in `cat sortedDebs.txt`
+#do
+#echo "  - $i" >> ansible/group_vars/all
+#done
 
 # Make sure the ssh keys propagate to all hosts allowing passwordless logins between them
 echo -e "${lBlue}Propagating ssh keys${NC}"