VOL-570: Install Kubernetes in production and test modes

- Supports offline installation of k8s
- Configures and deploys VOLTHA components

Change-Id: I29eb87c035cea1e86176bb2e30d36278760ece82
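
A quick way to exercise the new path during review (hedged: "k8s" is the
command line option added to CreateInstaller.sh below, and combining it
with the existing test mode is assumed to be the intended workflow):

    # Build the installer with the Kubernetes framework enabled
    ./CreateInstaller.sh test k8s
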
diff --git a/Makefile b/Makefile
index 5a67727..92dcd86 100644
--- a/Makefile
+++ b/Makefile
@@ -129,10 +129,19 @@
 # Manually remove some images from this list as they don't reflect the new
 # naming conventions for the VOLTHA build
 FETCH_K8S_IMAGE_LIST = \
+       alpine:3.6 \
        consul:0.9.2 \
+       fluent/fluentd:v0.12.42 \
+       gcr.io/google_containers/defaultbackend:1.4 \
+       gcr.io/google_containers/kubernetes-dashboard-amd64:v1.8.1 \
+       k8s.gcr.io/fluentd-gcp:1.30 \
+       kamon/grafana_graphite:3.0 \
+       marcelmaatkamp/freeradius:latest \
+       quay.io/coreos/hyperkube:v1.9.2_coreos.0 \
        quay.io/coreos/etcd-operator:v0.7.2 \
-       wurstmeister/kafka:1.0.0 \
-       zookeeper:3.4.11
+       quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.10.2 \
+       wurstmeister/kafka:latest \
+       zookeeper:latest
 
 FETCH_IMAGE_LIST = $(shell echo $(FETCH_BUILD_IMAGE_LIST) $(FETCH_COMPOSE_IMAGE_LIST) $(FETCH_K8S_IMAGE_LIST) | tr ' ' '\n' | sort -u)
 
diff --git a/install/CreateInstaller.sh b/install/CreateInstaller.sh
index 34828e0..870dfa8 100755
--- a/install/CreateInstaller.sh
+++ b/install/CreateInstaller.sh
@@ -14,6 +14,7 @@
 # Command line argument variables
 testMode="no"
 rebuildVoltha="no"
+useKubernetes="no"
 
 
 
@@ -43,6 +44,10 @@
 				rebuildVoltha="yes"
 				echo -e "${lBlue}Voltha rebuild is ${green}enabled${NC}"
 				;;
+			"k8s" )
+				useKubernetes="yes"
+				echo -e "${lBlue}Kubernetes framework is ${green}enabled${NC}"
+				;;
 		esac
 	done
 }
@@ -256,6 +261,10 @@
 	vagrant destroy ha-serv${uId}-{1,2,3}
 	vagrant up ha-serv${uId}-{1,2,3}
 	./devSetHostList.sh
+
+	if [ "$useKubernetes" == "yes" ]; then
+		./devSetKubernetes.sh
+	fi
 fi
 
 # Ensure that the voltha VM is running so that images can be secured
@@ -278,6 +287,11 @@
 		echo -e "${red}Voltha build failed!! ${lCyan}Please review the log and correct${lBlue} is running${NC}"
 		exit 1
 	fi
+
+	if [ "$useKubernetes" == "yes" ]; then
+		# Load required k8s libraries on the voltha instance
+		./preloadKubernetes.sh
+	fi
 fi
 
 # Extract all the image names and tags from the running voltha VM
@@ -288,7 +302,8 @@
 	echo -e "${lBlue}Extracting the docker image list from the voltha VM${NC}"
 	volIpAddr=`virsh domifaddr $vVmName${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
 	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ../.vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$volIpAddr "docker image ls" > images.tmp
-	cat images.tmp | grep -v 5000 | tail -n +2 | awk '{printf("  - %s:%s\n", $1, $2)}' | grep -v "<none>" > image-list.cfg
+        # Construct list of images; exclude all entries that point to the registry
+	cat images.tmp | grep -v :5000 | tail -n +2 | awk '{printf("  - %s:%s\n", $1, $2)}' | grep -v "<none>" > image-list.cfg
 	rm -f images.tmp
 	sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
 	echo "voltha_containers:" >> ansible/group_vars/all
@@ -299,14 +314,19 @@
 else
 	echo -e "${lBlue}Set up the docker image list from ${lCyan}containers.cfg${NC}"
 	sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
-	cat containers.cfg >> ansible/group_vars/all
+
+	if [ "$useKubernetes" == "yes" ]; then
+		cat containers.cfg.k8s >> ansible/group_vars/all
+	else
+		cat containers.cfg >> ansible/group_vars/all
+	fi
 fi
 
 
 # Install python which is required for ansible
 echo -e "${lBlue}Installing ${lCyan}Python${NC}"
 ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update 
-ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y install python
+ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y install python python-netaddr
 
 # Move all the python deb files to their own directory so they can be installed first
 echo -e "${lBlue}Caching ${lCyan}Python${lBlue} install${NC}"
@@ -314,6 +334,12 @@
 ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr "sudo mv /var/cache/apt/archives/*.deb /home/vinstall/python-deb"
 ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr "sudo chown -R vinstall.vinstall /home/vinstall/python-deb"
 
+if [ "$useKubernetes" == "yes" ]; then
+       echo -e "${lBlue}Cloning ${lCyan}Kubespray${lBlue} repository${NC}"
+       ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr "git clone --branch v2.4.0 https://github.com/kubernetes-incubator/kubespray.git /home/vinstall/kubespray"
+       #ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr "git clone https://github.com/kubernetes-incubator/kubespray.git /home/vinstall/kubespray"
+       ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr "sudo chown -R vinstall.vinstall /home/vinstall/kubespray"
+fi
 
 # Create the docker.cfg file in the ansible tree using the VMs IP address
 echo 'DOCKER_OPTS="$DOCKER_OPTS --insecure-registry '$ipAddr':5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=http://'$ipAddr':5001"' > ansible/roles/docker/templates/docker.cfg
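
The switch from "grep -v 5000" to "grep -v :5000" is deliberate: the
unanchored pattern also discarded any repository or tag that merely
contained the substring 5000. A worked example of the pipeline (the
sample rows are invented for illustration):

    # images.tmp (header plus three rows):
    #   REPOSITORY                TAG     IMAGE ID   CREATED       SIZE
    #   10.10.10.10:5000/consul   0.9.2   abc123     2 weeks ago   51MB
    #   consul                    0.9.2   abc123     2 weeks ago   51MB
    #   zookeeper                 latest  def456     3 weeks ago   146MB
    cat images.tmp | grep -v :5000 | tail -n +2 \
        | awk '{printf("  - %s:%s\n", $1, $2)}' | grep -v "<none>"
    # yields the image-list.cfg entries:
    #   - consul:0.9.2
    #   - zookeeper:latest
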
diff --git a/install/ansible/roles/cluster-host/tasks/main.yml b/install/ansible/roles/cluster-host/tasks/main.yml
index 5d0b39e..531b827 100644
--- a/install/ansible/roles/cluster-host/tasks/main.yml
+++ b/install/ansible/roles/cluster-host/tasks/main.yml
@@ -314,7 +314,7 @@
 
 - name: The glusterfs service is started
   service:
-    name: glusterfs-server
+    name: glusterd
     enabled: yes
     state: started
   when: target == "cluster"
diff --git a/install/ansible/roles/common/tasks/main.yml b/install/ansible/roles/common/tasks/main.yml
index f5904dc..ec36046 100644
--- a/install/ansible/roles/common/tasks/main.yml
+++ b/install/ansible/roles/common/tasks/main.yml
@@ -13,6 +13,12 @@
   with_items: "{{ hosts }}"
   tags: [common]
 
+- name: GlusterFS repository is available
+  apt_repository:
+    repo: 'ppa:gluster/glusterfs-3.13'
+  when: target != "cluster"
+  tags: [common]
+
 - name: Latest apt packages
   apt:
     name: "{{ item }}"
diff --git a/install/ansible/roles/glusterfs/tasks/main.yml b/install/ansible/roles/glusterfs/tasks/main.yml
index b8e17da..782a151 100644
--- a/install/ansible/roles/glusterfs/tasks/main.yml
+++ b/install/ansible/roles/glusterfs/tasks/main.yml
@@ -26,3 +26,21 @@
     - registry_volume
     - logging_volume
 
+- name: The replicated registry filesystem is mounted
+  mount:
+    path: "{{ target_voltha_dir }}/registry_data"
+    src: "{{ inventory_hostname }}:/registry_volume"
+    fstype: glusterfs
+    opts:  "defaults,_netdev,noauto,x-systemd.automount"
+    state: mounted
+  when: target == "cluster"
+  tags: [cluster_host]
+
+- name: The replicated logging filesystem is mounted on boot
+  mount:
+    path: "/var/log/voltha"
+    src: "{{ inventory_hostname }}:/logging_volume"
+    fstype: glusterfs
+    opts:  "defaults,_netdev,noauto,x-systemd.automount"
+    state: mounted
+  when: target == "cluster"
+  tags: [cluster_host]
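
The mount options mirror a manual glusterfs mount; an equivalent one-off
command for debugging a single node (the host name and the expansion of
target_voltha_dir are placeholders):

    # Hypothetical manual equivalent of the registry mount above:
    sudo mount -t glusterfs -o defaults,_netdev \
        node1:/registry_volume /cord/voltha/registry_data
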
diff --git a/install/ansible/roles/installer/tasks/main.yml b/install/ansible/roles/installer/tasks/main.yml
index 098d17d..492386d 100644
--- a/install/ansible/roles/installer/tasks/main.yml
+++ b/install/ansible/roles/installer/tasks/main.yml
@@ -32,6 +32,7 @@
     - install/BashLoginTarget.sh
     - install/ansible
     - compose
+    - k8s
     - consul_config/basic.json
   tags: [installer]
 
diff --git a/install/ansible/roles/kubernetes-host/files/ssh_config b/install/ansible/roles/kubernetes-host/files/ssh_config
deleted file mode 100644
index 990a43d..0000000
--- a/install/ansible/roles/kubernetes-host/files/ssh_config
+++ /dev/null
@@ -1,3 +0,0 @@
-Host *
-   StrictHostKeyChecking no
-   UserKnownHostsFile=/dev/null
diff --git a/install/ansible/roles/kubernetes-host/tasks/main.yml b/install/ansible/roles/kubernetes-host/tasks/main.yml
deleted file mode 100644
index 8d5a564..0000000
--- a/install/ansible/roles/kubernetes-host/tasks/main.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-# Note: When the target == "cluster" the installer
-# is running to install voltha in the cluster hosts.
-# Whe the target == "installer" the installer is being
-# created.
-- name: A .ssh directory for the voltha user exists
-  file:
-    #path: "{{ ansible_env['HOME'] }}/.ssh"
-    path: "{{ target_voltha_home }}/.ssh"
-    state: directory
-    owner: voltha
-    group: voltha
-  tags: [cluster_host]
-
-- name: known_hosts file is absent for the voltha user
-  file:
-    path: "{{ target_voltha_home }}/.ssh/known_hosts"
-    state: absent
-  tags: [cluster_host]
-
-- name: Known host checking is disabled
-  copy:
-    src: files/ssh_config
-    dest: "{{ target_voltha_home }}/.ssh/config"
-    owner: voltha
-    group: voltha
-    mode: 0600
-  tags: [cluster_host]
-
-- name: Cluster host keys are propagated to all hosts in the cluster
-  copy:
-    src: files/.keys
-    dest: "{{ target_voltha_home }}"
-    owner: voltha
-    group: voltha
-    mode: 0600
-  tags: [cluster_host]
-
-- name: A voltha directory under /var/log for voltha logs exists
-  file:
-    path: "/var/log/voltha/logging_volume"
-    state: directory
-  tags: [cluster_host]
-
-- name: Directories for voltha processes are created
-  file:
-    path: "{{ target_voltha_dir }}/{{ item }}"
-    state: directory
-    mode: 0755
-    owner: voltha
-    group: voltha
-  with_items:
-    - registry_data
-    - consul/data
-    - consul/config
-  when: target == "cluster"
-  tags: [cluster_host]
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/config.yml b/install/ansible/roles/voltha-k8s/tasks/config.yml
similarity index 86%
rename from install/ansible/roles/voltha-kubernetes/tasks/config.yml
rename to install/ansible/roles/voltha-k8s/tasks/config.yml
index 87e5ab2..c3bfe22 100644
--- a/install/ansible/roles/voltha-kubernetes/tasks/config.yml
+++ b/install/ansible/roles/voltha-k8s/tasks/config.yml
@@ -24,7 +24,7 @@
 
 - name: VOLT-HA Config | Required directory exists
   file:
-    path: "{{ target_voltha_dir }}"
+    path: "{{ target_voltha_home }}"
     state: directory
     owner: voltha
     group: voltha
@@ -34,7 +34,7 @@
 - name: VOLT-HA Config | Configuration files and directories are copied
   synchronize:
     src: "{{ install_source }}/{{ item }}"
-    dest: "{{ target_voltha_dir }}"
+    dest: "{{ target_voltha_home }}"
     archive: no
     owner: no
     perms: no
@@ -47,7 +47,7 @@
 
 - name: VOLT-HA Config | Configuration directories are owned by voltha
   file:
-    path: "{{ target_voltha_dir }}/{{ item }}"
+    path: "{{ target_voltha_home }}/{{ item }}"
     owner: voltha
     group: voltha
     recurse: yes
@@ -74,10 +74,3 @@
   with_items: "{{ voltha_containers }}"
   when: target == "cluster"
   tags: [voltha]
-
-- name: VOLT-HA Config | Add CNI bridge for PONSIM layer 2 support
-  template:
-    src: ponsim_bridge.j2
-    dest: "/etc/cni/net.d/10-pon0.conf"
-  when: target == "cluster"
-  tags: [voltha]
\ No newline at end of file
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/deploy.yml b/install/ansible/roles/voltha-k8s/tasks/deploy.yml
similarity index 67%
rename from install/ansible/roles/voltha-kubernetes/tasks/deploy.yml
rename to install/ansible/roles/voltha-k8s/tasks/deploy.yml
index f5a2929..91cc1ea 100644
--- a/install/ansible/roles/voltha-kubernetes/tasks/deploy.yml
+++ b/install/ansible/roles/voltha-k8s/tasks/deploy.yml
@@ -1,10 +1,10 @@
 - name: "VOLT-HA Deploy | Add Namespace"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/namespace.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/namespace.yml
   run_once: true
 
 # Ingress
 - name: "VOLT-HA Deploy | Start Ingress Controller"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/ingress
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/ingress
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for Default HTTP backend to be ready"
@@ -18,7 +18,7 @@
 # Zookeeper
 
 - name: "VOLT-HA Deploy | Start zookeeper"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/zookeeper.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/zookeeper.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for zookeeper to be ready"
@@ -31,17 +31,21 @@
 
 # Kafka
 - name: "VOLT-HA Deploy | Start kafka"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/kafka.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/kafka.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for kafka to be ready"
   command: kubectl rollout status statefulset/kafka -w -n {{ voltha_namespace }}
   run_once: true
 
-# Fluentd
+# Fluentd Config
+- name: "VOLT-HA Deploy | Add Fluentd Configuration"
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/fluentd-config.yml
+  run_once: true
 
+# Fluentd
 - name: "VOLT-HA Deploy | Start fluentd"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/fluentd.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/fluentd.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for fluentd to be ready"
@@ -49,12 +53,11 @@
   with_items:
     - fluentdactv
     - fluentdstby
-    - fluentd
   run_once: true
 
 # Consul
 - name: "VOLT-HA Deploy | Start consul"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/consul.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/consul.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for consul to be ready"
@@ -63,7 +66,7 @@
 
 # Voltha Core (for consul)
 - name: "VOLT-HA Deploy | Start VOLT-HA core (for consul)"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/vcore_for_consul.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/vcore_for_consul.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for VOLT-HA core (for consul) to be ready"
@@ -72,7 +75,7 @@
 
 # OFagent
 - name: "VOLT-HA Deploy | Start OpenFlow Agent"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/ofagent.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/ofagent.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for OpenFlow Agent to be ready"
@@ -81,7 +84,7 @@
 
 # Envoy (for consul)
 - name: "VOLT-HA Deploy | Start Envoy (for consul)"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/envoy_for_consul.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/envoy_for_consul.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for Envoy (for consul) to be ready"
@@ -90,7 +93,7 @@
 
 # Voltha CLI
 - name: "VOLT-HA Deploy | Start VOLT-HA CLI"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/vcli.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/vcli.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for VOLT-HA CLI to be ready"
@@ -99,7 +102,7 @@
 
 # NETCONF
 - name: "VOLT-HA Deploy | Start NETCONF"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/netconf.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/netconf.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for NETCONF to be ready"
@@ -108,7 +111,7 @@
 
 # Grafana
 - name: "VOLT-HA Deploy | Start Grafana"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/grafana.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/grafana.yml
   run_once: true
   when: monitor_voltha_stats
 
@@ -119,7 +122,7 @@
 
 # Dashd and Shovel
 - name: "VOLT-HA Deploy | Start DASHD and SHOVEL"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/stats.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/stats.yml
   run_once: true
   when: monitor_voltha_stats
 
@@ -130,30 +133,3 @@
     - shovel
   run_once: true
   when: monitor_voltha_stats
-
-# FreeRADIUS Config
-- name: "VOLT-HA Deploy | Add FreeRADIUS Configuration"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/freeradius-config.yml
-  run_once: true
-
-# FreeRADIUS
-- name: "VOLT-HA Deploy | Start FreeRADIUS"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/freeradius.yml
-  run_once: true
-
-- name: "VOLT-HA Deploy | Wait for FreeRADIUS to be ready"
-  command: kubectl rollout status deployment/freeradius -w -n {{ voltha_namespace }}
-  run_once: true
-
-# ONOS
-- name: "VOLT-HA Deploy | Start ONOS"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/onos.yml
-  run_once: true
-
-- name: "VOLT-HA Deploy | Wait for ONOS to be ready"
-  command: kubectl rollout status deployment/onos -w -n {{ voltha_namespace }}
-  run_once: true
-
-
-
-
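
Every step in this playbook follows the same apply-then-wait idiom, so a
failed rollout stops the play instead of racing ahead. The fluentd steps
condensed to plain kubectl (assuming target_voltha_home expands to
/home/voltha and voltha_namespace to voltha):

    kubectl apply -f /home/voltha/k8s/fluentd-config.yml
    kubectl apply -f /home/voltha/k8s/fluentd.yml
    # Block until both fluentd deployments finish rolling out
    for d in fluentdactv fluentdstby; do
        kubectl rollout status deployment/$d -w -n voltha
    done
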
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/main.yml b/install/ansible/roles/voltha-k8s/tasks/main.yml
similarity index 100%
rename from install/ansible/roles/voltha-kubernetes/tasks/main.yml
rename to install/ansible/roles/voltha-k8s/tasks/main.yml
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/teardown.yml b/install/ansible/roles/voltha-k8s/tasks/teardown.yml
similarity index 69%
rename from install/ansible/roles/voltha-kubernetes/tasks/teardown.yml
rename to install/ansible/roles/voltha-k8s/tasks/teardown.yml
index fdd0485..10fb856 100644
--- a/install/ansible/roles/voltha-kubernetes/tasks/teardown.yml
+++ b/install/ansible/roles/voltha-k8s/tasks/teardown.yml
@@ -1,82 +1,69 @@
-# ONOS
-- name: "VOLT-HA Teardown | Stop ONOS"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/onos.yml
-  run_once: true
-
-# FreeRADIUS
-- name: "VOLT-HA Teardown | Stop FreeRADIUS"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/freeradius.yml
-  run_once: true
-
-# FreeRADIUS Config
-- name: "VOLT-HA Teardown | Remove FreeRADIUS Configuration"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/freeradius-config.yml
-  run_once: true
-
 # Dashd and Shovel
 - name: "VOLT-HA Teardown | Stop DASHD and SHOVEL"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/stats.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/stats.yml
   run_once: true
 
 # Grafana
 - name: "VOLT-HA Teardown | Stop Grafana"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/grafana.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/grafana.yml
   run_once: true
 
 # NETCONF
 - name: "VOLT-HA Teardown | Stop NETCONF"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/netconf.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/netconf.yml
   run_once: true
 
 # Voltha CLI
 - name: "VOLT-HA Teardown | Stop VOLT-HA CLI"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/vcli.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/vcli.yml
   run_once: true
 
 # Envoy (for consul)
 - name: "VOLT-HA Teardown | Stop Envoy (for consul)"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/envoy_for_consul.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/envoy_for_consul.yml
   run_once: true
 
 # OFagent
 - name: "VOLT-HA Teardown | Stop OpenFlow Agent"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/ofagent.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/ofagent.yml
   run_once: true
 
 # Voltha Core (for consul)
 - name: "VOLT-HA Teardown | Stop VOLT-HA core (for consul)"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/vcore_for_consul.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/vcore_for_consul.yml
   run_once: true
 
 # Consul
 - name: "VOLT-HA Teardown | Stop consul"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/consul.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/consul.yml
   run_once: true
 
 # Fluentd
 - name: "VOLT-HA Teardown | Stop fluentd"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/fluentd.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/fluentd.yml
+  run_once: true
+
+# Fluentd Config
+- name: "VOLT-HA Teardown | Remove Fluentd Configuration"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/fluentd-config.yml
   run_once: true
 
 # Kafka
 - name: "VOLT-HA Teardown | Stop kafka"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/kafka.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/kafka.yml
   run_once: true
 
 # Zookeeper
 - name: "VOLT-HA Teardown | Stop zookeeper"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/zookeeper.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/zookeeper.yml
   run_once: true
 
 # Ingress
 - name: "VOLT-HA Teardown | Stop Ingress Controller"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/ingress
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/ingress
   run_once: true
 
 # Namespace
 - name: "VOLT-HA Teardown | Remove Namespace"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/namespace.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/namespace.yml
   run_once: true
-
-
-
diff --git a/install/ansible/roles/voltha-kubernetes/templates/bash_profile.j2 b/install/ansible/roles/voltha-k8s/templates/bash_profile.j2
similarity index 100%
rename from install/ansible/roles/voltha-kubernetes/templates/bash_profile.j2
rename to install/ansible/roles/voltha-k8s/templates/bash_profile.j2
diff --git a/install/ansible/roles/voltha-kubernetes/templates/bashrc.j2 b/install/ansible/roles/voltha-k8s/templates/bashrc.j2
similarity index 100%
rename from install/ansible/roles/voltha-kubernetes/templates/bashrc.j2
rename to install/ansible/roles/voltha-k8s/templates/bashrc.j2
diff --git a/install/ansible/roles/voltha-kubernetes/templates/ponsim_bridge.j2 b/install/ansible/roles/voltha-kubernetes/templates/ponsim_bridge.j2
deleted file mode 100644
index 6ddcfbd..0000000
--- a/install/ansible/roles/voltha-kubernetes/templates/ponsim_bridge.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-    "name": "pon0",
-    "type": "bridge",
-    "bridge": "pon0",
-    "isGateway": true,
-    "ipMask": true,
-    "ipam": {
-        "type": "host-local",
-        "subnet": "10.22.0.0/16",
-        "routes": [
-          { "dst": "0.0.0.0/0" }
-        ]
-    }
-}
-
diff --git a/install/ansible/voltha-k8s.yml b/install/ansible/voltha-k8s.yml
index 1761d04..7d8cb62 100644
--- a/install/ansible/voltha-k8s.yml
+++ b/install/ansible/voltha-k8s.yml
@@ -4,20 +4,29 @@
   vars:
     target: cluster
   roles:
-    - { role: common, when: config_voltha is defined }
-    - { role: kubernetes-host, when: config_voltha is defined }
-    - { role: voltha-kubernetes, when: config_voltha is defined }
+    - common
+    - cluster-host
+    - docker
+    - { role: voltha-k8s, when: config_voltha is defined }
+
+- hosts: cluster
+  remote_user: voltha
+  serial: 1
+  vars:
+    target: cluster
+  roles:
+    - glusterfs
 
 - hosts:  k8s-master
   remote_user: voltha
   serial: 1
   roles:
     - {
-        role: voltha-kubernetes, when: deploy_voltha is defined,
+        role: voltha-k8s, when: deploy_voltha is defined,
         monitor_voltha_stats: true,
         use_ponsim: true
       }
     - {
-        role: voltha-kubernetes,
+        role: voltha-k8s,
         when: teardown_voltha is defined
       }
diff --git a/install/containers.cfg.k8s b/install/containers.cfg.k8s
new file mode 100644
index 0000000..5a7eeb8
--- /dev/null
+++ b/install/containers.cfg.k8s
@@ -0,0 +1,30 @@
+voltha_containers:
+  - consul:0.9.2
+  - fluent/fluentd:v0.12.42
+  - gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2
+  - gcr.io/google_containers/defaultbackend:1.4
+  - gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.8
+  - gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.8
+  - gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.8
+  - gcr.io/google_containers/kubernetes-dashboard-amd64:v1.8.1
+  - gcr.io/google_containers/pause-amd64:3.0
+  - k8s.gcr.io/fluentd-gcp:1.30
+  - kamon/grafana_graphite:3.0
+  - lyft/envoy:29361deae91575a1d46c7a21e913f19e75622ebe
+  - quay.io/calico/cni:v1.11.0
+  - quay.io/calico/ctl:v1.6.1
+  - quay.io/calico/node:v2.6.2
+  - quay.io/calico/routereflector:v0.4.0
+  - quay.io/coreos/etcd:v3.2.4
+  - quay.io/coreos/hyperkube:v1.9.2_coreos.0
+  - quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.10.2
+  - voltha-cli:latest
+  - voltha-dashd:latest
+  - voltha-envoy:latest
+  - voltha-netconf:latest
+  - voltha-ofagent:latest
+  - voltha-shovel:latest
+  - voltha-tools:latest
+  - voltha-voltha:latest
+  - wurstmeister/kafka:latest
+  - zookeeper:latest
diff --git a/install/devSetKubernetes.sh b/install/devSetKubernetes.sh
new file mode 100755
index 0000000..e504d12
--- /dev/null
+++ b/install/devSetKubernetes.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# This script is for development use. Slashes in the subnet values are escaped because installer.sh later splices them into sed expressions.
+
+echo 'cluster_framework="kubernetes"' >> install.cfg
+echo 'cluster_service_subnet="192.168.0.0\/18"' >> install.cfg
+echo 'cluster_pod_subnet="192.168.128.0\/18"' >> install.cfg
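
After a run, install.cfg ends with the three lines below; the escaped
slashes survive into the file verbatim (the single quotes keep the shell
from interpreting the backslashes) so that installer.sh can splice the
values into sed patterns:

    cluster_framework="kubernetes"
    cluster_service_subnet="192.168.0.0\/18"
    cluster_pod_subnet="192.168.128.0\/18"
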
diff --git a/install/install.cfg b/install/install.cfg
index 4bf0b66..8a94851 100644
--- a/install/install.cfg
+++ b/install/install.cfg
@@ -11,14 +11,11 @@
 # consulLimit=5
 
 # Specify the cluster framework type (swarm or kubernetes)
-cluster_framework="swarm"
-
-# Docker registry address
-#cluster_registry="localhost:5000"
+# cluster_framework="kubernetes"
 
 # Address range for kubernetes services
-#cluster_service_subnet="192.168.0.0\/18"
+# cluster_service_subnet="192.168.0.0\/18"
 
 # Address range for kubernetes pods
-#cluster_pod_subnet="192.168.64.0\/18"
+# cluster_pod_subnet="192.168.128.0\/18"
 
diff --git a/install/installer.sh b/install/installer.sh
index f3a5aa3..4a8453c 100755
--- a/install/installer.sh
+++ b/install/installer.sh
@@ -105,93 +105,118 @@
 
 done
 
+# Add the dependent software list to the cluster variables
+echo -e "${lBlue}Setting up dependent software${NC}"
+# Delete any grub updates since the boot disk is almost
+# guaranteed not to be the same device as the installer.
+mkdir grub_updates
+sudo mv deb_files/*grub* grub_updates
+# Sort the packages in dependency order to get rid of scary non-errors
+# that are issued by ansible.
+#echo -e "${lBlue}Dependency sorting dependent software${NC}"
+#./sort_packages.sh
+#echo "deb_files:" >> ansible/group_vars/all
+#for i in `cat sortedDebs.txt`
+#do
+#echo "  - $i" >> ansible/group_vars/all
+#done
+
+# Make sure the ssh keys propagate to all hosts allowing passwordless logins between them
+echo -e "${lBlue}Propagating ssh keys${NC}"
 cp -r .keys ansible/roles/cluster-host/files
 
+# Install python on all 3 servers since python is required for ansible
+for i in $hosts
+do
+    echo -e "${lBlue}Installing ${lCyan}Python${lBlue}${NC}"
+    scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i -r python-deb voltha@$i:.
+    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i  voltha@$i "sudo dpkg -i /home/voltha/python-deb/*minimal*"
+    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i  voltha@$i sudo dpkg -i -R /home/voltha/python-deb
+    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i  voltha@$i rm -fr python-deb
+
+done
 
 if [  "$cluster_framework" == "kubernetes" ]; then
 
-	echo -e "${green}Deploying kubernetes${NC}"
+    echo -e "${green}Deploying kubernetes${NC}"
 
-    rm -rf kubespray
+    # Seed the voltha inventory from the stock kubespray inventory
+    cp -rfp kubespray/inventory kubespray/inventory/voltha
 
-    git clone https://github.com/kubernetes-incubator/kubespray.git
+    # Adjust kubespray configuration
 
-    cp -rfp kubespray/inventory/sample kubespray/inventory/voltha
-
-    # TODO: Replace with variables taken from config file
-    # bootstrap_os: ubuntu
-    # network_plugin: weave
-
+    # Destination OS
     sed -i -e "/bootstrap_os: none/s/.*/bootstrap_os: ubuntu/" \
         kubespray/inventory/voltha/group_vars/all.yml
-    sed -i -e "/kube_network_plugin: calico/s/.*/kube_network_plugin: weave/" \
-        kubespray/inventory/voltha/group_vars/k8s-cluster.yml
+
+    # Subnet used for deployed k8s services
     sed -i -e "/kube_service_addresses: 10.233.0.0\/18/s/.*/kube_service_addresses: $cluster_service_subnet/" \
         kubespray/inventory/voltha/group_vars/k8s-cluster.yml
+
+    # Subnet used for deployed k8s pods
     sed -i -e "/kube_pods_subnet: 10.233.64.0\/18/s/.*/kube_pods_subnet: $cluster_pod_subnet/" \
         kubespray/inventory/voltha/group_vars/k8s-cluster.yml
-    sed -i -e "s/docker_options: \"/&--insecure-registry=$cluster_registry /" \
-        kubespray/inventory/voltha/group_vars/k8s-cluster.yml
 
+    # Prevent any downloads from kubespray
+    sed -i -e "s/skip_downloads: false/skip_downloads: true/" \
+        kubespray/cluster.yml
+    sed -i -e "s/- { role: docker, tags: docker }/#&/" \
+        kubespray/cluster.yml
+    sed -i -e "s/skip_downloads: false/skip_downloads: true/" \
+        kubespray/roles/download/defaults/main.yml
+    sed -i -e "s/when: ansible_os_family == \"Debian\"/& and skip_downloads == \"false\" /" \
+        kubespray/roles/kubernetes/preinstall/tasks/main.yml
+    sed -i -e "s/or is_atomic)/& and skip_downloads == \"false\" /" \
+        kubespray/roles/kubernetes/preinstall/tasks/main.yml
+
+    # Disable swapon check
+    sed -i -e "s/kubelet_fail_swap_on|default(true)/kubelet_fail_swap_on|default(false)/" \
+        kubespray/roles/kubernetes/preinstall/tasks/verify-settings.yml
+
+    # Construct node inventory
     CONFIG_FILE=kubespray/inventory/voltha/hosts.ini python3 \
         kubespray/contrib/inventory_builder/inventory.py $hosts
 
+    ordered_nodes=`CONFIG_FILE=kubespray/inventory/voltha/hosts.ini python3 \
+        kubespray/contrib/inventory_builder/inventory.py print_ips`
+
     # The inventory builder defaults to two masters; promote node3 as well
     sed -i -e '/\[kube-master\]/a\
     node3
     ' kubespray/inventory/voltha/hosts.ini
 
-    ansible-playbook -u root -i kubespray/inventory/voltha/hosts.ini kubespray/cluster.yml
-
     echo "[k8s-master]" > ansible/hosts/k8s-master
 
+    mkdir -p kubespray/inventory/voltha/host_vars
+
     ctr=1
-    for i in $hosts
+    for i in $ordered_nodes
     do
+        echo -e "${lBlue}Adding SSH keys to kubespray ansible${NC}"
+        echo "ansible_ssh_private_key_file: $wd/.keys/$i" > kubespray/inventory/voltha/host_vars/node$ctr
+
         if [ $ctr -eq 1 ]; then
             echo  $i >> ansible/hosts/k8s-master
-            ctr=0
         fi
+        ctr=$((ctr + 1))
     done
 
-    ansible-playbook ansible/voltha-k8s.yml -i ansible/hosts/cluster -e 'config_voltha=true'
-    ansible-playbook ansible/voltha-k8s.yml -i ansible/hosts/k8s-master -e 'deploy_voltha=true'
+    # Prepare Voltha
+    # ... Prepares environment and copies all required container images
+    # ... including the ones needed by kubespray
+    cp ansible/ansible.cfg .ansible.cfg
+    ansible-playbook -v ansible/voltha-k8s.yml -i ansible/hosts/cluster -e 'config_voltha=true'
+
+    # Deploy kubernetes
+    ANSIBLE_CONFIG=kubespray/ansible.cfg ansible-playbook -v -b \
+        --become-method=sudo --become-user root -u voltha \
+        -i kubespray/inventory/voltha/hosts.ini kubespray/cluster.yml
+
+    # Deploy Voltha
+    ansible-playbook -v ansible/voltha-k8s.yml -i ansible/hosts/k8s-master -e 'deploy_voltha=true'
 
 else
-    #
     # Legacy swarm instructions
-    #
-
-    # Add the dependent software list to the cluster variables
-    echo -e "${lBlue}Setting up dependent software${NC}"
-    # Delete any grub updates since the boot disk is almost
-    # guaranteed not to be the same device as the installer.
-    mkdir grub_updates
-    sudo mv deb_files/*grub* grub_updates
-    # Sort the packages in dependency order to get rid of scary non-errors
-    # that are issued by ansible.
-    #echo -e "${lBlue}Dependency sorting dependent software${NC}"
-    #./sort_packages.sh
-    #echo "deb_files:" >> ansible/group_vars/all
-    #for i in `cat sortedDebs.txt`
-    #do
-    #echo "  - $i" >> ansible/group_vars/all
-    #done
-
-    # Make sure the ssh keys propagate to all hosts allowing passwordless logins between them
-    echo -e "${lBlue}Propagating ssh keys${NC}"
-    cp -r .keys ansible/roles/cluster-host/files
-
-    # Install python on all the 3 servers since python is required for
-    for i in $hosts
-    do
-        echo -e "${lBlue}Installing ${lCyan}Python${lBlue}${NC}"
-        scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i -r python-deb voltha@$i:.
-        ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i  voltha@$i "sudo dpkg -i /home/voltha/python-deb/*minimal*"
-        ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i  voltha@$i sudo dpkg -i -R /home/voltha/python-deb
-        ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i  voltha@$i rm -fr python-deb
-
-    done
 
     # Create the daemon.json file for the swarm
     echo "{" > daemon.json
diff --git a/install/preloadKubernetes.sh b/install/preloadKubernetes.sh
new file mode 100755
index 0000000..aab97ba
--- /dev/null
+++ b/install/preloadKubernetes.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+uId=`id -u`
+vmName="voltha_voltha${uId}"
+
+# Get the VM's ip address
+ipAddr=`virsh domifaddr $vmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
+
+# Retrieve stable kubespray repo
+ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i \
+	../.vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$ipAddr \
+       "git clone --branch v2.4.0 https://github.com/kubernetes-incubator/kubespray.git"
+
+# Set up a new ansible manifest that only downloads files
+cat <<HERE > download.yml
+---
+- hosts: k8s-cluster
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  roles:
+  - { role: kubespray-defaults}
+  - { role: download, tags: download, skip_downloads: false}
+HERE
+
+# Copy the manifest over to the voltha instance
+scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i \
+	../.vagrant/machines/voltha${uId}/libvirt/private_key \
+	download.yml vagrant@$ipAddr:kubespray/download.yml
+
+# Run the manifest
+ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i \
+	../.vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$ipAddr \
+	"mkdir -p releases && cd kubespray && ANSIBLE_CONFIG=ansible.cfg ansible-playbook -v -u root -i inventory/local-tests.cfg download.yml"
+
+rtrn=$?
+
+echo "Preload return code: $rtrn"
+
+exit $rtrn
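
A quick sanity check that the preload succeeded is to list the images now
cached on the voltha VM (same SSH pattern as the script above; the grep
expression is illustrative):

    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i \
        ../.vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$ipAddr \
        "docker image ls | grep -E 'hyperkube|calico|etcd'"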