VOL-642: Installation of VOLT-HA in a Kubernetes cluster

- Changed the existing installer to support both Swarm and Kubernetes
- Used kubespray as the base framework to deploy Kubernetes
- Implemented config/deploy/teardown tasks for voltha

Amendments:

- Changed from python to python3 when running the kubespray inventory builder (example invocation below)

Change-Id: I059fd348b9a749397b373e333b5602944c817745
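
For reference, a minimal sketch of the amended inventory-builder step; the paths and the $hosts list are the ones used in installer.sh below, nothing here is new behaviour:

    # Build the kubespray inventory for the voltha cluster using python3
    CONFIG_FILE=kubespray/inventory/voltha/hosts.ini python3 \
        kubespray/contrib/inventory_builder/inventory.py $hosts
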
diff --git a/install/ansible/group_vars/all b/install/ansible/group_vars/all
index 9ab3722..af887c2 100644
--- a/install/ansible/group_vars/all
+++ b/install/ansible/group_vars/all
@@ -14,3 +14,5 @@
 registry_volume_size: 5
 consul_volume_size: 5
 logger_volume_size: 20
+install_source: /home/vinstall
+voltha_namespace: voltha
diff --git a/install/ansible/roles/cluster-host/tasks/main.yml b/install/ansible/roles/cluster-host/tasks/main.yml
index 7358e6e..5d0b39e 100644
--- a/install/ansible/roles/cluster-host/tasks/main.yml
+++ b/install/ansible/roles/cluster-host/tasks/main.yml
@@ -37,7 +37,7 @@
 
 #- name: Required configuration directories are copied
 #  copy:
-#    src: "/home/vinstall/{{ item }}"
+#    src: "{{ install_source }}/{{ item }}"
 #    dest: "{{ target_voltha_home }}"
 #    owner: voltha
 #    group: voltha
@@ -50,7 +50,7 @@
 
 - name: Required configuration directories are copied
   synchronize:
-    src: "/home/vinstall/{{ item }}"
+    src: "{{ install_source }}/{{ item }}"
     dest: "{{ target_voltha_home }}"
     archive: no
     owner: no
@@ -65,7 +65,7 @@
 
 - name: Required configuration scripts are copied
   synchronize:
-    src: "/home/vinstall/{{ item }}"
+    src: "{{ install_source }}/{{ item }}"
     dest: "{{ target_voltha_home }}"
     archive: no
     owner: no
@@ -104,7 +104,7 @@
 
 #- name: upstart barrier filesystem loop mount script is installed
 #  copy:
-#    src: "/home/vinstall/losetup.conf"
+#    src: "{{ install_source }}/losetup.conf"
 #    dest: /etc/init
 #    owner: root
 #    group: root
diff --git a/install/ansible/roles/kubernetes-host/files/ssh_config b/install/ansible/roles/kubernetes-host/files/ssh_config
new file mode 100644
index 0000000..990a43d
--- /dev/null
+++ b/install/ansible/roles/kubernetes-host/files/ssh_config
@@ -0,0 +1,3 @@
+Host *
+   StrictHostKeyChecking no
+   UserKnownHostsFile=/dev/null
diff --git a/install/ansible/roles/kubernetes-host/tasks/main.yml b/install/ansible/roles/kubernetes-host/tasks/main.yml
new file mode 100644
index 0000000..8d5a564
--- /dev/null
+++ b/install/ansible/roles/kubernetes-host/tasks/main.yml
@@ -0,0 +1,56 @@
+# Note: When the target == "cluster" the installer
+# is running to install voltha on the cluster hosts.
+# When the target == "installer" the installer is being
+# created.
+- name: A .ssh directory for the voltha user exists
+  file:
+    #path: "{{ ansible_env['HOME'] }}/.ssh"
+    path: "{{ target_voltha_home }}/.ssh"
+    state: directory
+    owner: voltha
+    group: voltha
+  tags: [cluster_host]
+
+- name: known_hosts file is absent for the voltha user
+  file:
+    path: "{{ target_voltha_home }}/.ssh/known_hosts"
+    state: absent
+  tags: [cluster_host]
+
+- name: Known host checking is disabled
+  copy:
+    src: files/ssh_config
+    dest: "{{ target_voltha_home }}/.ssh/config"
+    owner: voltha
+    group: voltha
+    mode: 0600
+  tags: [cluster_host]
+
+- name: Cluster host keys are propagated to all hosts in the cluster
+  copy:
+    src: files/.keys
+    dest: "{{ target_voltha_home }}"
+    owner: voltha
+    group: voltha
+    mode: 0600
+  tags: [cluster_host]
+
+- name: A voltha directory under /var/log for voltha logs exists
+  file:
+    path: "/var/log/voltha/logging_volume"
+    state: directory
+  tags: [cluster_host]
+
+- name: Directories for voltha processes are created
+  file:
+    path: "{{ target_voltha_dir }}/{{ item }}"
+    state: directory
+    mode: 0755
+    owner: voltha
+    group: voltha
+  with_items:
+    - registry_data
+    - consul/data
+    - consul/config
+  when: target == "cluster"
+  tags: [cluster_host]
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/config.yml b/install/ansible/roles/voltha-kubernetes/tasks/config.yml
new file mode 100644
index 0000000..87e5ab2
--- /dev/null
+++ b/install/ansible/roles/voltha-kubernetes/tasks/config.yml
@@ -0,0 +1,83 @@
+# Note: When the target == "cluster" the installer
+# is running to install voltha on the cluster hosts.
+# When the target == "installer" the installer is being
+# created.
+- name: VOLT-HA Config | The environment is properly set on login
+  template:
+    src: bashrc.j2
+    dest: "{{ target_voltha_home }}/.bashrc"
+    owner: voltha
+    group: voltha
+    mode: "u=rw,g=r,o=r"
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | The .bashrc file is executed on ssh login
+  template:
+    src: bash_profile.j2
+    dest: "{{ target_voltha_home }}/.bash_profile"
+    owner: voltha
+    group: voltha
+    mode: "u=rw,g=r,o=r"
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | Required directory exists
+  file:
+    path: "{{ target_voltha_dir }}"
+    state: directory
+    owner: voltha
+    group: voltha
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | Configuration files and directories are copied
+  synchronize:
+    src: "{{ install_source }}/{{ item }}"
+    dest: "{{ target_voltha_dir }}"
+    archive: no
+    owner: no
+    perms: no
+    recursive: yes
+    links: yes
+  with_items:
+    - k8s
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | Configuration directories are owned by voltha
+  file:
+    path: "{{ target_voltha_dir }}/{{ item }}"
+    owner: voltha
+    group: voltha
+    recurse: yes
+    follow: no
+  with_items:
+    - k8s
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | Docker containers for Voltha are pulled
+  command: docker pull {{ docker_registry }}/{{ item }}
+  with_items: "{{ voltha_containers }}"
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | Docker images are re-tagged to expected names
+  command: docker tag {{ docker_registry }}/{{ item }} {{ item }}
+  with_items: "{{ voltha_containers }}"
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | Docker images are re-tagged to cluster registry names
+  command: docker tag {{ docker_registry }}/{{ item }} {{ inventory_hostname }}:5001/{{ item }}
+  with_items: "{{ voltha_containers }}"
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | Add CNI bridge for PONSIM layer 2 support
+  template:
+    src: ponsim_bridge.j2
+    dest: "/etc/cni/net.d/10-pon0.conf"
+  when: target == "cluster"
+  tags: [voltha]
\ No newline at end of file
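
The three Docker tasks above amount to one pull followed by two re-tags for every image in voltha_containers. A rough shell equivalent for a single image (the registry, image and host names here are placeholder examples, not values taken from this change):

    # Hypothetical example values; the real ones come from group_vars and the inventory
    registry=localhost:5000
    image=voltha/voltha:latest
    host=node1
    docker pull $registry/$image                    # pull from the installer's registry
    docker tag $registry/$image $image              # re-tag to the expected short name
    docker tag $registry/$image $host:5001/$image   # re-tag to the per-host cluster registry name
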
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/deploy.yml b/install/ansible/roles/voltha-kubernetes/tasks/deploy.yml
new file mode 100644
index 0000000..f5a2929
--- /dev/null
+++ b/install/ansible/roles/voltha-kubernetes/tasks/deploy.yml
@@ -0,0 +1,159 @@
+- name: "VOLT-HA Deploy | Add Namespace"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/namespace.yml
+  run_once: true
+
+# Ingress
+- name: "VOLT-HA Deploy | Start Ingress Controller"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/ingress
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for Default HTTP backend to be ready"
+  command: kubectl rollout status deployment/default-http-backend -w -n {{ voltha_namespace }}
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for Ingress Controller to be ready"
+  command: kubectl rollout status deployment/nginx-ingress-controller -w -n {{ voltha_namespace }}
+  run_once: true
+
+# Zookeeper
+
+- name: "VOLT-HA Deploy | Start zookeeper"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/zookeeper.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for zookeeper to be ready"
+  command: kubectl rollout status statefulset/{{ item }} -w -n {{ voltha_namespace }}
+  with_items:
+    - zookeeper1
+    - zookeeper2
+    - zookeeper3
+  run_once: true
+
+# Kafka
+- name: "VOLT-HA Deploy | Start kafka"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/kafka.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for kafka to be ready"
+  command: kubectl rollout status statefulset/kafka -w -n {{ voltha_namespace }}
+  run_once: true
+
+# Fluentd
+
+- name: "VOLT-HA Deploy | Start fluentd"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/fluentd.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for fluentd to be ready"
+  command: kubectl rollout status deployment/{{ item }} -w -n {{ voltha_namespace }}
+  with_items:
+    - fluentdactv
+    - fluentdstby
+    - fluentd
+  run_once: true
+
+# Consul
+- name: "VOLT-HA Deploy | Start consul"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/consul.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for consul to be ready"
+  command: kubectl rollout status statefulset/consul -w -n {{ voltha_namespace }}
+  run_once: true
+
+# Voltha Core (for consul)
+- name: "VOLT-HA Deploy | Start VOLT-HA core (for consul)"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/vcore_for_consul.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for VOLT-HA core (for consul) to be ready"
+  command: kubectl rollout status deployment/vcore -w -n {{ voltha_namespace }}
+  run_once: true
+
+# OFagent
+- name: "VOLT-HA Deploy | Start OpenFlow Agent"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/ofagent.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for OpenFlow Agent to be ready"
+  command: kubectl rollout status deployment/ofagent -w -n {{ voltha_namespace }}
+  run_once: true
+
+# Envoy (for consul)
+- name: "VOLT-HA Deploy | Start Envoy (for consul)"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/envoy_for_consul.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for Envoy (for consul) to be ready"
+  command: kubectl rollout status deployment/voltha -w -n {{ voltha_namespace }}
+  run_once: true
+
+# Voltha CLI
+- name: "VOLT-HA Deploy | Start VOLT-HA CLI"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/vcli.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for VOLT-HA CLI to be ready"
+  command: kubectl rollout status deployment/vcli -w -n {{ voltha_namespace }}
+  run_once: true
+
+# NETCONF
+- name: "VOLT-HA Deploy | Start NETCONF"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/netconf.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for NETCONF to be ready"
+  command: kubectl rollout status deployment/netconf -w -n {{ voltha_namespace }}
+  run_once: true
+
+# Grafana
+- name: "VOLT-HA Deploy | Start Grafana"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/grafana.yml
+  run_once: true
+  when: monitor_voltha_stats
+
+- name: "VOLT-HA Deploy | Wait for Grafana to be ready"
+  command: kubectl rollout status deployment/grafana -w -n {{ voltha_namespace }}
+  run_once: true
+  when: monitor_voltha_stats
+
+# Dashd and Shovel
+- name: "VOLT-HA Deploy | Start DASHD and SHOVEL"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/stats.yml
+  run_once: true
+  when: monitor_voltha_stats
+
+- name: "VOLT-HA Deploy | Wait for DASHD and SHOVEL to be ready"
+  command: kubectl rollout status deployment/{{ item }} -w -n {{ voltha_namespace }}
+  with_items:
+    - dashd
+    - shovel
+  run_once: true
+  when: monitor_voltha_stats
+
+# FreeRADIUS Config
+- name: "VOLT-HA Deploy | Add FreeRADIUS Configuration"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/freeradius-config.yml
+  run_once: true
+
+# FreeRADIUS
+- name: "VOLT-HA Deploy | Start FreeRADIUS"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/freeradius.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for FreeRADIUS to be ready"
+  command: kubectl rollout status deployment/freeradius -w -n {{ voltha_namespace }}
+  run_once: true
+
+# ONOS
+- name: "VOLT-HA Deploy | Start ONOS"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/onos.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for ONOS to be ready"
+  command: kubectl rollout status deployment/onos -w -n {{ voltha_namespace }}
+  run_once: true
+
+
+
+
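
After these plays complete, a quick sanity check from the k8s master is to list the voltha namespace and re-issue one of the rollout waits by hand (standard kubectl usage, not part of this change):

    kubectl get pods,svc -n voltha
    kubectl rollout status deployment/vcore -n voltha
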
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/main.yml b/install/ansible/roles/voltha-kubernetes/tasks/main.yml
new file mode 100644
index 0000000..6053343
--- /dev/null
+++ b/install/ansible/roles/voltha-kubernetes/tasks/main.yml
@@ -0,0 +1,8 @@
+- import_tasks: config.yml
+  when: config_voltha is defined
+
+- import_tasks: deploy.yml
+  when: deploy_voltha is defined
+
+- import_tasks: teardown.yml
+  when: teardown_voltha is defined
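
The task file that runs is selected purely by the extra variable passed on the command line. The first two invocations below are the ones added to installer.sh further down; the teardown call is the analogous, assumed manual invocation:

    ansible-playbook ansible/voltha-k8s.yml -i ansible/hosts/cluster    -e 'config_voltha=true'
    ansible-playbook ansible/voltha-k8s.yml -i ansible/hosts/k8s-master -e 'deploy_voltha=true'
    ansible-playbook ansible/voltha-k8s.yml -i ansible/hosts/k8s-master -e 'teardown_voltha=true'
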
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/teardown.yml b/install/ansible/roles/voltha-kubernetes/tasks/teardown.yml
new file mode 100644
index 0000000..fdd0485
--- /dev/null
+++ b/install/ansible/roles/voltha-kubernetes/tasks/teardown.yml
@@ -0,0 +1,82 @@
+# ONOS
+- name: "VOLT-HA Teardown | Stop ONOS"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/onos.yml
+  run_once: true
+
+# FreeRADIUS
+- name: "VOLT-HA Teardown | Stop FreeRADIUS"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/freeradius.yml
+  run_once: true
+
+# FreeRADIUS Config
+- name: "VOLT-HA Teardown | Remove FreeRADIUS Configuration"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/freeradius-config.yml
+  run_once: true
+
+# Dashd and Shovel
+- name: "VOLT-HA Teardown | Stop DASHD and SHOVEL"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/stats.yml
+  run_once: true
+
+# Grafana
+- name: "VOLT-HA Teardown | Stop Grafana"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/grafana.yml
+  run_once: true
+
+# NETCONF
+- name: "VOLT-HA Teardown | Stop NETCONF"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/netconf.yml
+  run_once: true
+
+# Voltha CLI
+- name: "VOLT-HA Teardown | Stop VOLT-HA CLI"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/vcli.yml
+  run_once: true
+
+# Envoy (for consul)
+- name: "VOLT-HA Teardown | Stop Envoy (for consul)"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/envoy_for_consul.yml
+  run_once: true
+
+# OFagent
+- name: "VOLT-HA Teardown | Stop OpenFlow Agent"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/ofagent.yml
+  run_once: true
+
+# Voltha Core (for consul)
+- name: "VOLT-HA Teardown | Stop VOLT-HA core (for consul)"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/vcore_for_consul.yml
+  run_once: true
+
+# Consul
+- name: "VOLT-HA Teardown | Stop consul"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/consul.yml
+  run_once: true
+
+# Fluentd
+- name: "VOLT-HA Teardown | Stop fluentd"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/fluentd.yml
+  run_once: true
+
+# Kafka
+- name: "VOLT-HA Teardown | Stop kafka"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/kafka.yml
+  run_once: true
+
+# Zookeeper
+- name: "VOLT-HA Teardown | Stop zookeeper"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/zookeeper.yml
+  run_once: true
+
+# Ingress
+- name: "VOLT-HA Teardown | Stop Ingress Controller"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/ingress
+  run_once: true
+
+# Namespace
+- name: "VOLT-HA Teardown | Remove Namespace"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/namespace.yml
+  run_once: true
+
+
+
diff --git a/install/ansible/roles/voltha-kubernetes/templates/bash_profile.j2 b/install/ansible/roles/voltha-kubernetes/templates/bash_profile.j2
new file mode 100644
index 0000000..45cb87e
--- /dev/null
+++ b/install/ansible/roles/voltha-kubernetes/templates/bash_profile.j2
@@ -0,0 +1,3 @@
+if [ -f ~/.bashrc ]; then
+  . ~/.bashrc
+fi
diff --git a/install/ansible/roles/voltha-kubernetes/templates/bashrc.j2 b/install/ansible/roles/voltha-kubernetes/templates/bashrc.j2
new file mode 100644
index 0000000..73bcc88
--- /dev/null
+++ b/install/ansible/roles/voltha-kubernetes/templates/bashrc.j2
@@ -0,0 +1,2 @@
+DOCKER_HOST_IP={{ ansible_default_ipv4.address }}
+export DOCKER_HOST_IP
diff --git a/install/ansible/roles/voltha-kubernetes/templates/ponsim_bridge.j2 b/install/ansible/roles/voltha-kubernetes/templates/ponsim_bridge.j2
new file mode 100644
index 0000000..6ddcfbd
--- /dev/null
+++ b/install/ansible/roles/voltha-kubernetes/templates/ponsim_bridge.j2
@@ -0,0 +1,15 @@
+{
+    "name": "pon0",
+    "type": "bridge",
+    "bridge": "pon0",
+    "isGateway": true,
+    "ipMask": true,
+    "ipam": {
+        "type": "host-local",
+        "subnet": "10.22.0.0/16",
+        "routes": [
+          { "dst": "0.0.0.0/0" }
+        ]
+    }
+}
+
diff --git a/install/ansible/voltha-k8s.yml b/install/ansible/voltha-k8s.yml
new file mode 100644
index 0000000..1761d04
--- /dev/null
+++ b/install/ansible/voltha-k8s.yml
@@ -0,0 +1,23 @@
+- hosts: cluster
+  remote_user: voltha
+  serial: 1
+  vars:
+    target: cluster
+  roles:
+    - { role: common, when: config_voltha is defined }
+    - { role: kubernetes-host, when: config_voltha is defined }
+    - { role: voltha-kubernetes, when: config_voltha is defined }
+
+- hosts: k8s-master
+  remote_user: voltha
+  serial: 1
+  roles:
+    - {
+        role: voltha-kubernetes, when: deploy_voltha is defined,
+        monitor_voltha_stats: true,
+        use_ponsim: true
+      }
+    - {
+        role: voltha-kubernetes,
+        when: teardown_voltha is defined
+      }
diff --git a/install/install.cfg b/install/install.cfg
index 964800f..4bf0b66 100644
--- a/install/install.cfg
+++ b/install/install.cfg
@@ -10,3 +10,15 @@
 # The space reserved for Consul's storage
 # consulLimit=5
 
+# Specify the cluster framework type (swarm or kubernetes)
+cluster_framework="swarm"
+
+# Docker registry address
+#cluster_registry="localhost:5000"
+
+# Address range for kubernetes services
+#cluster_service_subnet="192.168.0.0\/18"
+
+# Address range for kubernetes pods
+#cluster_pod_subnet="192.168.64.0\/18"
+
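
For a Kubernetes install, the commented defaults above would be uncommented, e.g. (the subnets and registry are just the sample values from this file; the slashes stay escaped because installer.sh substitutes the values directly into sed expressions):

    cluster_framework="kubernetes"
    cluster_registry="localhost:5000"
    cluster_service_subnet="192.168.0.0\/18"
    cluster_pod_subnet="192.168.64.0\/18"
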
diff --git a/install/installer.sh b/install/installer.sh
index f1f9732..f3a5aa3 100755
--- a/install/installer.sh
+++ b/install/installer.sh
@@ -35,13 +35,13 @@
 # Configure barrier file sizes but only if a value was provided in the config file
 
 if [ -v logLimit ]; then
-	sed -i -e "/logger_volume_size/s/.*/logger_volume_size: ${logLimit}/" ansible/group_vars/all
+    sed -i -e "/logger_volume_size/s/.*/logger_volume_size: ${logLimit}/" ansible/group_vars/all
 fi
 if [ -v regLimit ]; then
-	sed -i -e "/registry_volume_size/s/.*/registry_volume_size: ${regLimit}/" ansible/group_vars/all
+    sed -i -e "/registry_volume_size/s/.*/registry_volume_size: ${regLimit}/" ansible/group_vars/all
 fi
 if [ -v consulLimit ]; then
-	sed -i -e "/consul_volume_size/s/.*/consul_volume_size: ${consulLimit}/" ansible/group_vars/all
+    sed -i -e "/consul_volume_size/s/.*/consul_volume_size: ${consulLimit}/" ansible/group_vars/all
 fi
 
 # Create the key directory
@@ -60,156 +60,215 @@
 
 for i in $hosts
 do
-	# Generate the key for the host
-	echo -e "${lBlue}Generating the key-pair for communication with host ${yellow}$i${NC}"
-	ssh-keygen -f ./$i -t rsa -N ''
-	mv $i .keys
+    # Generate the key for the host
+    echo -e "${lBlue}Generating the key-pair for communication with host ${yellow}$i${NC}"
+    ssh-keygen -f ./$i -t rsa -N ''
+    mv $i .keys
 
-	# Generate the pre-configuration script
-	echo -e "${lBlue}Creating the pre-configuration script${NC}"
-	head -n +1 BashLoginTarget.sh > bash_login.sh
-	echo "" >> bash_login.sh
-	echo -n 'key="' >> bash_login.sh
-	sed -i -e 's/$/"/' $i.pub
-	cat $i.pub >> bash_login.sh
-	tail -n +2 BashLoginTarget.sh | grep -v "{{ key }}" >> bash_login.sh
-	rm $i.pub
+    # Generate the pre-configuration script
+    echo -e "${lBlue}Creating the pre-configuration script${NC}"
+    head -n +1 BashLoginTarget.sh > bash_login.sh
+    echo "" >> bash_login.sh
+    echo -n 'key="' >> bash_login.sh
+    sed -i -e 's/$/"/' $i.pub
+    cat $i.pub >> bash_login.sh
+    tail -n +2 BashLoginTarget.sh | grep -v "{{ key }}" >> bash_login.sh
+    rm $i.pub
 
-	# Copy the pre-config file to the VM
-	echo -e "${lBlue}Transfering pre-configuration script to ${yellow}$i${NC}"
-	if [ -d ".test" ]; then
-		echo -e "${red}Test mode set!!${lBlue} Using pre-populated ssh key for ${yellow}$i${NC}"
-		scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .test/$i bash_login.sh $iUser@$i:.bash_login
-	else
-		scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no bash_login.sh $iUser@$i:.bash_login
-	fi
-	rm bash_login.sh
+    # Copy the pre-config file to the VM
+    echo -e "${lBlue}Transferring pre-configuration script to ${yellow}$i${NC}"
+    if [ -d ".test" ]; then
+        echo -e "${red}Test mode set!!${lBlue} Using pre-populated ssh key for ${yellow}$i${NC}"
+        scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .test/$i bash_login.sh $iUser@$i:.bash_login
+    else
+        scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no bash_login.sh $iUser@$i:.bash_login
+    fi
+    rm bash_login.sh
 
-	# Run the pre-config file on the VM
-	echo -e "${lBlue}Running the pre-configuration script on ${yellow}$i${NC}"
-	if [ -d ".test" ]; then
-		echo -e "${red}Test mode set!!${lBlue} Using pre-populated ssh key for ${yellow}$i${NC}"
-		ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .test/$i $iUser@$i
-	else
-		ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $iUser@$i
-	fi
+    # Run the pre-config file on the VM
+    echo -e "${lBlue}Running the pre-configuration script on ${yellow}$i${NC}"
+    if [ -d ".test" ]; then
+        echo -e "${red}Test mode set!!${lBlue} Using pre-populated ssh key for ${yellow}$i${NC}"
+        ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .test/$i $iUser@$i
+    else
+        ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $iUser@$i
+    fi
 
-	# Configure ansible and ssh for silent operation
-	echo -e "${lBlue}Configuring ansible${NC}"
-	echo $i >> ansible/hosts/cluster
-	echo "ansible_ssh_private_key_file: $wd/.keys/$i" > ansible/host_vars/$i
+    # Configure ansible and ssh for silent operation
+    echo -e "${lBlue}Configuring ansible${NC}"
+    echo $i >> ansible/hosts/cluster
+    echo "ansible_ssh_private_key_file: $wd/.keys/$i" > ansible/host_vars/$i
 
-	# Create the tunnel to the registry to allow pulls from localhost
-	echo -e "${lBlue}Creating a secure shell tunnel to the registry for ${yellow}$i${NC}"
-	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i -f voltha@$i -R 5000:localhost:5000 -N
-	
+    # Create the tunnel to the registry to allow pulls from localhost
+    echo -e "${lBlue}Creating a secure shell tunnel to the registry for ${yellow}$i${NC}"
+    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i -f voltha@$i -R 5000:localhost:5000 -N
+
 done
-# Add the dependent software list to the cluster variables
-echo -e "${lBlue}Setting up dependent software${NC}"
-# Delete any grub updates since the boot disk is almost
-# guaranteed not to be the same device as the installer.
-mkdir grub_updates
-sudo mv deb_files/*grub* grub_updates
-# Sort the packages in dependency order to get rid of scary non-errors
-# that are issued by ansible.
-#echo -e "${lBlue}Dependency sorting dependent software${NC}"
-#./sort_packages.sh
-#echo "deb_files:" >> ansible/group_vars/all
-#for i in `cat sortedDebs.txt`
-#do
-#echo "  - $i" >> ansible/group_vars/all
-#done
 
-# Make sure the ssh keys propagate to all hosts allowing passwordless logins between them
-echo -e "${lBlue}Propagating ssh keys${NC}"
 cp -r .keys ansible/roles/cluster-host/files
 
-# Install python on all the 3 servers since python is required for
-for i in $hosts
-do
-	echo -e "${lBlue}Installing ${lCyan}Python${lBlue}${NC}"
-	scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i -r python-deb voltha@$i:.
-	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i  voltha@$i "sudo dpkg -i /home/voltha/python-deb/*minimal*"
-	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i  voltha@$i sudo dpkg -i -R /home/voltha/python-deb
-	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i  voltha@$i rm -fr python-deb
 
-done
+if [ "$cluster_framework" == "kubernetes" ]; then
 
-# Create the daemon.json file for the swarm
-echo "{" > daemon.json
-echo -n '  "insecure-registries" : [' >> daemon.json
-first=""
-for i in .keys/*
-do
-	if [ -z "$first" ]; then
-		echo -n '"'`basename $i`':5001"' >> daemon.json
-		first="not"
-	else
-		echo -n ' , "'`basename $i`':5001"' >> daemon.json
-	fi
-done
-echo "]" >> daemon.json
-echo "}" >> daemon.json
-unset first
+    echo -e "${green}Deploying kubernetes${NC}"
 
-# Running ansible
-echo -e "${lBlue}Running ansible${NC}"
-cp ansible/ansible.cfg .ansible.cfg
-ansible-playbook ansible/voltha.yml -i ansible/hosts/cluster
+    rm -rf kubespray
 
-# Now all 3 servers need to be rebooted because of software installs.
-# Reboot them and wait patiently until they all come back.
-# Note this destroys the registry tunnel wich is no longer needed.
-hList=""
-for i in $hosts
-do
-	echo -e "${lBlue}Rebooting cluster hosts${NC}"
-	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i  voltha@$i sudo telinit 6
-	hList="$i $hList"
-done
+    git clone https://github.com/kubernetes-incubator/kubespray.git
 
-# Give the hosts time to shut down so that pings stop working or the
-# script just falls through the next loop and the rest fails.
-echo -e "${lBlue}Waiting for shutdown${NC}"
-sleep 5
+    cp -rfp kubespray/inventory/sample kubespray/inventory/voltha
 
+    # TODO: Replace with variables taken from config file
+    # bootstrap_os: ubuntu
+    # network_plugin: weave
 
-while [ ! -z "$hList" ];
-do
-	# Attempt to ping the VMs on the list one by one.
-	echo -e "${lBlue}Waiting for hosts to reboot ${yellow}$hList${NC}"
-	for i in $hList
-	do
-		ping -q -c 1 $i > /dev/null 2>&1
-		ret=$?
-		if [ $ret -eq 0 ]; then
-			ipExpr=`echo $i | sed -e "s/\./[.]/g"`
-			hList=`echo $hList | sed -e "s/$ipExpr//" | sed -e "s/^ //" | sed -e "s/ $//"`
-		fi
-	done
-	
-done
+    sed -i -e "/bootstrap_os: none/s/.*/bootstrap_os: ubuntu/" \
+        kubespray/inventory/voltha/group_vars/all.yml
+    sed -i -e "/kube_network_plugin: calico/s/.*/kube_network_plugin: weave/" \
+        kubespray/inventory/voltha/group_vars/k8s-cluster.yml
+    sed -i -e "/kube_service_addresses: 10.233.0.0\/18/s/.*/kube_service_addresses: $cluster_service_subnet/" \
+        kubespray/inventory/voltha/group_vars/k8s-cluster.yml
+    sed -i -e "/kube_pods_subnet: 10.233.64.0\/18/s/.*/kube_pods_subnet: $cluster_pod_subnet/" \
+        kubespray/inventory/voltha/group_vars/k8s-cluster.yml
+    sed -i -e "s/docker_options: \"/&--insecure-registry=$cluster_registry /" \
+        kubespray/inventory/voltha/group_vars/k8s-cluster.yml
 
-# Now initialize the the docker swarm cluster with managers.
-# The first server needs to be the primary swarm manager
-# the other nodes are backup mangers that join the swarm.
-# In the future, worker nodes will likely be added.
+    CONFIG_FILE=kubespray/inventory/voltha/hosts.ini python3 \
+        kubespray/contrib/inventory_builder/inventory.py $hosts
 
-echo "[swarm-master]" > ansible/hosts/swarm-master
-echo "[swarm-master-backup]" > ansible/hosts/swarm-master-backup
+    # Add node3 to the [kube-master] group so all three hosts act as kubernetes masters
+    sed -i -e '/\[kube-master\]/a\
+    node3
+    ' kubespray/inventory/voltha/hosts.ini
 
-ctr=1
-for i in $hosts
-do
+    ansible-playbook -u root -i kubespray/inventory/voltha/hosts.ini kubespray/cluster.yml
+
+    echo "[k8s-master]" > ansible/hosts/k8s-master
+
+    ctr=1
+    for i in $hosts
+    do
         if [ $ctr -eq 1 ]; then
-                echo  $i >> ansible/hosts/swarm-master
-		echo "swarm_master_addr: \"$i\"" >> ansible/group_vars/all
-		ctr=0
-        else
-                echo  $i >> ansible/hosts/swarm-master-backup
+            echo  $i >> ansible/hosts/k8s-master
+            ctr=0
         fi
-done
-ansible-playbook ansible/swarm.yml -i ansible/hosts/swarm-master
-ansible-playbook ansible/swarm.yml -i ansible/hosts/swarm-master-backup
-ansible-playbook ansible/voltha.yml -i ansible/hosts/swarm-master
+    done
 
+    ansible-playbook ansible/voltha-k8s.yml -i ansible/hosts/cluster -e 'config_voltha=true'
+    ansible-playbook ansible/voltha-k8s.yml -i ansible/hosts/k8s-master -e 'deploy_voltha=true'
+
+else
+    #
+    # Legacy swarm instructions
+    #
+
+    # Add the dependent software list to the cluster variables
+    echo -e "${lBlue}Setting up dependent software${NC}"
+    # Delete any grub updates since the boot disk is almost
+    # guaranteed not to be the same device as the installer.
+    mkdir grub_updates
+    sudo mv deb_files/*grub* grub_updates
+    # Sort the packages in dependency order to get rid of scary non-errors
+    # that are issued by ansible.
+    #echo -e "${lBlue}Dependency sorting dependent software${NC}"
+    #./sort_packages.sh
+    #echo "deb_files:" >> ansible/group_vars/all
+    #for i in `cat sortedDebs.txt`
+    #do
+    #echo "  - $i" >> ansible/group_vars/all
+    #done
+
+    # Make sure the ssh keys propagate to all hosts, allowing passwordless logins between them
+    echo -e "${lBlue}Propagating ssh keys${NC}"
+    cp -r .keys ansible/roles/cluster-host/files
+
+    # Install python on all 3 servers since ansible requires python on the managed hosts
+    for i in $hosts
+    do
+        echo -e "${lBlue}Installing ${lCyan}Python${lBlue}${NC}"
+        scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i -r python-deb voltha@$i:.
+        ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i  voltha@$i "sudo dpkg -i /home/voltha/python-deb/*minimal*"
+        ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i  voltha@$i sudo dpkg -i -R /home/voltha/python-deb
+        ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i  voltha@$i rm -fr python-deb
+
+    done
+
+    # Create the daemon.json file for the swarm
+    echo "{" > daemon.json
+    echo -n '  "insecure-registries" : [' >> daemon.json
+    first=""
+    for i in .keys/*
+    do
+        if [ -z "$first" ]; then
+            echo -n '"'`basename $i`':5001"' >> daemon.json
+            first="not"
+        else
+            echo -n ' , "'`basename $i`':5001"' >> daemon.json
+        fi
+    done
+    echo "]" >> daemon.json
+    echo "}" >> daemon.json
+    unset first
+
+    # Running ansible
+    echo -e "${lBlue}Running ansible${NC}"
+    cp ansible/ansible.cfg .ansible.cfg
+    ansible-playbook ansible/voltha.yml -i ansible/hosts/cluster
+
+    # Now all 3 servers need to be rebooted because of software installs.
+    # Reboot them and wait patiently until they all come back.
+    # Note this destroys the registry tunnel, which is no longer needed.
+    hList=""
+    for i in $hosts
+    do
+        echo -e "${lBlue}Rebooting cluster hosts${NC}"
+        ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i  voltha@$i sudo telinit 6
+        hList="$i $hList"
+    done
+
+    # Give the hosts time to shut down so that pings stop working or the
+    # script just falls through the next loop and the rest fails.
+    echo -e "${lBlue}Waiting for shutdown${NC}"
+    sleep 5
+
+
+    while [ ! -z "$hList" ];
+    do
+        # Attempt to ping the VMs on the list one by one.
+        echo -e "${lBlue}Waiting for hosts to reboot ${yellow}$hList${NC}"
+        for i in $hList
+        do
+            ping -q -c 1 $i > /dev/null 2>&1
+            ret=$?
+            if [ $ret -eq 0 ]; then
+                ipExpr=`echo $i | sed -e "s/\./[.]/g"`
+                hList=`echo $hList | sed -e "s/$ipExpr//" | sed -e "s/^ //" | sed -e "s/ $//"`
+            fi
+        done
+
+    done
+
+    # Now initialize the docker swarm cluster with managers.
+    # The first server needs to be the primary swarm manager;
+    # the other nodes are backup managers that join the swarm.
+    # In the future, worker nodes will likely be added.
+
+    echo "[swarm-master]" > ansible/hosts/swarm-master
+    echo "[swarm-master-backup]" > ansible/hosts/swarm-master-backup
+
+    ctr=1
+    for i in $hosts
+    do
+        if [ $ctr -eq 1 ]; then
+            echo $i >> ansible/hosts/swarm-master
+            echo "swarm_master_addr: \"$i\"" >> ansible/group_vars/all
+            ctr=0
+        else
+            echo $i >> ansible/hosts/swarm-master-backup
+        fi
+    done
+    ansible-playbook ansible/swarm.yml -i ansible/hosts/swarm-master
+    ansible-playbook ansible/swarm.yml -i ansible/hosts/swarm-master-backup
+    ansible-playbook ansible/voltha.yml -i ansible/hosts/swarm-master
+
+fi
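
If the sed edits of the kubespray inventory need to be checked before cluster.yml runs, a plain grep over the generated group_vars shows the rewritten settings (verification only, not part of the installer flow):

    grep -E 'bootstrap_os|kube_network_plugin|kube_service_addresses|kube_pods_subnet|docker_options' \
        kubespray/inventory/voltha/group_vars/*.yml
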
diff --git a/k8s/consul.yml b/k8s/consul.yml
index b398b7a..0ffcc15 100644
--- a/k8s/consul.yml
+++ b/k8s/consul.yml
@@ -51,6 +51,8 @@
 spec:
   serviceName: consul
   replicas: 3
+  updateStrategy:
+    type: RollingUpdate
   template:
     metadata:
       labels:
diff --git a/k8s/freeradius-config.yml b/k8s/freeradius-config.yml
new file mode 100644
index 0000000..3e20756
--- /dev/null
+++ b/k8s/freeradius-config.yml
@@ -0,0 +1,13 @@
+apiVersion: v1
+data:
+  clients.conf: |
+    client 192.168.64.0/18 {
+      secret = SECRET
+    }
+  users: |
+    user Cleartext-Password := "password", MS-CHAP-Use-NTLM-Auth := 0
+
+kind: ConfigMap
+metadata:
+  name: freeradius-config
+  namespace: voltha
diff --git a/k8s/kafka.yml b/k8s/kafka.yml
index 00f8441..a75bbeb 100644
--- a/k8s/kafka.yml
+++ b/k8s/kafka.yml
@@ -20,6 +20,8 @@
 spec:
   serviceName: kafka
   replicas: 3
+  updateStrategy:
+    type: RollingUpdate
   selector:
     matchLabels:
       app: kafka
diff --git a/k8s/zookeeper.yml b/k8s/zookeeper.yml
index 2f3ceba..112a460 100644
--- a/k8s/zookeeper.yml
+++ b/k8s/zookeeper.yml
@@ -66,6 +66,8 @@
 spec:
   serviceName: zoo1
   replicas: 1
+  updateStrategy:
+    type: RollingUpdate
   selector:
     matchLabels:
       app: zookeeper-1
@@ -104,6 +106,8 @@
 spec:
   serviceName: zoo2
   replicas: 1
+  updateStrategy:
+    type: RollingUpdate
   selector:
     matchLabels:
       app: zookeeper-2
@@ -142,6 +146,8 @@
 spec:
   serviceName: zoo3
   replicas: 1
+  updateStrategy:
+    type: RollingUpdate
   selector:
     matchLabels:
       app: zookeeper-3
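
With updateStrategy set to RollingUpdate, the statefulset rollout waits used in deploy.yml apply to these workloads as well, e.g.:

    kubectl rollout status statefulset/kafka -n voltha
    kubectl rollout status statefulset/consul -n voltha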