VOL-642: Installation of VOLT-HA in a Kubernetes cluster

- Changed existing installer to support swarm and kubernetes
- Using kubespray as the base framework to deploy kubernetes
- Implemented config/deploy/teardown tasks for voltha

Amendments:

- Changed from python to python3 when running the inventory builder

Change-Id: I059fd348b9a749397b373e333b5602944c817745
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/config.yml b/install/ansible/roles/voltha-kubernetes/tasks/config.yml
new file mode 100644
index 0000000..87e5ab2
--- /dev/null
+++ b/install/ansible/roles/voltha-kubernetes/tasks/config.yml
@@ -0,0 +1,83 @@
+# Note: When the target == "cluster" the installer
+# is running to install voltha in the cluster hosts.
+# When the target == "installer" the installer is being
+# created.
+- name: VOLT-HA Config | The environment is properly set on login
+  template:
+    src: bashrc.j2
+    dest: "{{ target_voltha_home }}/.bashrc"
+    owner: voltha
+    group: voltha
+    mode: "u=rw,g=r,o=r"
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | The .bashrc file is executed on ssh login
+  template:
+    src: bash_profile.j2
+    dest: "{{ target_voltha_home }}/.bash_profile"
+    owner: voltha
+    group: voltha
+    mode: "u=rw,g=r,o=r"
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | Required directory exists
+  file:
+    path: "{{ target_voltha_dir }}"
+    state: directory
+    owner: voltha
+    group: voltha
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | Configuration files and directories are copied
+  synchronize:
+    src: "{{ install_source }}/{{ item }}"
+    dest: "{{ target_voltha_dir }}"
+    archive: no
+    owner: no
+    perms: no
+    recursive: yes
+    links: yes
+  with_items:
+    - k8s
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | Configuration directories are owned by voltha
+  file:
+    path: "{{ target_voltha_dir }}/{{ item }}"
+    owner: voltha
+    group: voltha
+    recurse: yes
+    follow: no
+  with_items:
+    - k8s
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | Docker containers for Voltha are pulled
+  command: docker pull {{ docker_registry }}/{{ item }}
+  with_items: "{{ voltha_containers }}"
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | Docker images are re-tagged to expected names
+  command: docker tag {{ docker_registry }}/{{ item }} {{ item }}
+  with_items: "{{ voltha_containers }}"
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | Docker images are re-tagged to cluster registry names
+  command: docker tag {{ docker_registry }}/{{ item }} {{ inventory_hostname }}:5001/{{ item }}
+  with_items: "{{ voltha_containers }}"
+  when: target == "cluster"
+  tags: [voltha]
+
+- name: VOLT-HA Config | Add CNI bridge for PONSIM layer 2 support
+  template:
+    src: ponsim_bridge.j2
+    dest: "/etc/cni/net.d/10-pon0.conf"
+  when: target == "cluster"
+  tags: [voltha]
\ No newline at end of file
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/deploy.yml b/install/ansible/roles/voltha-kubernetes/tasks/deploy.yml
new file mode 100644
index 0000000..f5a2929
--- /dev/null
+++ b/install/ansible/roles/voltha-kubernetes/tasks/deploy.yml
@@ -0,0 +1,159 @@
+- name: "VOLT-HA Deploy | Add Namespace"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/namespace.yml
+  run_once: true
+
+# Ingress
+- name: "VOLT-HA Deploy | Start Ingress Controller"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/ingress
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for Default HTTP backend to be ready"
+  command: kubectl rollout status deployment/default-http-backend -w -n {{ voltha_namespace }}
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for Ingress Controller to be ready"
+  command: kubectl rollout status deployment/nginx-ingress-controller -w -n {{ voltha_namespace }}
+  run_once: true
+
+# Zookeeper
+
+- name: "VOLT-HA Deploy | Start zookeeper"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/zookeeper.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for zookeeper to be ready"
+  command: kubectl rollout status statefulset/{{ item }} -w -n {{ voltha_namespace }}
+  with_items:
+    - zookeeper1
+    - zookeeper2
+    - zookeeper3
+  run_once: true
+
+# Kafka
+- name: "VOLT-HA Deploy | Start kafka"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/kafka.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for kafka to be ready"
+  command: kubectl rollout status statefulset/kafka -w -n {{ voltha_namespace }}
+  run_once: true
+
+# Fluentd
+
+- name: "VOLT-HA Deploy | Start fluentd"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/fluentd.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for fluentd to be ready"
+  command: kubectl rollout status deployment/{{ item }} -w -n {{ voltha_namespace }}
+  with_items:
+    - fluentdactv
+    - fluentdstby
+    - fluentd
+  run_once: true
+
+# Consul
+- name: "VOLT-HA Deploy | Start consul"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/consul.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for consul to be ready"
+  command: kubectl rollout status statefulset/consul -w -n {{ voltha_namespace }}
+  run_once: true
+
+# Voltha Core (for consul)
+- name: "VOLT-HA Deploy | Start VOLT-HA core (for consul)"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/vcore_for_consul.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for VOLT-HA core (for consul) to be ready"
+  command: kubectl rollout status deployment/vcore -w -n {{ voltha_namespace }}
+  run_once: true
+
+# OFagent
+- name: "VOLT-HA Deploy | Start OpenFlow Agent"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/ofagent.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for OpenFlow Agent to be ready"
+  command: kubectl rollout status deployment/ofagent -w -n {{ voltha_namespace }}
+  run_once: true
+
+# Envoy (for consul)
+- name: "VOLT-HA Deploy | Start Envoy (for consul)"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/envoy_for_consul.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for Envoy (for consul) to be ready"
+  command: kubectl rollout status deployment/voltha -w -n {{ voltha_namespace }}
+  run_once: true
+
+# Voltha CLI
+- name: "VOLT-HA Deploy | Start VOLT-HA CLI"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/vcli.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for VOLT-HA CLI to be ready"
+  command: kubectl rollout status deployment/vcli -w -n {{ voltha_namespace }}
+  run_once: true
+
+# NETCONF
+- name: "VOLT-HA Deploy | Start NETCONF"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/netconf.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for NETCONF to be ready"
+  command: kubectl rollout status deployment/netconf -w -n {{ voltha_namespace }}
+  run_once: true
+
+# Grafana
+- name: "VOLT-HA Deploy | Start Grafana"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/grafana.yml
+  run_once: true
+  when: monitor_voltha_stats
+
+- name: "VOLT-HA Deploy | Wait for Grafana to be ready"
+  command: kubectl rollout status deployment/grafana -w -n {{ voltha_namespace }}
+  run_once: true
+  when: monitor_voltha_stats
+
+# Dashd and Shovel
+- name: "VOLT-HA Deploy | Start DASHD and SHOVEL"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/stats.yml
+  run_once: true
+  when: monitor_voltha_stats
+
+- name: "VOLT-HA Deploy | Wait for DASHD and SHOVEL to be ready"
+  command: kubectl rollout status deployment/{{ item }} -w -n {{ voltha_namespace }}
+  with_items:
+    - dashd
+    - shovel
+  run_once: true
+  when: monitor_voltha_stats
+
+# FreeRADIUS Config
+- name: "VOLT-HA Deploy | Add FreeRADIUS Configuration"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/freeradius-config.yml
+  run_once: true
+
+# FreeRADIUS
+- name: "VOLT-HA Deploy | Start FreeRADIUS"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/freeradius.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for FreeRADIUS to be ready"
+  command: kubectl rollout status deployment/freeradius -w -n {{ voltha_namespace }}
+  run_once: true
+
+# ONOS
+- name: "VOLT-HA Deploy | Start ONOS"
+  command: kubectl apply -f {{ target_voltha_dir }}/k8s/onos.yml
+  run_once: true
+
+- name: "VOLT-HA Deploy | Wait for ONOS to be ready"
+  command: kubectl rollout status deployment/onos -w -n {{ voltha_namespace }}
+  run_once: true
+
+
+
+
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/main.yml b/install/ansible/roles/voltha-kubernetes/tasks/main.yml
new file mode 100644
index 0000000..6053343
--- /dev/null
+++ b/install/ansible/roles/voltha-kubernetes/tasks/main.yml
@@ -0,0 +1,8 @@
+- import_tasks: config.yml
+  when: config_voltha is defined
+
+- import_tasks: deploy.yml
+  when: deploy_voltha is defined
+
+- import_tasks: teardown.yml
+  when: teardown_voltha is defined
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/teardown.yml b/install/ansible/roles/voltha-kubernetes/tasks/teardown.yml
new file mode 100644
index 0000000..fdd0485
--- /dev/null
+++ b/install/ansible/roles/voltha-kubernetes/tasks/teardown.yml
@@ -0,0 +1,82 @@
+# ONOS
+- name: "VOLT-HA Teardown | Stop ONOS"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/onos.yml
+  run_once: true
+
+# FreeRADIUS
+- name: "VOLT-HA Teardown | Stop FreeRADIUS"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/freeradius.yml
+  run_once: true
+
+# FreeRADIUS Config
+- name: "VOLT-HA Teardown | Remove FreeRADIUS Configuration"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/freeradius-config.yml
+  run_once: true
+
+# Dashd and Shovel
+- name: "VOLT-HA Teardown | Stop DASHD and SHOVEL"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/stats.yml
+  run_once: true
+
+# Grafana
+- name: "VOLT-HA Teardown | Stop Grafana"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/grafana.yml
+  run_once: true
+
+# NETCONF
+- name: "VOLT-HA Teardown | Stop NETCONF"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/netconf.yml
+  run_once: true
+
+# Voltha CLI
+- name: "VOLT-HA Teardown | Stop VOLT-HA CLI"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/vcli.yml
+  run_once: true
+
+# Envoy (for consul)
+- name: "VOLT-HA Teardown | Stop Envoy (for consul)"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/envoy_for_consul.yml
+  run_once: true
+
+# OFagent
+- name: "VOLT-HA Teardown | Stop OpenFlow Agent"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/ofagent.yml
+  run_once: true
+
+# Voltha Core (for consul)
+- name: "VOLT-HA Teardown | Stop VOLT-HA core (for consul)"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/vcore_for_consul.yml
+  run_once: true
+
+# Consul
+- name: "VOLT-HA Teardown | Stop consul"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/consul.yml
+  run_once: true
+
+# Fluentd
+- name: "VOLT-HA Teardown | Stop fluentd"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/fluentd.yml
+  run_once: true
+
+# Kafka
+- name: "VOLT-HA Teardown | Stop kafka"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/kafka.yml
+  run_once: true
+
+# Zookeeper
+- name: "VOLT-HA Teardown | Stop zookeeper"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/zookeeper.yml
+  run_once: true
+
+# Ingress
+- name: "VOLT-HA Teardown | Stop Ingress Controller"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/ingress
+  run_once: true
+
+# Namespace
+- name: "VOLT-HA Teardown | Remove Namespace"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/namespace.yml
+  run_once: true
+
+
+
diff --git a/install/ansible/roles/voltha-kubernetes/templates/bash_profile.j2 b/install/ansible/roles/voltha-kubernetes/templates/bash_profile.j2
new file mode 100644
index 0000000..45cb87e
--- /dev/null
+++ b/install/ansible/roles/voltha-kubernetes/templates/bash_profile.j2
@@ -0,0 +1,3 @@
+if [ -f ~/.bashrc ]; then
+  . ~/.bashrc
+fi
diff --git a/install/ansible/roles/voltha-kubernetes/templates/bashrc.j2 b/install/ansible/roles/voltha-kubernetes/templates/bashrc.j2
new file mode 100644
index 0000000..73bcc88
--- /dev/null
+++ b/install/ansible/roles/voltha-kubernetes/templates/bashrc.j2
@@ -0,0 +1,2 @@
+DOCKER_HOST_IP={{ ansible_default_ipv4.address }}
+export DOCKER_HOST_IP
diff --git a/install/ansible/roles/voltha-kubernetes/templates/ponsim_bridge.j2 b/install/ansible/roles/voltha-kubernetes/templates/ponsim_bridge.j2
new file mode 100644
index 0000000..6ddcfbd
--- /dev/null
+++ b/install/ansible/roles/voltha-kubernetes/templates/ponsim_bridge.j2
@@ -0,0 +1,15 @@
+{
+    "name": "pon0",
+    "type": "bridge",
+    "bridge": "pon0",
+    "isGateway": true,
+    "ipMask": true,
+    "ipam": {
+        "type": "host-local",
+        "subnet": "10.22.0.0/16",
+        "routes": [
+          { "dst": "0.0.0.0/0" }
+        ]
+    }
+}
+