VOL-570: Install Kubernetes in production and test modes

- Supports offline installation of Kubernetes (k8s)
- Configures and deploys the VOLTHA components

Change-Id: I29eb87c035cea1e86176bb2e30d36278760ece82
diff --git a/install/ansible/roles/cluster-host/tasks/main.yml b/install/ansible/roles/cluster-host/tasks/main.yml
index 5d0b39e..531b827 100644
--- a/install/ansible/roles/cluster-host/tasks/main.yml
+++ b/install/ansible/roles/cluster-host/tasks/main.yml
@@ -314,7 +314,7 @@
 
 - name: The glusterfs service is started
   service:
-    name: glusterfs-server
+    name: glusterd
     enabled: yes
     state: started
   when: target == "cluster"
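
Note: the GlusterFS packages pulled from the gluster PPA ship their systemd unit
as glusterd rather than the glusterfs-server unit name used by the stock Ubuntu
package, hence the service name above. A minimal ad-hoc check (sketch only;
assumes systemd-based cluster hosts):

- name: Report which GlusterFS service unit is present (illustrative check)
  command: systemctl list-unit-files glusterd.service glusterfs-server.service
  register: gluster_units
  changed_when: false
  failed_when: false
  when: target == "cluster"
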
diff --git a/install/ansible/roles/common/tasks/main.yml b/install/ansible/roles/common/tasks/main.yml
index f5904dc..ec36046 100644
--- a/install/ansible/roles/common/tasks/main.yml
+++ b/install/ansible/roles/common/tasks/main.yml
@@ -13,6 +13,12 @@
   with_items: "{{ hosts }}"
   tags: [common]
 
+- name: GlusterFS repository is available
+  apt_repository:
+    repo: 'ppa:gluster/glusterfs-3.13'
+  when: target != "cluster"
+  tags: [common]
+
 - name: Latest apt packages
   apt:
     name: "{{ item }}"
diff --git a/install/ansible/roles/glusterfs/tasks/main.yml b/install/ansible/roles/glusterfs/tasks/main.yml
index b8e17da..782a151 100644
--- a/install/ansible/roles/glusterfs/tasks/main.yml
+++ b/install/ansible/roles/glusterfs/tasks/main.yml
@@ -26,3 +26,21 @@
     - registry_volume
     - logging_volume
 
+- name: The replicated registry filesystem is mounted
+  mount:
+    path: "{{ target_voltha_dir }}/registry_data"
+    src: "{{ inventory_hostname }}:/registry_volume"
+    fstype: glusterfs
+    opts:  "defaults,_netdev,noauto,x-systemd.automount"
+    state: mounted
+  when: target == "cluster"
+
+- name: The replicated logging filesystem is mounted on boot
+  mount:
+    path: "/var/log/voltha"
+    src: "{{ inventory_hostname }}:/logging_volume"
+    fstype: glusterfs
+    opts:  "defaults,_netdev,noauto,x-systemd.automount"
+    state: mounted
+  when: target == "cluster"
+  tags: [cluster_host]
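
Note: both mount tasks lean on systemd automounting: _netdev keeps the GlusterFS
mounts out of the early boot path until networking is up, and noauto plus
x-systemd.automount defers the actual mount until first access, so boot does not
block if glusterd is still starting. With state: mounted the Ansible mount module
also writes a persistent /etc/fstab entry; for the registry volume it is
equivalent to the following line (Jinja expressions rendered to each host's
actual values):

{{ inventory_hostname }}:/registry_volume  {{ target_voltha_dir }}/registry_data  glusterfs  defaults,_netdev,noauto,x-systemd.automount  0  0
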
diff --git a/install/ansible/roles/installer/tasks/main.yml b/install/ansible/roles/installer/tasks/main.yml
index 098d17d..492386d 100644
--- a/install/ansible/roles/installer/tasks/main.yml
+++ b/install/ansible/roles/installer/tasks/main.yml
@@ -32,6 +32,7 @@
     - install/BashLoginTarget.sh
     - install/ansible
     - compose
+    - k8s
     - consul_config/basic.json
   tags: [installer]
 
diff --git a/install/ansible/roles/kubernetes-host/files/ssh_config b/install/ansible/roles/kubernetes-host/files/ssh_config
deleted file mode 100644
index 990a43d..0000000
--- a/install/ansible/roles/kubernetes-host/files/ssh_config
+++ /dev/null
@@ -1,3 +0,0 @@
-Host *
-   StrictHostKeyChecking no
-   UserKnownHostsFile=/dev/null
diff --git a/install/ansible/roles/kubernetes-host/tasks/main.yml b/install/ansible/roles/kubernetes-host/tasks/main.yml
deleted file mode 100644
index 8d5a564..0000000
--- a/install/ansible/roles/kubernetes-host/tasks/main.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-# Note: When the target == "cluster" the installer
-# is running to install voltha in the cluster hosts.
-# Whe the target == "installer" the installer is being
-# created.
-- name: A .ssh directory for the voltha user exists
-  file:
-    #path: "{{ ansible_env['HOME'] }}/.ssh"
-    path: "{{ target_voltha_home }}/.ssh"
-    state: directory
-    owner: voltha
-    group: voltha
-  tags: [cluster_host]
-
-- name: known_hosts file is absent for the voltha user
-  file:
-    path: "{{ target_voltha_home }}/.ssh/known_hosts"
-    state: absent
-  tags: [cluster_host]
-
-- name: Known host checking is disabled
-  copy:
-    src: files/ssh_config
-    dest: "{{ target_voltha_home }}/.ssh/config"
-    owner: voltha
-    group: voltha
-    mode: 0600
-  tags: [cluster_host]
-
-- name: Cluster host keys are propagated to all hosts in the cluster
-  copy:
-    src: files/.keys
-    dest: "{{ target_voltha_home }}"
-    owner: voltha
-    group: voltha
-    mode: 0600
-  tags: [cluster_host]
-
-- name: A voltha directory under /var/log for voltha logs exists
-  file:
-    path: "/var/log/voltha/logging_volume"
-    state: directory
-  tags: [cluster_host]
-
-- name: Directories for voltha processes are created
-  file:
-    path: "{{ target_voltha_dir }}/{{ item }}"
-    state: directory
-    mode: 0755
-    owner: voltha
-    group: voltha
-  with_items:
-    - registry_data
-    - consul/data
-    - consul/config
-  when: target == "cluster"
-  tags: [cluster_host]
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/config.yml b/install/ansible/roles/voltha-k8s/tasks/config.yml
similarity index 86%
rename from install/ansible/roles/voltha-kubernetes/tasks/config.yml
rename to install/ansible/roles/voltha-k8s/tasks/config.yml
index 87e5ab2..c3bfe22 100644
--- a/install/ansible/roles/voltha-kubernetes/tasks/config.yml
+++ b/install/ansible/roles/voltha-k8s/tasks/config.yml
@@ -24,7 +24,7 @@
 
 - name: VOLT-HA Config | Required directory exists
   file:
-    path: "{{ target_voltha_dir }}"
+    path: "{{ target_voltha_home }}"
     state: directory
     owner: voltha
     group: voltha
@@ -34,7 +34,7 @@
 - name: VOLT-HA Config | Configuration files and directories are copied
   synchronize:
     src: "{{ install_source }}/{{ item }}"
-    dest: "{{ target_voltha_dir }}"
+    dest: "{{ target_voltha_home }}"
     archive: no
     owner: no
     perms: no
@@ -47,7 +47,7 @@
 
 - name: VOLT-HA Config | Configuration directories are owned by voltha
   file:
-    path: "{{ target_voltha_dir }}/{{ item }}"
+    path: "{{ target_voltha_home }}/{{ item }}"
     owner: voltha
     group: voltha
     recurse: yes
@@ -74,10 +74,3 @@
   with_items: "{{ voltha_containers }}"
   when: target == "cluster"
   tags: [voltha]
-
-- name: VOLT-HA Config | Add CNI bridge for PONSIM layer 2 support
-  template:
-    src: ponsim_bridge.j2
-    dest: "/etc/cni/net.d/10-pon0.conf"
-  when: target == "cluster"
-  tags: [voltha]
\ No newline at end of file
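
Note: the switch to target_voltha_home separates the voltha user's home (where
configuration, compose files and k8s manifests are synced) from the shared data
directory target_voltha_dir, which is still used for the GlusterFS-backed
registry_data mount. A purely hypothetical excerpt of the installer variables,
only to illustrate the split (the real values live in the installer's variable
files and are not part of this change):

# Illustrative values only -- not taken from this repository
target_voltha_home: /home/voltha     # config, compose and k8s manifests
target_voltha_dir: /var/voltha       # shared/replicated data such as registry_data
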
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/deploy.yml b/install/ansible/roles/voltha-k8s/tasks/deploy.yml
similarity index 67%
rename from install/ansible/roles/voltha-kubernetes/tasks/deploy.yml
rename to install/ansible/roles/voltha-k8s/tasks/deploy.yml
index f5a2929..91cc1ea 100644
--- a/install/ansible/roles/voltha-kubernetes/tasks/deploy.yml
+++ b/install/ansible/roles/voltha-k8s/tasks/deploy.yml
@@ -1,10 +1,10 @@
 - name: "VOLT-HA Deploy | Add Namespace"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/namespace.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/namespace.yml
   run_once: true
 
 # Ingress
 - name: "VOLT-HA Deploy | Start Ingress Controller"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/ingress
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/ingress
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for Default HTTP backend to be ready"
@@ -18,7 +18,7 @@
 # Zookeeper
 
 - name: "VOLT-HA Deploy | Start zookeeper"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/zookeeper.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/zookeeper.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for zookeeper to be ready"
@@ -31,17 +31,21 @@
 
 # Kafka
 - name: "VOLT-HA Deploy | Start kafka"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/kafka.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/kafka.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for kafka to be ready"
   command: kubectl rollout status statefulset/kafka -w -n {{ voltha_namespace }}
   run_once: true
 
-# Fluentd
+# Fluentd Config
+- name: "VOLT-HA Deploy | Add Fluentd Configuration"
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/fluentd-config.yml
+  run_once: true
 
+# Fluentd
 - name: "VOLT-HA Deploy | Start fluentd"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/fluentd.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/fluentd.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for fluentd to be ready"
@@ -49,12 +53,11 @@
   with_items:
     - fluentdactv
     - fluentdstby
-    - fluentd
   run_once: true
 
 # Consul
 - name: "VOLT-HA Deploy | Start consul"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/consul.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/consul.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for consul to be ready"
@@ -63,7 +66,7 @@
 
 # Voltha Core (for consul)
 - name: "VOLT-HA Deploy | Start VOLT-HA core (for consul)"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/vcore_for_consul.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/vcore_for_consul.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for VOLT-HA core (for consul) to be ready"
@@ -72,7 +75,7 @@
 
 # OFagent
 - name: "VOLT-HA Deploy | Start OpenFlow Agent"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/ofagent.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/ofagent.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for OpenFlow Agent to be ready"
@@ -81,7 +84,7 @@
 
 # Envoy (for consul)
 - name: "VOLT-HA Deploy | Start Envoy (for consul)"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/envoy_for_consul.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/envoy_for_consul.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for Envoy (for consul) to be ready"
@@ -90,7 +93,7 @@
 
 # Voltha CLI
 - name: "VOLT-HA Deploy | Start VOLT-HA CLI"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/vcli.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/vcli.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for VOLT-HA CLI to be ready"
@@ -99,7 +102,7 @@
 
 # NETCONF
 - name: "VOLT-HA Deploy | Start NETCONF"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/netconf.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/netconf.yml
   run_once: true
 
 - name: "VOLT-HA Deploy | Wait for NETCONF to be ready"
@@ -108,7 +111,7 @@
 
 # Grafana
 - name: "VOLT-HA Deploy | Start Grafana"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/grafana.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/grafana.yml
   run_once: true
   when: monitor_voltha_stats
 
@@ -119,7 +122,7 @@
 
 # Dashd and Shovel
 - name: "VOLT-HA Deploy | Start DASHD and SHOVEL"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/stats.yml
+  command: kubectl apply -f {{ target_voltha_home }}/k8s/stats.yml
   run_once: true
   when: monitor_voltha_stats
 
@@ -130,30 +133,3 @@
     - shovel
   run_once: true
   when: monitor_voltha_stats
-
-# FreeRADIUS Config
-- name: "VOLT-HA Deploy | Add FreeRADIUS Configuration"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/freeradius-config.yml
-  run_once: true
-
-# FreeRADIUS
-- name: "VOLT-HA Deploy | Start FreeRADIUS"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/freeradius.yml
-  run_once: true
-
-- name: "VOLT-HA Deploy | Wait for FreeRADIUS to be ready"
-  command: kubectl rollout status deployment/freeradius -w -n {{ voltha_namespace }}
-  run_once: true
-
-# ONOS
-- name: "VOLT-HA Deploy | Start ONOS"
-  command: kubectl apply -f {{ target_voltha_dir }}/k8s/onos.yml
-  run_once: true
-
-- name: "VOLT-HA Deploy | Wait for ONOS to be ready"
-  command: kubectl rollout status deployment/onos -w -n {{ voltha_namespace }}
-  run_once: true
-
-
-
-
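
Note: every component in deploy.yml follows the same apply-then-wait pattern:
kubectl apply the manifest, then block on kubectl rollout status so later steps
only run once the component is ready (deployments use deployment/<name>, while
stateful services such as kafka and zookeeper use statefulset/<name>). A generic
sketch of the pattern, with <component> standing in for any of the manifests
above:

- name: "VOLT-HA Deploy | Start <component>"
  command: kubectl apply -f {{ target_voltha_home }}/k8s/<component>.yml
  run_once: true

- name: "VOLT-HA Deploy | Wait for <component> to be ready"
  command: kubectl rollout status deployment/<component> -w -n {{ voltha_namespace }}
  run_once: true
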
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/main.yml b/install/ansible/roles/voltha-k8s/tasks/main.yml
similarity index 100%
rename from install/ansible/roles/voltha-kubernetes/tasks/main.yml
rename to install/ansible/roles/voltha-k8s/tasks/main.yml
diff --git a/install/ansible/roles/voltha-kubernetes/tasks/teardown.yml b/install/ansible/roles/voltha-k8s/tasks/teardown.yml
similarity index 69%
rename from install/ansible/roles/voltha-kubernetes/tasks/teardown.yml
rename to install/ansible/roles/voltha-k8s/tasks/teardown.yml
index fdd0485..10fb856 100644
--- a/install/ansible/roles/voltha-kubernetes/tasks/teardown.yml
+++ b/install/ansible/roles/voltha-k8s/tasks/teardown.yml
@@ -1,82 +1,69 @@
-# ONOS
-- name: "VOLT-HA Teardown | Stop ONOS"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/onos.yml
-  run_once: true
-
-# FreeRADIUS
-- name: "VOLT-HA Teardown | Stop FreeRADIUS"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/freeradius.yml
-  run_once: true
-
-# FreeRADIUS Config
-- name: "VOLT-HA Teardown | Remove FreeRADIUS Configuration"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/freeradius-config.yml
-  run_once: true
-
 # Dashd and Shovel
 - name: "VOLT-HA Teardown | Stop DASHD and SHOVEL"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/stats.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/stats.yml
   run_once: true
 
 # Grafana
 - name: "VOLT-HA Teardown | Stop Grafana"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/grafana.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/grafana.yml
   run_once: true
 
 # NETCONF
 - name: "VOLT-HA Teardown | Stop NETCONF"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/netconf.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/netconf.yml
   run_once: true
 
 # Voltha CLI
 - name: "VOLT-HA Teardown | Stop VOLT-HA CLI"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/vcli.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/vcli.yml
   run_once: true
 
 # Envoy (for consul)
 - name: "VOLT-HA Teardown | Stop Envoy (for consul)"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/envoy_for_consul.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/envoy_for_consul.yml
   run_once: true
 
 # OFagent
 - name: "VOLT-HA Teardown | Stop OpenFlow Agent"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/ofagent.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/ofagent.yml
   run_once: true
 
 # Voltha Core (for consul)
 - name: "VOLT-HA Teardown | Stop VOLT-HA core (for consul)"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/vcore_for_consul.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/vcore_for_consul.yml
   run_once: true
 
 # Consul
 - name: "VOLT-HA Teardown | Stop consul"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/consul.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/consul.yml
   run_once: true
 
 # Fluentd
 - name: "VOLT-HA Teardown | Stop fluentd"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/fluentd.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/fluentd.yml
+  run_once: true
+
+# Fluentd Config
+- name: "VOLT-HA Teardown | Remove Fluentd Configuration"
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/fluentd-config.yml
   run_once: true
 
 # Kafka
 - name: "VOLT-HA Teardown | Stop kafka"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/kafka.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/kafka.yml
   run_once: true
 
 # Zookeeper
 - name: "VOLT-HA Teardown | Stop zookeeper"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/zookeeper.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/zookeeper.yml
   run_once: true
 
 # Ingress
 - name: "VOLT-HA Teardown | Stop Ingress Controller"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/ingress
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/ingress
   run_once: true
 
 # Namespace
 - name: "VOLT-HA Teardown | Remove Namespace"
-  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_dir }}/k8s/namespace.yml
+  command: kubectl delete --ignore-not-found=true -f {{ target_voltha_home }}/k8s/namespace.yml
   run_once: true
-
-
-
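
Note: teardown.yml mirrors deploy.yml in reverse order, and every delete uses
--ignore-not-found=true, so the teardown stays idempotent even if a previous run
was interrupted or some components were never deployed. A quick follow-up check
that nothing is left behind (sketch; kubectl simply reports the namespace as not
found once deletion completes):

- name: "VOLT-HA Teardown | Verify nothing remains in the namespace (illustrative)"
  command: kubectl get all -n {{ voltha_namespace }}
  register: remaining
  failed_when: false
  changed_when: false
  run_once: true
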
diff --git a/install/ansible/roles/voltha-kubernetes/templates/bash_profile.j2 b/install/ansible/roles/voltha-k8s/templates/bash_profile.j2
similarity index 100%
rename from install/ansible/roles/voltha-kubernetes/templates/bash_profile.j2
rename to install/ansible/roles/voltha-k8s/templates/bash_profile.j2
diff --git a/install/ansible/roles/voltha-kubernetes/templates/bashrc.j2 b/install/ansible/roles/voltha-k8s/templates/bashrc.j2
similarity index 100%
rename from install/ansible/roles/voltha-kubernetes/templates/bashrc.j2
rename to install/ansible/roles/voltha-k8s/templates/bashrc.j2
diff --git a/install/ansible/roles/voltha-kubernetes/templates/ponsim_bridge.j2 b/install/ansible/roles/voltha-kubernetes/templates/ponsim_bridge.j2
deleted file mode 100644
index 6ddcfbd..0000000
--- a/install/ansible/roles/voltha-kubernetes/templates/ponsim_bridge.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-    "name": "pon0",
-    "type": "bridge",
-    "bridge": "pon0",
-    "isGateway": true,
-    "ipMask": true,
-    "ipam": {
-        "type": "host-local",
-        "subnet": "10.22.0.0/16",
-        "routes": [
-          { "dst": "0.0.0.0/0" }
-        ]
-    }
-}
-