VOL-382 This update adds a replicated file system to hold the registry
information so that it is preserved regardless of where the cluster registry
is restarted in the event of a server failure or a complete cluster reboot.
A few cleanup items for hard-coded directory values were also addressed.
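
The replication is built on GlusterFS: each cluster host formats an XFS
filesystem inside a loopback-mounted file, exposes it as a brick, and the
bricks are joined into a three-way replicated volume that backs the
registry's data directory. As a rough manual sketch of what the Ansible
tasks below automate (host1..host3 are hypothetical member names; the
paths come from the playbooks):

    # create a ~10GB backing file and format it as XFS via a loop device
    dd if=/dev/zero of=/var/cache/.cluster-fs-file bs=100M count=100
    losetup -f /var/cache/.cluster-fs-file
    mkfs.xfs -i size=512 /dev/loop0   # assumes loop0 was the first free device
    losetup -D
    # mount the file as a brick and assemble the replicated volume
    mount -o loop -t xfs /var/cache/.cluster-fs-file /var/cache/brick1
    gluster volume create registry_volume replica 3 \
        host1:/var/cache/brick1 host2:/var/cache/brick1 host3:/var/cache/brick1 force
    gluster volume start registry_volume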

Change-Id: I5d1313c5f093bafb7c4669ac91813251bbbb6795
diff --git a/compose/docker-compose-registry.yml b/compose/docker-compose-registry.yml
index 5f353d9..8804259 100644
--- a/compose/docker-compose-registry.yml
+++ b/compose/docker-compose-registry.yml
@@ -1,33 +1,18 @@
 #
-# This Docker stackfile deploys a Voltha CLI container along with one backup.
+# This Docker stackfile deploys an insecure Docker registry
 #
-# The stackfile assumes that overlay network 'voltha_net' has already been
-# created. To deploy the stack, issue the command:
 #
-#     docker stack deploy -c docker-compose-vcli.yml cli
+#     docker stack deploy -c docker-compose-registry.yml registry
 #
 
 version: "3"
 services:
   registry:
     image: registry:2
+    restart: always
     deploy:
       replicas: 1
-#    environment:
-#      DOCKER_HOST_IP: "${DOCKER_HOST_IP}"
-#    entrypoint:
-#      - /cli/cli/setup.sh
-#      - -C consul:8500
-#      - -g voltha:50555
-#      - -s voltha:18880
-#      - -G
-#    networks:
-#      - voltha-net
     ports:
       - "5001:5000"
-      
-#networks:
-#  voltha-net:
-#    external:
-#      name: voltha_net
-
+    volumes:
+      - /cord/incubator/voltha/registry_data/registry_volume:/var/lib/registry
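
The bind mount above lands inside the glusterfs mount that the cluster-host
tasks below configure at /cord/incubator/voltha/registry_data, so the
registry service can be rescheduled to any node and still see the same
blob store. Once deployed, a quick sanity check against the standard v2
registry API (port 5001 per the stackfile above):

    docker stack deploy -c docker-compose-registry.yml registry
    curl http://localhost:5001/v2/_catalog   # expect {"repositories":[...]}

Note that docker stack deploy ignores the restart: key in version 3 files
(the swarm-mode equivalent is deploy.restart_policy), so that line is
effectively a no-op here.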
diff --git a/install/ansible/group_vars/all b/install/ansible/group_vars/all
index d870fc1..6a3164e 100644
--- a/install/ansible/group_vars/all
+++ b/install/ansible/group_vars/all
@@ -2,9 +2,10 @@
 docker_cfg: docker.cfg
 docker_cfg_dest: /etc/default/docker
 docker_registry: "localhost:5000"
-docker_push_registry: "vinstall:5000"
+docker_push_registry: "vinstall1001:5000"
 cord_home: /home/volthainstall/cord
 target_voltha_dir: /cord/incubator/voltha
+replicated_fs_dir: /var/cache
 docker_py_version: "1.7.0"
 netifaces_version: "0.10.4"
 target_voltha_home: /home/voltha
diff --git a/install/ansible/roles/cluster-host/tasks/main.yml b/install/ansible/roles/cluster-host/tasks/main.yml
index 099eb25..ac1e2e2 100644
--- a/install/ansible/roles/cluster-host/tasks/main.yml
+++ b/install/ansible/roles/cluster-host/tasks/main.yml
@@ -5,7 +5,7 @@
 - name: A .ssh directory for the voltha user exists
   file:
     #path: "{{ ansible_env['HOME'] }}/.ssh"
-    path: "/home/voltha/.ssh"
+    path: "{{ target_voltha_home }}/.ssh"
     state: directory
     owner: voltha
     group: voltha
@@ -13,14 +13,14 @@
 
 - name: known_hosts file is absent for the voltha user
   file:
-    path: "/home/voltha/.ssh/known_hosts"
+    path: "{{ target_voltha_home }}/.ssh/known_hosts"
     state: absent
   tags: [cluster_host]
 
 - name: Known host checking is disabled
   copy:
     src: files/ssh_config
-    dest: "/home/voltha/.ssh/config"
+    dest: "{{ target_voltha_home }}/.ssh/config"
     owner: voltha
     group: voltha
     mode: 0600
@@ -29,7 +29,7 @@
 - name: Cluster host keys are propagated to all hosts in the cluster
   copy:
     src: files/.keys
-    dest: "/home/voltha"
+    dest: "{{ target_voltha_home }}"
     owner: voltha
     group: voltha
     mode: 0600
@@ -121,3 +121,102 @@
   when: target == "cluster"
   tags: [cluster_host]
 
+- name: Replicated filesystem file is created
+  command: "dd if=/dev/zero of={{ replicated_fs_dir }}/.cluster-fs-file bs=100M count=100"
+  when: target == "cluster"
+  tags: [cluster_host]
+
+- name: The loop device is set up for file system creation
+  command: "losetup -f {{ replicated_fs_dir }}/.cluster-fs-file"
+  when: target == "cluster"
+  tags: [cluster_host]
+
+- name: The xfs filesystem is created on the loop device
+  filesystem:
+    fstype: xfs
+    dev: /dev/loop0
+    opts: -i size=512
+  when: target == "cluster"
+  tags: [cluster_host]
+
+- name: Loop devices are detached now that the filesystem is created
+  command: "losetup -D"
+  when: target == "cluster"
+  tags: [cluster_host]
+
+- name: The registry filesystem file is owned by voltha
+  file:
+    path: "{{ replicated_fs_dir }}/.cluster-fs-file"
+    mode: 0755
+    owner: voltha
+    group: voltha
+  when: target == "cluster"
+  tags: [cluster_host]
+
+- name: A brick for a glusterfs mountpoint is created
+  file:
+    path: "{{ replicated_fs_dir }}/brick1"
+    state: directory
+    mode: 0755
+    owner: voltha
+    group: voltha
+  when: target == "cluster"
+  tags: [cluster_host]
+
+- name: The replicated filesystem is mounted on boot
+  mount:
+    path: "{{ replicated_fs_dir }}/brick1"
+    src: "{{ replicated_fs_dir }}/.cluster-fs-file"
+    fstype: xfs
+    opts: loop
+    state: mounted
+  when: target == "cluster"
+  tags: [cluster_host]
+
+- name: A directory for the glusterfs volume is created
+  file:
+    path: "{{ replicated_fs_dir }}/brick1/registry_volume"
+    state: directory
+    mode: 0755
+    owner: voltha
+    group: voltha
+  when: target == "cluster"
+  tags: [cluster_host]
+
+- name: A directory for the insecure registry data is created
+  file:
+    path: "{{ target_voltha_dir }}/registry_data"
+    state: directory
+    mode: 0755
+    owner: voltha
+    group: voltha
+  when: target == "cluster"
+  tags: [cluster_host]
+
+- name: A directory for consul's data is created
+  file:
+    path: "{{ target_voltha_dir }}/consul/data"
+    state: directory
+    mode: 0755
+    owner: voltha
+    group: voltha
+  when: target == "cluster"
+  tags: [cluster_host]
+
+- name: The glusterfs service is started
+  service:
+    name: glusterfs-server
+    enabled: yes
+    state: started
+  when: target == "cluster"
+  tags: [cluster_host]
+
+- name: The glusterfs registry volume is mounted on boot
+  mount:
+    path: "{{ target_voltha_dir }}/registry_data"
+    src: "{{ inventory_hostname }}:/registry_volume"
+    fstype: glusterfs
+    opts: "defaults,_netdev,noauto,x-systemd.automount"
+    state: present
+  when: target == "cluster"
+  tags: [cluster_host]
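
With state: present the mount module only records the entry in /etc/fstab
rather than mounting it immediately; the noauto,x-systemd.automount options
then have systemd mount the gluster volume on first access once the network
is up (_netdev). The resulting fstab entry should look roughly like this,
with node1 standing in for each host's own inventory_hostname:

    node1:/registry_volume /cord/incubator/voltha/registry_data glusterfs defaults,_netdev,noauto,x-systemd.automount 0 0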
diff --git a/install/ansible/roles/common/defaults/main.yml b/install/ansible/roles/common/defaults/main.yml
index 43637ae..8012978 100644
--- a/install/ansible/roles/common/defaults/main.yml
+++ b/install/ansible/roles/common/defaults/main.yml
@@ -17,7 +17,9 @@
   - python-nose
   - python-flake8
   - python-scapy
-#  - glusterfs-server
+  - glusterfs-server
+  - glusterfs-client
+  - attr
 #  - python-libpcap
 
 obsolete_services:
diff --git a/install/ansible/roles/glusterfs/tasks/main.yml b/install/ansible/roles/glusterfs/tasks/main.yml
new file mode 100644
index 0000000..e744d75
--- /dev/null
+++ b/install/ansible/roles/glusterfs/tasks/main.yml
@@ -0,0 +1,14 @@
+- name: The glusterfs volume is created
+  gluster_volume:
+    bricks: "{{ replicated_fs_dir }}/brick1"
+    force: true
+    cluster: "{{ groups.cluster | join(',') }}"
+    replicas: 3
+    name: registry_volume
+    state: present
+  run_once: true
+
+- name: The gluster volume is started
+  gluster_volume:
+    name: registry_volume
+    state: started
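
Because gluster volume operations are cluster-wide, run_once: true creates
the volume from a single play host while the bricks enumerate every member
of the cluster group. After the play, the volume can be verified from any
peer with the stock gluster CLI (shown as a sanity check, not part of the
change):

    gluster volume info registry_volume     # expect Type: Replicate, 1 x 3 = 3 bricks
    gluster volume status registry_volume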
diff --git a/install/ansible/voltha.yml b/install/ansible/voltha.yml
index 8216c6b..7dc6042 100644
--- a/install/ansible/voltha.yml
+++ b/install/ansible/voltha.yml
@@ -9,6 +9,13 @@
     - docker
     - docker-compose
     - voltha
+- hosts: cluster
+  remote_user: voltha
+  serial: 1
+  vars:
+    target: cluster
+  roles:
+    - glusterfs
 - hosts: swarm-master
   remote_user: voltha
   serial: 1
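
The new play runs the glusterfs role against the whole cluster group after
the per-host preparation, so every brick exists before the volume is
assembled, and serial: 1 keeps hosts from stepping on one another. A
typical invocation, assuming the installer's inventory file is named hosts:

    ansible-playbook -i hosts voltha.yml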