Multiple installer and cluster startup changes
- Added code to create the default KVM storage pool and network if they
  don't exist.
- Added a docker registry to the cluster.
- Added a wait cycle for consul to start, to avoid proxy thrashing when
  voltha starts.
- Added the rmake developer tool to simplify the edit, build, and
  install-into-cluster developer workflow.
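
For example, after an edit a developer can rebuild selected targets
directly in the build VM ('build' is an illustrative target name; any
target the voltha Makefile defines can be passed):

    install/rmake.sh build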

Change-Id: Ie365948bc3cdb9064670560d32e685554bfca432
diff --git a/compose/docker-compose-registry.yml b/compose/docker-compose-registry.yml
new file mode 100644
index 0000000..5f353d9
--- /dev/null
+++ b/compose/docker-compose-registry.yml
@@ -0,0 +1,17 @@
+#
+# This Docker stackfile deploys an insecure Docker registry for the cluster.
+# To deploy the stack, issue the command:
+#
+#     docker stack deploy -c docker-compose-registry.yml registry
+#
+
+version: "3"
+services:
+  registry:
+    image: registry:2
+    deploy:
+      replicas: 1
+    ports:
+      - "5001:5000"
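+    # Host port 5001 maps to the registry's standard port 5000 inside the container.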
+
diff --git a/install/CreateInstaller.sh b/install/CreateInstaller.sh
index 6a45a37..9ca37d7 100755
--- a/install/CreateInstaller.sh
+++ b/install/CreateInstaller.sh
@@ -108,25 +108,51 @@
 
 # Check to make sure that the vagrant-libvirt network is both defined and started
 echo -e "${lBlue}Verify that the ${lCyan}vagrant-libvirt${lBlue} network is defined and started${NC}"
-virsh net-list | grep "vagrant-libvirt" > /dev/null
+virsh net-list --all | grep "vagrant-libvirt" > /dev/null
 rtrn=$?
 if [ $rtrn -eq 1 ]; then
-	# The network isn't running, check if it's defined
-	virsh net-list --all | grep "vagrant-libvirt" > /dev/null
+	# Not defined
+	echo -e "${lBlue}Defining the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
+	virsh net-define vagrant-libvirt.xml
+	echo -e "${lBlue}Starting the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
+	virsh net-start vagrant-libvirt
+else
+	virsh net-list | grep "vagrant-libvirt" > /dev/null
 	rtrn=$?
 	if [ $rtrn -eq 1 ]; then
-		# Not defined either
-		echo -e "${lBlue}Defining the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
-		virsh net-define vagrant-libvirt.xml
-		echo -e "${lBlue}Starting the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
-		virsh net-start vagrant-libvirt
-	else
 		# Defined but not started
 		echo -e "${lBlue}Starting the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
 		virsh net-start vagrant-libvirt
+
+	else
+		# Defined and running
+		echo -e "${lBlue}The ${lCyan}vagrant-libvirt${lBlue} network is ${green}running${NC}"
 	fi
+fi
+
+# Check that the default storage pool exists and create it if it doesn't
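+# (virsh pool-list --all also lists pools that are defined but not started;
+# the nested check below uses plain pool-list to tell stopped from running)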
+virsh pool-list --all | grep default > /dev/null
+rtrn=$?
+if [ $rtrn -eq 1 ]; then
+	# Not defined
+	echo -e "${lBlue}Defining the ${lCyan}default${lBlue} storage pool${NC}"
+	virsh pool-define-as --name default --type dir --target /var/lib/libvirt/images/
+	virsh pool-autostart default
+	echo -e "${lBlue}Starting the ${lCyan}default${lBlue} storage pool${NC}"
+	virsh pool-start default
 else
-	echo -e "${lBlue}The ${lCyan}vagrant-libvirt${lBlue} network is ${green} running${NC}"
+	virsh pool-list | grep default > /dev/null
+	rtrn=$?
+	if [ $rtrn -eq 1 ]; then
+		# Defined but not started
+		echo -e "${lBlue}Starting the ${lCyan}default${lBlue} storage pool${NC}"
+		virsh pool-start default
+	else
+		# Defined and running
+		echo -e "${lBlue}The ${lCyan}default${lBlue} storage pool is ${green}running${NC}"
+	fi
 fi
 
 
diff --git a/install/ansible/roles/cluster-host/tasks/main.yml b/install/ansible/roles/cluster-host/tasks/main.yml
index 09e42e4..228a54e 100644
--- a/install/ansible/roles/cluster-host/tasks/main.yml
+++ b/install/ansible/roles/cluster-host/tasks/main.yml
@@ -77,15 +77,14 @@
     links: yes
   tags: [cluster_host]
 
-- name: pre-emptive strike to avoid errors during package installation
-  apt:
-    name: "{{ item }}"
-    state: absent
-  with_items:
-    - ubuntu-core-launcher
-    - snapd
-  tags: [cluster_host]
-
+#- name: pre-emptive strike to avoid errors during package installation
+#  apt:
+#    name: "{{ item }}"
+#    state: absent
+#  with_items:
+#    - ubuntu-core-launcher
+#    - snapd
+#  tags: [cluster_host]
 - name: A fluentd directory under tmp for voltha logs exists
   file:
     path: "/tmp/fluentd"
diff --git a/install/ansible/roles/voltha/tasks/main.yml b/install/ansible/roles/voltha/tasks/main.yml
index b7ee587..76afee1 100644
--- a/install/ansible/roles/voltha/tasks/main.yml
+++ b/install/ansible/roles/voltha/tasks/main.yml
@@ -49,6 +49,21 @@
   when: target == "cluster"
   tags: [voltha]
 
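+# The daemon.json generated by installer.sh marks each cluster host's
+# registry (port 5001) as insecure so that dockerd will push and pull
+# over plain HTTP.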
+- name: Insecure registry is configured and permitted
+  synchronize:
+    src: "/home/vinstall/daemon.json"
+    dest: /etc/docker/
+    archive: no
+    owner: no
+    perms: no
+    recursive: no
+    links: yes
+  when: target == "cluster"
+  tags: [voltha]
+
 - name: Configuration directories are owned by voltha
   file:
     path: "{{ target_voltha_dir }}/{{ item }}"
@@ -102,6 +114,13 @@
   with_items: "{{ voltha_containers }}"
   when: target == "cluster"
   tags: [voltha]
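+# Re-tag the images with this host's registry name so that any swarm node
+# can pull them from the cluster registry.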
+- name: Docker images are re-tagged to cluster registry names
+  command: docker tag {{ docker_registry }}/{{ item }} {{ inventory_hostname }}:5001/{{ item }}
+  with_items: "{{ voltha_containers }}"
+  when: target == "cluster"
+  tags: [voltha]
 #- name: Old docker image tags are removed
 #  command: docker rmi {{ docker_registry }}/{{ item }}
 #  with_items: "{{ voltha_containers }}"
@@ -162,3 +179,23 @@
   when: target == "startup"
   tags: [voltha]
 
+- name: Cluster-specific insecure registry is started
+  command: "docker stack deploy -c {{ target_voltha_dir }}/compose/docker-compose-registry.yml registry"
+  become: true
+  become_user: voltha
+  when: target == "startup"
+  tags: [voltha]
+
+- name: Wait for the cluster startup to settle
+  command: "sleep 15"
+  become: true
+  become_user: voltha
+  when: target == "startup"
+  tags: [voltha]
+
+- name: Docker images are pushed to the cluster registry
+  command: docker push {{ inventory_hostname }}:5001/{{ item }}
+  with_items: "{{ voltha_containers }}"
+  when: target == "startup"
+  tags: [voltha]
+
diff --git a/install/installer.sh b/install/installer.sh
index 9a23b79..77c25a5 100755
--- a/install/installer.sh
+++ b/install/installer.sh
@@ -129,6 +129,25 @@
 
 done
 
+# Create the daemon.json file for the swarm
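+# The generated file has the form (hostnames, taken from the .keys directory,
+# are illustrative):
+#   { "insecure-registries" : ["node1:5001" , "node2:5001"] }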
+echo "{" > daemon.json
+echo -n '  "insecure-registries" : [' >> daemon.json
+first=""
+for i in .keys/*
+do
+	if [ -z "$first" ]; then
+		echo -n '"'`basename $i`':5001"' >> daemon.json
+		first="not"
+	else
+		echo -n ' , "'`basename $i`':5001"' >> daemon.json
+	fi
+done
+echo "]" >> daemon.json
+echo "}" >> daemon.json
+unset first
 
 # Running ansible
 echo -e "${lBlue}Running ansible${NC}"
diff --git a/install/rmake.sh b/install/rmake.sh
new file mode 100755
index 0000000..cf6ddb8
--- /dev/null
+++ b/install/rmake.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# This script is for developers only. It syncs the local source tree into
+# the voltha build VM and then rebuilds each of the make targets specified
+# on the command line.
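+#
+# Usage ('build' is an illustrative target; any target the voltha Makefile
+# defines can be passed):
+#     rmake.sh build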
+
+cont=$1
+uid=`id -u`
+iVmName="vInstaller${uid}"
+vVmName="voltha_voltha${uid}"
+volthaHome=~/cord/incubator/voltha
+iIpAddr=`virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
+vIpAddr=`virsh domifaddr $vVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
+
+# TODO: Validate the command line and print a help message
+
+pushd $volthaHome
+vagrant rsync
+popd
+pushd $volthaHome/install
+# Build each of the specified targets
+for i in "$@"
+do
+	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ../.vagrant/machines/voltha${uid}/libvirt/private_key vagrant@$vIpAddr "cd /cord/incubator/voltha && source env.sh && make $i"
+done
+popd
diff --git a/install/voltha-swarm-start.sh b/install/voltha-swarm-start.sh
index cd402c8..da19949 100755
--- a/install/voltha-swarm-start.sh
+++ b/install/voltha-swarm-start.sh
@@ -5,6 +5,16 @@
 docker network create --driver overlay --subnet=10.0.1.0/24 --opt encrypted=true voltha_net
 docker stack deploy -c ${voltha_base_dir}/compose/docker-compose-kafka-cluster.yml kafka
 docker stack deploy -c ${voltha_base_dir}/compose/docker-compose-consul-cluster.yml consul
+echo "Waiting for consul to start"
+while true
+do
+	cs=`docker service ls | grep consul_consul | awk '{print $4}'`
+	if [ "$cs" == "3/3" ]; then
+		break
+	fi
+	sleep 2	# poll rather than busy-wait so docker isn't hammered
+done
+sleep 10
 docker stack deploy -c ${voltha_base_dir}/compose/docker-compose-voltha-swarm.yml vcore
 docker stack deploy -c ${voltha_base_dir}/compose/docker-compose-envoy-swarm.yml voltha
 docker stack deploy -c ${voltha_base_dir}/compose/docker-compose-vcli.yml cli