Added the necessary code to reboot the target servers after the
installation in case the kernel is upgraded during the install which is
likely if the servers are installed as recommended.
Also fixed a small issue where the key files required for swarm
initialization were nested 2 directories deep.
This update continues to address VOL-6.

Change-Id: I5d0e8b26be8028e68dcd382acf851a3ffa5bac85
diff --git a/install/installer.sh b/install/installer.sh
index 465f5d9..9c5d708 100755
--- a/install/installer.sh
+++ b/install/installer.sh
@@ -20,6 +20,12 @@
 # Source the configuration information
 . install.cfg
 
+if [ -z "$hosts" ]; then
+	echo -e "${red}No hosts specifed!!${NC}"
+	echo -e "${red}Did you forget to update the config file ${yellow}installer.cfg${red}?${NC}"
+	exit
+fi
+
 # Create the key directory
 mkdir .keys
 
@@ -102,13 +108,46 @@
 
 # Make sure the ssh keys propagate to all hosts allowing passwordless logins between them
 echo -e "${lBlue}Propagating ssh keys${NC}"
-cp -r .keys ansible/roles/cluster-host/files/.keys
+cp -r .keys ansible/roles/cluster-host/files
 
 # Running ansible
 echo -e "${lBlue}Running ansible${NC}"
 cp ansible/ansible.cfg .ansible.cfg
 sudo ansible-playbook ansible/voltha.yml -i ansible/hosts/cluster
 
+# Now all 3 servers need to be rebooted because of software installs.
+# Reboot them and wait patiently until they all come back.
+# Note this destroys the registry tunnel which is no longer needed.
+hList=""
+for i in $hosts
+do
+	echo -e "${lBlue}Rebooting cluster hosts${NC}"
+	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i  voltha@$i sudo telinit 6
+	hList="$i $hList"
+done
+
+# Give the hosts time to begin shutting down so that pings stop responding;
+# otherwise the script falls straight through the wait loop below and the
+# remaining steps fail.
+echo -e "${lBlue}Waiting for shutdown${NC}"
+sleep 5
+
+
+while [ ! -z "$hList" ];
+do
+	# Attempt to ping the VMs on the list one by one.
+	echo -e "${lBlue}Waiting for hosts to reboot ${yellow}$hList${NC}"
+	for i in $hList
+	do
+		ping -q -c 1 $i > /dev/null 2>&1
+		ret=$?
+		if [ $ret -eq 0 ]; then
+			ipExpr=`echo $i | sed -e "s/\./[.]/g"`
+			hList=`echo $hList | sed -e "s/$ipExpr//" | sed -e "s/^ //" | sed -e "s/ $//"`
+		fi
+	done
+	
+done
+
 # Now initialize the the docker swarm cluster with managers.
 # The first server needs to be the primary swarm manager
 # the other nodes are backup mangers that join the swarm.