#!/bin/bash

lBlue='\033[1;34m'
green='\033[0;32m'
orange='\033[0;33m'
NC='\033[0m'
red='\033[0;31m'
yellow='\033[1;33m'
dGrey='\033[1;30m'
lGrey='\033[1;37m'
lCyan='\033[1;36m'
wd=`pwd`


# Clean up any prior executions
rm -fr .keys
rm -f ansible/hosts/cluster
rm -f ansible/host_vars/*

# Source the configuration information
. install.cfg

if [ -z "$hosts" ]; then
    echo -e "${red}No hosts specified!!${NC}"
    echo -e "${red}Did you forget to update the config file ${yellow}install.cfg${red}?${NC}"
    exit
fi

if [ "$iUser" == "voltha" ]; then
    echo -e "${yellow}voltha ${red}can't be used as the install user!!!${NC}"
    echo -e "${red}Please delete the ${yellow}voltha ${red}user on the targets and create a different installation user${NC}"
    exit
fi

# Configure the volume size limits, but only if a value was provided in the config file

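# Note: [ -v var ] tests whether a variable is set; it requires bash 4.2 or later.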
if [ -v logLimit ]; then
    sed -i -e "/logger_volume_size/s/.*/logger_volume_size: ${logLimit}/" ansible/group_vars/all
fi
if [ -v regLimit ]; then
    sed -i -e "/registry_volume_size/s/.*/registry_volume_size: ${regLimit}/" ansible/group_vars/all
fi
if [ -v consulLimit ]; then
    sed -i -e "/consul_volume_size/s/.*/consul_volume_size: ${consulLimit}/" ansible/group_vars/all
fi

# Create the key directory
mkdir .keys

# Create the host list
echo "[cluster]" > ansible/hosts/cluster

# Silence SSH and avoid prompts
rm -f ~/.ssh/config
echo "Host *" > ~/.ssh/config
echo " StrictHostKeyChecking no" >> ~/.ssh/config
echo " UserKnownHostsFile /dev/null" >> ~/.ssh/config

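# Give root the same ssh settings so that anything run via sudo also skips host-key prompts.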
sudo cp ~/.ssh/config /root/.ssh/config

for i in $hosts
do
    # Generate the key for the host
    echo -e "${lBlue}Generating the key-pair for communication with host ${yellow}$i${NC}"
    ssh-keygen -f ./$i -t rsa -N ''
    mv $i .keys

    # Generate the pre-configuration script
    echo -e "${lBlue}Creating the pre-configuration script${NC}"
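    # Splice the new public key into the BashLoginTarget.sh template: keep the first
    # line, emit key="<public key>", then append the rest of the template minus its
    # {{ key }} placeholder line.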
    head -n +1 BashLoginTarget.sh > bash_login.sh
    echo "" >> bash_login.sh
    echo -n 'key="' >> bash_login.sh
    sed -i -e 's/$/"/' $i.pub
    cat $i.pub >> bash_login.sh
    tail -n +2 BashLoginTarget.sh | grep -v "{{ key }}" >> bash_login.sh
    rm $i.pub

    # Copy the pre-config file to the VM
    echo -e "${lBlue}Transferring pre-configuration script to ${yellow}$i${NC}"
    if [ -d ".test" ]; then
        echo -e "${red}Test mode set!!${lBlue} Using pre-populated ssh key for ${yellow}$i${NC}"
        scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .test/$i bash_login.sh $iUser@$i:.bash_login
    else
        scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no bash_login.sh $iUser@$i:.bash_login
    fi
    rm bash_login.sh

    # Run the pre-config file on the VM
    echo -e "${lBlue}Running the pre-configuration script on ${yellow}$i${NC}"
    if [ -d ".test" ]; then
        echo -e "${red}Test mode set!!${lBlue} Using pre-populated ssh key for ${yellow}$i${NC}"
        ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .test/$i $iUser@$i
    else
        ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $iUser@$i
    fi

    # Configure ansible and ssh for silent operation
    echo -e "${lBlue}Configuring ansible${NC}"
    echo $i >> ansible/hosts/cluster
    echo "ansible_ssh_private_key_file: $wd/.keys/$i" > ansible/host_vars/$i

    # Create the tunnel to the registry to allow pulls from localhost
    echo -e "${lBlue}Creating a secure shell tunnel to the registry for ${yellow}$i${NC}"
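    # -R 5000:localhost:5000 makes the installer's local registry reachable as
    # localhost:5000 on the target; -f and -N background the tunnel without running a remote command.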
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i -f voltha@$i -R 5000:localhost:5000 -N

done

# Add the dependent software list to the cluster variables
echo -e "${lBlue}Setting up dependent software${NC}"
# Delete any grub updates since the boot disk is almost
# guaranteed not to be the same device as the installer.
mkdir grub_updates
sudo mv deb_files/*grub* grub_updates
# Sort the packages in dependency order to get rid of scary non-errors
# that are issued by ansible.
#echo -e "${lBlue}Dependency sorting dependent software${NC}"
#./sort_packages.sh
#echo "deb_files:" >> ansible/group_vars/all
#for i in `cat sortedDebs.txt`
#do
#echo " - $i" >> ansible/group_vars/all
#done

# Make sure the ssh keys propagate to all hosts allowing passwordless logins between them
echo -e "${lBlue}Propagating ssh keys${NC}"
cp -r .keys ansible/roles/cluster-host/files

# Install python on all the cluster hosts since python is required by ansible
for i in $hosts
do
    echo -e "${lBlue}Installing ${lCyan}Python${lBlue}${NC}"
    scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i -r python-deb voltha@$i:.
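    # Install the minimal python package first, then let dpkg -i -R install
    # every remaining .deb found under the python-deb directory.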
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i voltha@$i "sudo dpkg -i /home/voltha/python-deb/*minimal*"
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i voltha@$i sudo dpkg -i -R /home/voltha/python-deb
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i voltha@$i rm -fr python-deb

done

if [ "$cluster_framework" == "kubernetes" ]; then

    echo -e "${green}Deploying kubernetes${NC}"

    # Start the voltha inventory from a fresh copy of the sample inventory
    cp -rfp kubespray/inventory/sample kubespray/inventory/voltha

    # Adjust kubespray configuration

    # Destination OS
    sed -i -e "/bootstrap_os: none/s/.*/bootstrap_os: ubuntu/" \
        kubespray/inventory/voltha/group_vars/all.yml

    # Subnet used for deployed k8s services
    sed -i -e "/kube_service_addresses: 10.233.0.0\/18/s/.*/kube_service_addresses: $cluster_service_subnet/" \
        kubespray/inventory/voltha/group_vars/k8s-cluster.yml

    # Subnet used for deployed k8s pods
    sed -i -e "/kube_pods_subnet: 10.233.64.0\/18/s/.*/kube_pods_subnet: $cluster_pod_subnet/" \
        kubespray/inventory/voltha/group_vars/k8s-cluster.yml

    # Prevent any downloads from kubespray
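    # The edits below patch kubespray to skip its own downloads and its docker role;
    # the installer stages the required packages and container images itself.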
    sed -i -e "s/skip_downloads: false/skip_downloads: true/" \
        kubespray/cluster.yml
    sed -i -e "s/- { role: docker, tags: docker }/#&/" \
        kubespray/cluster.yml
    sed -i -e "s/skip_downloads: false/skip_downloads: true/" \
        kubespray/roles/download/defaults/main.yml
    sed -i -e "s/when: ansible_os_family == \"Debian\"/& and skip_downloads == \"false\" /" \
        kubespray/roles/kubernetes/preinstall/tasks/main.yml
    sed -i -e "s/or is_atomic)/& and skip_downloads == \"false\" /" \
        kubespray/roles/kubernetes/preinstall/tasks/main.yml

    # Configure failover parameters
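    # A shorter grace period and eviction timeout make kubernetes detect a failed
    # node and reschedule its pods more quickly.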
    sed -i -e "s/kube_controller_node_monitor_grace_period: .*/kube_controller_node_monitor_grace_period: 20s/" \
        kubespray/roles/kubernetes/master/defaults/main.yml
    sed -i -e "s/kube_controller_pod_eviction_timeout: .*/kube_controller_pod_eviction_timeout: 30s/" \
        kubespray/roles/kubernetes/master/defaults/main.yml

    # Construct node inventory
    CONFIG_FILE=kubespray/inventory/voltha/hosts.ini python3 \
        kubespray/contrib/inventory_builder/inventory.py $hosts

    # The inventory builder configures 2 masters.
    # Due to unstable behaviour, force the use of a single master.
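    # The pipeline below joins the [kube-master] header with its first host, deletes
    # the remaining hosts in that section, then restores the line break, leaving a
    # single master in the inventory.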
    cat kubespray/inventory/voltha/hosts.ini \
        | sed -e ':begin;$!N;s/\(\[kube-master\]\)\n/\1/;tbegin;P;D' \
        | sed -e '/\[kube-master\].*/,/\[kube-node\]/{//!d}' \
        | sed -e 's/\(\[kube-master\]\)\(.*\)/\1\n\2\n/' \
        > kubespray/inventory/voltha/hosts.ini.tmp

    mv kubespray/inventory/voltha/hosts.ini.tmp kubespray/inventory/voltha/hosts.ini

    ordered_nodes=`CONFIG_FILE=kubespray/inventory/voltha/hosts.ini python3 \
        kubespray/contrib/inventory_builder/inventory.py print_ips`

    echo "[k8s-master]" > ansible/hosts/k8s-master

    mkdir -p kubespray/inventory/voltha/host_vars

    ctr=1
    for i in $ordered_nodes
    do
        echo -e "${lBlue}Adding SSH keys to kubespray ansible${NC}"
        echo "ansible_ssh_private_key_file: $wd/.keys/$i" > kubespray/inventory/voltha/host_vars/node$ctr

        if [ $ctr -eq 1 ]; then
            echo $i >> ansible/hosts/k8s-master
        fi
        ctr=$((ctr + 1))
    done

    # Prepare Voltha
    # ... Prepares environment and copies all required container images
    # ... including the ones needed by kubespray
    cp ansible/ansible.cfg .ansible.cfg
    ansible-playbook -v ansible/voltha-k8s.yml -i ansible/hosts/cluster -e 'config_voltha=true'

    # Deploy kubernetes
    ANSIBLE_CONFIG=kubespray/ansible.cfg ansible-playbook -v -b \
        --become-method=sudo --become-user root -u voltha \
        -i kubespray/inventory/voltha/hosts.ini kubespray/cluster.yml

    # Now all the cluster hosts need to be rebooted because of the software installs.
    # Reboot them and wait patiently until they all come back.
    # Note this destroys the registry tunnel, which is no longer needed.
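    # (telinit 6 asks init to switch to runlevel 6, i.e. reboot the host.)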
    hList=""
    for i in $hosts
    do
        echo -e "${lBlue}Rebooting cluster hosts${NC}"
        ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i voltha@$i sudo telinit 6
        hList="$i $hList"
    done

    # Give the hosts time to shut down so that pings stop responding; otherwise
    # the script falls straight through the next loop and the rest of the install fails.
    echo -e "${lBlue}Waiting for shutdown${NC}"
    sleep 5


    while [ ! -z "$hList" ];
    do
        # Attempt to ping the VMs on the list one by one.
        echo -e "${lBlue}Waiting for hosts to reboot ${yellow}$hList${NC}"
        for i in $hList
        do
            ping -q -c 1 $i > /dev/null 2>&1
            ret=$?
            if [ $ret -eq 0 ]; then
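                # Escape the dots so sed treats them literally, then drop this
                # host from the wait list and trim any stray spaces.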
                ipExpr=`echo $i | sed -e "s/\./[.]/g"`
                hList=`echo $hList | sed -e "s/$ipExpr//" | sed -e "s/^ //" | sed -e "s/ $//"`
            fi
        done

    done

    # Wait for kubernetes to settle after reboot
    k8sIsUp="no"
    while [ "$k8sIsUp" == "no" ];
    do
        # Probe each host until the kubernetes API port answers.
        echo -e "${lBlue}Waiting for kubernetes to settle${NC}"
        for i in $hosts
        do
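            # Port 6443 is the kubernetes API server; a successful connect means the master is back.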
            nc -vz $i 6443 > /dev/null 2>&1
            ret=$?
            if [ $ret -eq 0 ]; then
                k8sIsUp="yes"
                break
            fi
            sleep 1
        done
    done
    echo -e "${lBlue}Kubernetes is up and running${NC}"

    # Deploy Voltha
    ansible-playbook -v ansible/voltha-k8s.yml -i ansible/hosts/k8s-master -e 'deploy_voltha=true'

else
    # Legacy swarm instructions

    # Create the daemon.json file for the swarm
    echo "{" > daemon.json
    echo -n ' "insecure-registries" : [' >> daemon.json
    first=""
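    # 'first' only controls whether a comma separator is emitted before the next registry entry.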
    for i in .keys/*
    do
        if [ -z "$first" ]; then
            echo -n '"'`basename $i`':5001"' >> daemon.json
            first="not"
        else
            echo -n ' , "'`basename $i`':5001"' >> daemon.json
        fi
    done
    echo "]" >> daemon.json
    echo "}" >> daemon.json
    unset first

    # Running ansible
    echo -e "${lBlue}Running ansible${NC}"
    cp ansible/ansible.cfg .ansible.cfg
    ansible-playbook ansible/voltha.yml -i ansible/hosts/cluster

    # Now all the cluster hosts need to be rebooted because of the software installs.
    # Reboot them and wait patiently until they all come back.
    # Note this destroys the registry tunnel, which is no longer needed.
    hList=""
    for i in $hosts
    do
        echo -e "${lBlue}Rebooting cluster hosts${NC}"
        ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i .keys/$i voltha@$i sudo telinit 6
        hList="$i $hList"
    done

    # Give the hosts time to shut down so that pings stop responding; otherwise
    # the script falls straight through the next loop and the rest of the install fails.
    echo -e "${lBlue}Waiting for shutdown${NC}"
    sleep 5


    while [ ! -z "$hList" ];
    do
        # Attempt to ping the VMs on the list one by one.
        echo -e "${lBlue}Waiting for hosts to reboot ${yellow}$hList${NC}"
        for i in $hList
        do
            ping -q -c 1 $i > /dev/null 2>&1
            ret=$?
            if [ $ret -eq 0 ]; then
                ipExpr=`echo $i | sed -e "s/\./[.]/g"`
                hList=`echo $hList | sed -e "s/$ipExpr//" | sed -e "s/^ //" | sed -e "s/ $//"`
            fi
        done

    done

    # Now initialize the docker swarm cluster with managers.
    # The first server needs to be the primary swarm manager;
    # the other nodes are backup managers that join the swarm.
    # In the future, worker nodes will likely be added.

    echo "[swarm-master]" > ansible/hosts/swarm-master
    echo "[swarm-master-backup]" > ansible/hosts/swarm-master-backup

    ctr=1
    for i in $hosts
    do
        if [ $ctr -eq 1 ]; then
            echo $i >> ansible/hosts/swarm-master
            echo "swarm_master_addr: \"$i\"" >> ansible/group_vars/all
            ctr=0
        else
            echo $i >> ansible/hosts/swarm-master-backup
        fi
    done
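    # Initialize the swarm on the primary manager, have the backup managers join it,
    # then deploy the voltha stack from the primary.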
    ansible-playbook ansible/swarm.yml -i ansible/hosts/swarm-master
    ansible-playbook ansible/swarm.yml -i ansible/hosts/swarm-master-backup
    ansible-playbook ansible/voltha.yml -i ansible/hosts/swarm-master

fi