blob: 200386788b2832f210dc94ab7e8370caaf73cd61 [file] [log] [blame]
#!/bin/bash

# Names of the VMs and images this script manages.
iVmName="vInstaller"
vVmName="voltha_voltha"
baseImage="Ubuntu1604LTS"
iVmNetwork="vagrant-libvirt"
installerArchive="installer.tar.bz2"
installerDirectory="volthaInstaller"
installerPart="installer.part"
shutdownTimeout=5
ipTimeout=10

# Command line argument variables
testMode="no"



# ANSI escape sequences used to colorize console output.
lBlue='\033[1;34m'
green='\033[0;32m'
orange='\033[0;33m'
NC='\033[0m'
red='\033[0;31m'
yellow='\033[1;33m'
dGrey='\033[1;30m'
lGrey='\033[1;37m'
lCyan='\033[1;36m'

# Numeric user id and working directory, used to namespace per-user VMs.
uId=$(id -u)
wd=$(pwd)
31
#######################################
# Parse the script's command line arguments.
# Globals:   testMode (written), lBlue/green/NC (read, for output color)
# Arguments: all script arguments; the literal word "test" enables test mode
#######################################
parse_args()
{
	# Bug fix: quote "$@" so arguments containing spaces are not word-split
	# and glob characters are not expanded.
	for i in "$@"
	do
		case "$i" in
		"test" )
			testMode="yes"
			echo -e "${lBlue}Test mode is ${green}enabled${NC}"
			;;
		esac
	done
}
44
45
######################################
# MAIN MAIN MAIN MAIN MAIN MAIN MAIN #
######################################
parse_args "$@"
# Validate that vagrant is installed.
echo -e "${lBlue}Ensure that ${lCyan}vagrant${lBlue} is installed${NC}"
vInst=$(which vagrant)

if [ -z "$vInst" ]; then
	# Bug fix: the downloaded and installed package versions must match.
	# The script previously fetched 1.9.5 but ran dpkg/rm on 1.8.5, so the
	# install always failed when vagrant was missing.
	wget https://releases.hashicorp.com/vagrant/1.9.5/vagrant_1.9.5_x86_64.deb
	sudo dpkg -i vagrant_1.9.5_x86_64.deb
	rm vagrant_1.9.5_x86_64.deb
fi
unset vInst

# Validate that ansible is installed
echo -e "${lBlue}Ensure that ${lCyan}ansible${lBlue} is installed${NC}"
aInst=$(which ansible)

if [ -z "$aInst" ]; then
	sudo apt-get install -y software-properties-common
	sudo apt-add-repository ppa:ansible/ansible
	sudo apt-get update
	sudo apt-get install -y ansible
fi
# Bug fix: the original unset vInst (already unset above); aInst was leaked.
unset aInst
# Verify if this is intended to be a test environment, if so
# configure the 3 VMs which will be started later to emulate
# the production installation cluster.
if [ "$testMode" == "yes" ]; then
	echo -e "${lBlue}Test mode ${green}enabled${lBlue}, configure the ${lCyan}ha-serv${lBlue} VMs${NC}"
	# Update the vagrant settings file
	sed -i -e '/server_name/s/.*/server_name: "ha-serv'${uId}'-"/' settings.vagrant.yaml
	sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall'${uId}':5000"/' ansible/group_vars/all
	sed -i -e "/vinstall/s/vinstall/vinstall${uId}/" ../ansible/roles/docker/templates/daemon.json

	# Set the insecure registry configuration based on the installer hostname
	echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${uId}${NC}"
	echo '{' > ansible/roles/voltha/templates/daemon.json
	echo '"insecure-registries" : ["vinstall'${uId}':5000"]' >> ansible/roles/voltha/templates/daemon.json
	echo '}' >> ansible/roles/voltha/templates/daemon.json

	# Change the installer name so each user gets a distinct installer VM
	iVmName="vInstaller${uId}"
else
	rm -fr .test
	# Clean out the install config file keeping only the commented lines
	# which serve as documentation.
	sed -i -e '/^#/!d' install.cfg
	# Set the insecure registry configuration based on the installer hostname
	# Typo fix: the console message previously read "inescure".
	echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${uId}${NC}"
	sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall:5000"/' ansible/group_vars/all
	echo '{' > ansible/roles/voltha/templates/daemon.json
	echo '"insecure-registries" : ["vinstall:5000"]' >> ansible/roles/voltha/templates/daemon.json
	echo '}' >> ansible/roles/voltha/templates/daemon.json
fi

# Shut down the domain in case it's running.
echo -e "${lBlue}Shut down the ${lCyan}$iVmName${lBlue} VM if running${NC}"
ctr=0
vStat=$(virsh list | grep "$iVmName")
virsh shutdown "$iVmName"
while [ -n "$vStat" ]; do
	echo "Waiting for $iVmName to shut down"
	sleep 2
	vStat=$(virsh list | grep "$iVmName")
	ctr=$((ctr + 1))
	# Give up waiting after shutdownTimeout polls and pull the plug.
	if [ "$ctr" -eq "$shutdownTimeout" ]; then
		echo -e "${red}Tired of waiting, forcing the VM off${NC}"
		virsh destroy "$iVmName"
		vStat=$(virsh list | grep "$iVmName")
	fi
done


# Delete the VM and ignore any errors should they occur
echo -e "${lBlue}Undefining the ${lCyan}$iVmName${lBlue} domain${NC}"
virsh undefine "$iVmName"

# Remove the associated volume
echo -e "${lBlue}Removing the ${lCyan}$iVmName.qcow2${lBlue} volume${NC}"
virsh vol-delete "${iVmName}.qcow2" default

# Clone the base vanilla ubuntu install
echo -e "${lBlue}Cloning the ${lCyan}$baseImage.qcow2${lBlue} to ${lCyan}$iVmName.qcow2${NC}"
virsh vol-clone "${baseImage}.qcow2" "${iVmName}.qcow2" default

# Create the xml file and define the VM for virsh; substitute the VM name
# and network into the template in a single sed pass.
echo -e "${lBlue}Defining the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
sed -e "s/{{ VMName }}/$iVmName/g" -e "s/{{ VMNetwork }}/$iVmNetwork/g" vmTemplate.xml > tmp.xml

virsh define tmp.xml

rm tmp.xml

# Start the VM; if it's already running just ignore the error
echo -e "${lBlue}Starting the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
virsh start "$iVmName" > /dev/null 2>&1

# Generate a keypair for communicating with the VM
echo -e "${lBlue}Generating the key-pair for communication with the VM${NC}"
ssh-keygen -f ./key -t rsa -N ''

mv key key.pem

# Clone BashLogin.sh and append commands that install the public key in
# the VM's authorized_keys, then self-destruct and log out.
echo -e "${lBlue}Creating the pre-configuration script${NC}"
cp BashLogin.sh bash_login.sh
{
	echo "cat <<HERE > .ssh/authorized_keys"
	cat key.pub
	echo "HERE"
	echo "chmod 400 .ssh/authorized_keys"
	echo "rm .bash_login"
	echo "logout"
} >> bash_login.sh
rm key.pub

# Get the VM's IP address, polling virsh until DHCP has assigned one.
ctr=0
ipAddr=""
while [ -z "$ipAddr" ];
do
	echo -e "${lBlue}Waiting for the VM's IP address${NC}"
	ipAddr=$(virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
	sleep 3
	if [ $ctr -eq $ipTimeout ]; then
		echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
		# Bug fix: exit with a non-zero status so the failure is detectable;
		# a bare "exit" returned the status of the preceding echo (0).
		exit 1
	fi
	ctr=$(expr $ctr + 1)
done

echo -e "${lBlue}The IP address is: ${lCyan}$ipAddr${NC}"

# Copy the pre-config file to the VM
# Typo fix: message previously read "Transfering".
echo -e "${lBlue}Transferring pre-configuration script to the VM${NC}"
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no bash_login.sh vinstall@$ipAddr:.bash_login

rm bash_login.sh

# Run the pre-config file on the VM (the .bash_login runs on login and
# then removes itself and logs out).
echo -e "${lBlue}Running the pre-configuration script on the VM${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no vinstall@$ipAddr

# If we're in test mode, change the hostname of the installer vm
# also start the 3 vagrant target VMs
if [ "$testMode" == "yes" ]; then
	echo -e "${lBlue}Test mode, change the installer host name to ${lCyan}vinstall${uId}${NC}"
	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
		sudo hostnamectl set-hostname vinstall${uId}
	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
		sudo service networking restart

	echo -e "${lBlue}Testing, start the ${lCyan}ha-serv${lBlue} VMs${NC}"
	vagrant destroy ha-serv${uId}-{1,2,3}
	vagrant up ha-serv${uId}-{1,2,3}
	./devSetHostList.sh
fi

# Ensure that the voltha VM is running so that images can be secured
echo -e "${lBlue}Ensure that the ${lCyan}voltha VM${lBlue} is running${NC}"
vVM=$(virsh list | grep voltha_voltha${uId})

if [ -z "$vVM" ]; then
	if [ "$testMode" == "yes" ]; then
		./BuildVoltha.sh $1
	else
		# Default to installer mode
		./BuildVoltha.sh install
	fi
	# $? still holds BuildVoltha.sh's status here; capture it once instead
	# of duplicating rtrn=$? in both branches.
	rtrn=$?
	if [ $rtrn -ne 0 ]; then
		# Message fix: the original text ended with a stray " is running".
		echo -e "${red}Voltha build failed!! ${lCyan}Please review the log and correct${NC}"
		exit 1
	fi
fi

# Extract all the image names and tags from the running voltha VM
# when running in test mode. This will provide the entire suite
# of available containers to the VM cluster.

if [ "$testMode" == "yes" ]; then
	echo -e "${lBlue}Extracting the docker image list from the voltha VM${NC}"
	volIpAddr=$(virsh domifaddr $vVmName${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ../.vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$volIpAddr "docker image ls" > images.tmp
	# Skip the header row and the local :5000 registry copies, then render
	# each remaining image as a "- name:tag" YAML list entry.
	grep -v 5000 images.tmp | tail -n +2 | awk '{printf(" - %s:%s\n", $1, $2)}' > image-list.cfg
	rm -f images.tmp
	sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
	echo "voltha_containers:" >> ansible/group_vars/all
	cat image-list.cfg >> ansible/group_vars/all
	rm -f image-list.cfg
else
	echo -e "${lBlue}Set up the docker image list from ${lCyan}containers.cfg${NC}"
	sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
	cat containers.cfg >> ansible/group_vars/all
fi

# Install python which is required for ansible
echo -e "${lBlue}Installing python${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y install python

# Make sure the VM is up-to-date
echo -e "${lBlue}Ensure that the VM is up-to-date${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y upgrade


# Create the docker.cfg file in the ansible tree using the VMs IP address
echo 'DOCKER_OPTS="$DOCKER_OPTS --insecure-registry '$ipAddr':5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=http://'$ipAddr':5001"' > ansible/roles/docker/templates/docker.cfg

# Add the voltha vm's information to the ansible tree
echo -e "${lBlue}Add the voltha vm and key to the ansible accessible hosts${NC}"
vIpAddr=$(virsh domifaddr voltha_voltha${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
echo "[voltha]" > ansible/hosts/voltha
echo $vIpAddr >> ansible/hosts/voltha
echo "ansible_ssh_private_key_file: $wd/../.vagrant/machines/voltha${uId}/libvirt/private_key" > ansible/host_vars/$vIpAddr


# Prepare to launch the ansible playbook to configure the installer VM
echo -e "${lBlue}Prepare to launch the ansible playbook to configure the VM${NC}"
echo "[installer]" > ansible/hosts/installer
echo "$ipAddr" >> ansible/hosts/installer
echo "ansible_ssh_private_key_file: $wd/key.pem" > ansible/host_vars/$ipAddr

# Launch the ansible playbooks

echo -e "${lBlue}Launching the ${lCyan}volthainstall${lBlue} ansible playbook on the installer vm${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/installer
rtrn=$?
if [ $rtrn -ne 0 ]; then
	echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
	# Bug fix: exit non-zero so the playbook failure propagates to callers;
	# a bare "exit" returned the status of the echo above (0).
	exit 1
fi


echo -e "${lBlue}Launching the ${lCyan}volthainstall${lBlue} ansible playbook on the voltha vm${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/voltha
rtrn=$?
if [ $rtrn -ne 0 ]; then
	echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
	# Bug fix: as above, report failure via a non-zero exit status.
	exit 1
fi

if [ "$testMode" == "yes" ]; then
	echo -e "${lBlue}Testing, the install image ${red}WILL NOT${lBlue} be built${NC}"


	# Reboot the installer
	echo -e "${lBlue}Rebooting the installer${NC}"
	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo telinit 6
	# Wait for the host to shut down
	sleep 5

	# Poll for the rebooted VM's IP address.
	ctr=0
	ipAddr=""
	while [ -z "$ipAddr" ];
	do
		echo -e "${lBlue}Waiting for the VM's IP address${NC}"
		ipAddr=$(virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
		sleep 3
		if [ $ctr -eq $ipTimeout ]; then
			echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
			# Bug fix: exit non-zero on timeout (a bare "exit" reported success).
			exit 1
		fi
		ctr=$(expr $ctr + 1)
	done

	# Stage a .bash_login that runs the installer on next login, then log in.
	echo -e "${lBlue}Running the installer${NC}"
	echo "~/installer.sh" > tmp_bash_login
	echo "rm ~/.bash_login" >> tmp_bash_login
	echo "logout" >> tmp_bash_login
	scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem tmp_bash_login vinstall@$ipAddr:.bash_login
	rm -f tmp_bash_login
	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr

else
	echo -e "${lBlue}Building, the install image (this can take a while)${NC}"
	# Create a temporary directory for all the installer files
	mkdir tmp_installer
	cp vmTemplate.xml tmp_installer
	# Shut down the installer vm
	ctr=0
	vStat=$(virsh list | grep $iVmName)
	virsh shutdown $iVmName
	while [ ! -z "$vStat" ];
	do
		echo "Waiting for $iVmName to shut down"
		sleep 2
		vStat=$(virsh list | grep $iVmName)
		ctr=$(expr $ctr + 1)
		if [ $ctr -eq $shutdownTimeout ]; then
			echo -e "${red}Tired of waiting, forcing the VM off${NC}"
			virsh destroy $iVmName
			vStat=$(virsh list | grep $iVmName)
		fi
	done
	# Copy the install bootstrap script to the installer directory
	cp BootstrapInstaller.sh tmp_installer
	# Copy the private key to access the VM
	cp key.pem tmp_installer
	pushd tmp_installer > /dev/null 2>&1
	# Copy the vm image to the installer directory
	virsh vol-dumpxml $iVmName.qcow2 default | sed -e 's/<key.*key>//' | sed -e '/^[ ]*$/d' > ${iVmName}_volume.xml
	virsh pool-create-as installer --type dir --target $(pwd)
	virsh vol-create-from installer ${iVmName}_volume.xml $iVmName.qcow2 --inputpool default
	virsh pool-destroy installer
	# The image is copied in as root. It needs to have ownership changed
	# this will result in a password prompt.
	# Fix: use ':' as the owner/group separator; the 'user.group' form is
	# deprecated in GNU chown and breaks for usernames containing dots.
	sudo chown $(whoami):$(whoami) $iVmName.qcow2
	# Now create the installer tar file
	tar cjf ../$installerArchive .
	popd > /dev/null 2>&1
	# Clean up
	rm -fr tmp_installer
	# Final location for the installer
	rm -fr $installerDirectory
	mkdir $installerDirectory
	cp deployInstaller.sh $installerDirectory
	# Check the image size and determine if it needs to be split.
	# To be safe, split the image into chunks smaller than 2G so that
	# it will fit on a FAT32 volume.
	# Fix: quote the awk program correctly ('{print $5}'); the original
	# misplaced the closing quote ('{print $5'}), leaving the brace unquoted.
	fSize=$(ls -l $installerArchive | awk '{print $5}')
	if [ $fSize -gt 2000000000 ]; then
		echo -e "${lBlue}Installer file too large, breaking into parts${NC}"
		# The file is too large, breaking it up into parts
		sPos=0
		fnn="00"
		while dd if=$installerArchive of=${installerDirectory}/${installerPart}$fnn \
			bs=1900MB count=1 skip=$sPos > /dev/null 2>&1
		do
			sPos=$(expr $sPos + 1)
			# dd produced an empty part: the previous chunk was the last one.
			if [ ! -s ${installerDirectory}/${installerPart}$fnn ]; then
				rm -f ${installerDirectory}/${installerPart}$fnn
				break
			fi
			# Zero-pad single-digit part numbers so parts sort lexically.
			if [ $sPos -lt 10 ]; then
				fnn="0$sPos"
			else
				fnn="$sPos"
			fi
		done
	else
		cp $installerArchive $installerDirectory
	fi
	# Clean up
	rm $installerArchive
	echo -e "${lBlue}The install image is built and can be found in ${lCyan}$installerDirectory${NC}"
	# Typo fix: "traasnport" -> "transport" in the final user message.
	echo -e "${lBlue}Copy all the files in ${lCyan}$installerDirectory${lBlue} to the transport media${NC}"
fi