blob: 9ca37d734f474e7b8783f8c760658b39bacaa1e3 [file] [log] [blame]
#!/bin/bash

# Configuration for the VOLTHA installer-builder.
# iVmName      - name of the libvirt domain used to build the installer
# vVmName      - name of the vagrant-managed voltha development VM
# baseImage    - vanilla Ubuntu volume that the installer VM is cloned from
iVmName="vInstaller"
vVmName="voltha_voltha"
baseImage="Ubuntu1604LTS"
iVmNetwork="vagrant-libvirt"
installerArchive="installer.tar.bz2"
installerDirectory="volthaInstaller"
installerPart="installer.part"
shutdownTimeout=5   # polls (2s each) before a hung VM is forced off
ipTimeout=10        # polls (3s each) to wait for a VM's DHCP address

# Command line argument variables (set by parse_args)
testMode="no"
rebuildVoltha="no"

# ANSI color escapes used by the status messages below.
lBlue='\033[1;34m'
green='\033[0;32m'
orange='\033[0;33m'
NC='\033[0m'
red='\033[0;31m'
yellow='\033[1;33m'
dGrey='\033[1;30m'
lGrey='\033[1;37m'
lCyan='\033[1;36m'

# The user id suffixes VM/host names in test mode so that several
# developers can share one hypervisor without name collisions.
uId=$(id -u)
wd=$(pwd)
32
# parse_args - scan the command line for recognized keywords.
# Arguments: all script arguments ("$@").
# Side effects: sets testMode="yes" when "test" is present and
# rebuildVoltha="yes" when "rebuild" is present; announces each on stdout.
# Unrecognized arguments are silently ignored.
parse_args()
{
    # Quote "$@" and "$arg" so arguments survive word-splitting/globbing.
    for arg in "$@"
    do
        case "$arg" in
            "test" )
                testMode="yes"
                echo -e "${lBlue}Test mode is ${green}enabled${NC}"
                ;;
            "rebuild" )
                rebuildVoltha="yes"
                echo -e "${lBlue}Voltha rebuild is ${green}enabled${NC}"
                ;;
        esac
    done
}
49
50
######################################
# MAIN MAIN MAIN MAIN MAIN MAIN MAIN #
######################################
parse_args "$@"
# Validate that vagrant is installed; install a pinned release if missing.
echo -e "${lBlue}Ensure that ${lCyan}vagrant${lBlue} is installed${NC}"
vInst=$(command -v vagrant)

if [ -z "$vInst" ]; then
    # BUGFIX: the downloaded version (1.9.5) and the installed/removed
    # version (1.8.5) previously disagreed, so the install always failed.
    wget https://releases.hashicorp.com/vagrant/1.9.5/vagrant_1.9.5_x86_64.deb
    sudo dpkg -i vagrant_1.9.5_x86_64.deb
    rm vagrant_1.9.5_x86_64.deb
fi
unset vInst

# Validate that ansible is installed; pull it from the PPA if missing.
echo -e "${lBlue}Ensure that ${lCyan}ansible${lBlue} is installed${NC}"
aInst=$(command -v ansible)

if [ -z "$aInst" ]; then
    sudo apt-get install -y software-properties-common
    sudo apt-add-repository ppa:ansible/ansible
    sudo apt-get update
    sudo apt-get install -y ansible
fi
# BUGFIX: this previously unset vInst (already unset) instead of aInst.
unset aInst
77
# Verify if this is intended to be a test environment, if so
# configure the 3 VMs which will be started later to emulate
# the production installation cluster.
if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Test mode ${green}enabled${lBlue}, configure the ${lCyan}ha-serv${lBlue} VMs${NC}"
    # Update the vagrant settings file: suffix names with the user id so
    # multiple developers can share the hypervisor without collisions.
    sed -i -e '/server_name/s/.*/server_name: "ha-serv'${uId}'-"/' settings.vagrant.yaml
    sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall'${uId}':5000"/' ansible/group_vars/all
    sed -i -e "/vinstall/s/vinstall/vinstall${uId}/" ../ansible/roles/docker/templates/daemon.json

    # Set the insecure registry configuration based on the installer hostname
    echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${uId}${NC}"
    echo '{' > ansible/roles/voltha/templates/daemon.json
    echo '"insecure-registries" : ["vinstall'${uId}':5000"]' >> ansible/roles/voltha/templates/daemon.json
    echo '}' >> ansible/roles/voltha/templates/daemon.json

    # Change the installer name so it is unique per user as well.
    iVmName="vInstaller${uId}"
else
    rm -fr .test
    # Clean out the install config file keeping only the commented lines
    # which serve as documentation.
    sed -i -e '/^#/!d' install.cfg
    # Set the insecure registry configuration based on the installer hostname
    echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${NC}"
    sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall:5000"/' ansible/group_vars/all
    echo '{' > ansible/roles/voltha/templates/daemon.json
    echo '"insecure-registries" : ["vinstall:5000"]' >> ansible/roles/voltha/templates/daemon.json
    echo '}' >> ansible/roles/voltha/templates/daemon.json
fi
108
# Check to make sure that the vagrant-libvirt network is both defined and started
echo -e "${lBlue}Verify that the ${lCyan}vagrant-libvirt${lBlue} network is defined and started${NC}"
# 'net-list --all' shows defined-but-stopped networks; plain 'net-list'
# shows only active ones — the two greps distinguish the three states.
virsh net-list --all | grep "vagrant-libvirt" > /dev/null
rtrn=$?
if [ $rtrn -eq 1 ]; then
    # Not defined
    echo -e "${lBlue}Defining the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
    virsh net-define vagrant-libvirt.xml
    echo -e "${lBlue}Starting the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
    virsh net-start vagrant-libvirt
else
    virsh net-list | grep "vagrant-libvirt" > /dev/null
    rtrn=$?
    if [ $rtrn -eq 1 ]; then
        # Defined but not started
        echo -e "${lBlue}Starting the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
        virsh net-start vagrant-libvirt
    else
        # Defined and running
        echo -e "${lBlue}The ${lCyan}vagrant-libvirt${lBlue} network is ${green}running${NC}"
    fi
fi

# Check that the default storage pool exists and create it if it doesn't
virsh pool-list --all | grep default > /dev/null
rtrn=$?
if [ $rtrn -eq 1 ]; then
    # Not defined
    echo -e "${lBlue}Defining the ${lCyan}default${lBlue} storage pool${NC}"
    virsh pool-define-as --name default --type dir --target /var/lib/libvirt/images/
    virsh pool-autostart default
    echo -e "${lBlue}Starting the ${lCyan}default${lBlue} storage pool${NC}"
    virsh pool-start default
else
    virsh pool-list | grep default > /dev/null
    rtrn=$?
    if [ $rtrn -eq 1 ]; then
        # Defined but not started
        echo -e "${lBlue}Starting the ${lCyan}default${lBlue} storage pool${NC}"
        virsh pool-start default
    else
        # Defined and running
        echo -e "${lBlue}The ${lCyan}default${lBlue} storage pool is ${green}running${NC}"
    fi
fi
155
Sergio Slobodrian7c483622017-06-13 15:51:34 -0400156
# Shut down the domain in case it's running.
echo -e "${lBlue}Shut down the ${lCyan}$iVmName${lBlue} VM if running${NC}"
ctr=0
# The trailing space in the grep pattern forces an exact domain-name match
# (otherwise "vInstaller" would also match "vInstaller1000" in test mode).
vStat=$(virsh list | grep "$iVmName ")
virsh shutdown $iVmName
while [ ! -z "$vStat" ];
do
    echo "Waiting for $iVmName to shut down"
    sleep 2
    vStat=$(virsh list | grep "$iVmName ")
    ctr=$(expr $ctr + 1)
    if [ $ctr -eq $shutdownTimeout ]; then
        echo -e "${red}Tired of waiting, forcing the VM off${NC}"
        virsh destroy $iVmName
        vStat=$(virsh list | grep "$iVmName ")
    fi
done


# Delete the VM and ignore any errors should they occur
echo -e "${lBlue}Undefining the ${lCyan}$iVmName${lBlue} domain${NC}"
virsh undefine $iVmName

# Remove the associated volume
echo -e "${lBlue}Removing the ${lCyan}$iVmName.qcow2${lBlue} volume${NC}"
virsh vol-delete "${iVmName}.qcow2" default

# Clone the base vanilla ubuntu install
echo -e "${lBlue}Cloning the ${lCyan}$baseImage.qcow2${lBlue} to ${lCyan}$iVmName.qcow2${NC}"
virsh vol-clone "${baseImage}.qcow2" "${iVmName}.qcow2" default

# Create the xml file and define the VM for virsh by substituting the
# name/network placeholders in the template (single sed, no useless cat).
echo -e "${lBlue}Defining the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
sed -e "s/{{ VMName }}/$iVmName/g" -e "s/{{ VMNetwork }}/$iVmNetwork/g" vmTemplate.xml > tmp.xml

virsh define tmp.xml

rm tmp.xml

# Start the VM, if it's already running just ignore the error
echo -e "${lBlue}Starting the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
virsh start $iVmName > /dev/null 2>&1

# Generate a keypair for communicating with the VM
echo -e "${lBlue}Generating the key-pair for communication with the VM${NC}"
ssh-keygen -f ./key -t rsa -N ''

mv key key.pem

# Clone BashLogin.sh and add the public key to it for later use.
# The resulting script installs the key on first login and then deletes
# itself so subsequent logins behave normally.
echo -e "${lBlue}Creating the pre-configuration script${NC}"
cp BashLogin.sh bash_login.sh
echo "cat <<HERE > .ssh/authorized_keys" >> bash_login.sh
cat key.pub >> bash_login.sh
echo "HERE" >> bash_login.sh
echo "chmod 400 .ssh/authorized_keys" >> bash_login.sh
echo "rm .bash_login" >> bash_login.sh
echo "logout" >> bash_login.sh
rm key.pub
217
218
# Get the VM's IP address, polling until libvirt's DHCP lease shows up.
ctr=0
ipAddr=""
while [ -z "$ipAddr" ];
do
    echo -e "${lBlue}Waiting for the VM's IP address${NC}"
    # domifaddr output: 2 header lines, then "name mac proto addr/prefix";
    # take column 4 and strip the /prefix suffix.
    ipAddr=$(virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
    sleep 3
    if [ $ctr -eq $ipTimeout ]; then
        echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
        # Exit non-zero so callers can detect the failure.
        exit 1
    fi
    ctr=$(expr $ctr + 1)
done

echo -e "${lBlue}The IP address is: ${lCyan}$ipAddr${NC}"

# Copy the pre-config file to the VM
echo -e "${lBlue}Transferring pre-configuration script to the VM${NC}"
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no bash_login.sh vinstall@$ipAddr:.bash_login

rm bash_login.sh

# Run the pre-config file on the VM (interactive: the key isn't
# authorized yet, so this login installs it via .bash_login).
echo -e "${lBlue}Running the pre-configuration script on the VM${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no vinstall@$ipAddr
245
# In test mode the installer VM gets a per-user hostname, and the three
# vagrant target VMs (the emulated production cluster) are recreated.
if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Test mode, change the installer host name to ${lCyan}vinstall${uId}${NC}"
    # Rename the installer host, then bounce networking so the new name
    # is picked up.
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
        sudo hostnamectl set-hostname vinstall${uId}
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
        sudo service networking restart

    echo -e "${lBlue}Testing, start the ${lCyan}ha-serv${lBlue} VMs${NC}"
    # Recreate the three ha-serv cluster VMs from scratch.
    vagrant destroy ha-serv${uId}-{1,2,3}
    vagrant up ha-serv${uId}-{1,2,3}
    ./devSetHostList.sh
fi
260
# Ensure that the voltha VM is running so that images can be secured
echo -e "${lBlue}Ensure that the ${lCyan}voltha VM${lBlue} is running${NC}"
# Use $vVmName rather than a second hard-coded copy of the domain name.
vVm=$(virsh list | grep "${vVmName}${uId}")
#echo "vVm: $vVm"
#echo "rebuildVoltha: $rebuildVoltha"


# Build (or rebuild) the voltha VM when it isn't running or a rebuild was
# requested. NOTE: replaced the obsolescent '[ ... -o ... ]' with '||'.
if [ -z "$vVm" ] || [ "$rebuildVoltha" == "yes" ]; then
    if [ "$testMode" == "yes" ]; then
        ./BuildVoltha.sh "$1"
        rtrn=$?
    else
        # Default to installer mode
        ./BuildVoltha.sh install
        rtrn=$?
    fi
    if [ $rtrn -ne 0 ]; then
        # BUGFIX: the old message ended with a stray "is running" pasted
        # from another echo.
        echo -e "${red}Voltha build failed!! ${lCyan}Please review the log and correct${NC}"
        exit 1
    fi
fi

# Extract all the image names and tags from the running voltha VM
# when running in test mode. This will provide the entire suite
# of available containers to the VM cluster.

if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Extracting the docker image list from the voltha VM${NC}"
    volIpAddr=$(virsh domifaddr ${vVmName}${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ../.vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$volIpAddr "docker image ls" > images.tmp
    # Drop the header row and any images already in a local :5000 registry,
    # then reformat as YAML list items.
    grep -v 5000 images.tmp | tail -n +2 | awk '{printf(" - %s:%s\n", $1, $2)}' > image-list.cfg
    rm -f images.tmp
    # Replace everything from 'voltha_containers:' to EOF with the new list.
    sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
    echo "voltha_containers:" >> ansible/group_vars/all
    cat image-list.cfg >> ansible/group_vars/all
    rm -f image-list.cfg
else
    echo -e "${lBlue}Set up the docker image list from ${lCyan}containers.cfg${NC}"
    sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
    cat containers.cfg >> ansible/group_vars/all
fi
Sergio Slobodrianba9cbd82017-06-22 11:45:49 -0400302
Sergio Slobodrian9d9c8442017-07-25 07:55:42 -0400303
# Install python which is required for ansible
echo -e "${lBlue}Installing ${lCyan}Python${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y install python

# Move all the python deb files to their own directory so they can be installed first
echo -e "${lBlue}Caching ${lCyan}Python${lBlue} install${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr mkdir python-deb
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr "sudo mv /var/cache/apt/archives/*.deb /home/vinstall/python-deb"
# Use the POSIX 'user:group' separator; the 'user.group' form is deprecated.
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr "sudo chown -R vinstall:vinstall /home/vinstall/python-deb"


# Create the docker.cfg file in the ansible tree using the VMs IP address
echo 'DOCKER_OPTS="$DOCKER_OPTS --insecure-registry '$ipAddr':5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=http://'$ipAddr':5001"' > ansible/roles/docker/templates/docker.cfg

# Add the voltha vm's information to the ansible tree
echo -e "${lBlue}Add the voltha vm and key to the ansible accessible hosts${NC}"
# Use $vVmName for consistency with the rest of the script.
vIpAddr=$(virsh domifaddr ${vVmName}${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
echo "[voltha]" > ansible/hosts/voltha
echo $vIpAddr >> ansible/hosts/voltha
echo "ansible_ssh_private_key_file: $wd/../.vagrant/machines/voltha${uId}/libvirt/private_key" > ansible/host_vars/$vIpAddr


# Prepare to launch the ansible playbook to configure the installer VM
echo -e "${lBlue}Prepare to launch the ansible playbook to configure the VM${NC}"
echo "[installer]" > ansible/hosts/installer
echo "$ipAddr" >> ansible/hosts/installer
echo "ansible_ssh_private_key_file: $wd/key.pem" > ansible/host_vars/$ipAddr
332
# Launch the ansible playbooks

echo -e "${lBlue}Launching the ${lCyan}volthainstall${lBlue} ansible playbook on the installer vm${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/installer
rtrn=$?
if [ $rtrn -ne 0 ]; then
    echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
    # Propagate failure to the caller (plain 'exit' would return 0 here).
    exit 1
fi


echo -e "${lBlue}Launching the ${lCyan}volthainstall${lBlue} ansible playbook on the voltha vm${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/voltha
rtrn=$?
if [ $rtrn -ne 0 ]; then
    echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
    exit 1
fi
351
352if [ "$testMode" == "yes" ]; then
Sergio Slobodriand24189e2017-06-10 23:27:15 -0400353 echo -e "${lBlue}Testing, the install image ${red}WILL NOT${lBlue} be built${NC}"
Sergio Slobodrian61287792017-06-27 12:14:05 -0400354
355
356 # Reboot the installer
357 echo -e "${lBlue}Rebooting the installer${NC}"
358 ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo telinit 6
359 # Wait for the host to shut down
360 sleep 5
361
362 ctr=0
363 ipAddr=""
364 while [ -z "$ipAddr" ];
365 do
366 echo -e "${lBlue}Waiting for the VM's IP address${NC}"
367 ipAddr=`virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
368 sleep 3
369 if [ $ctr -eq $ipTimeout ]; then
370 echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
371 exit
372 fi
373 ctr=`expr $ctr + 1`
374 done
375
376 echo -e "${lBlue}Running the installer${NC}"
377 echo "~/installer.sh" > tmp_bash_login
378 echo "rm ~/.bash_login" >> tmp_bash_login
379 echo "logout" >> tmp_bash_login
380 scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem tmp_bash_login vinstall@$ipAddr:.bash_login
381 rm -f tmp_bash_login
382 ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr
383
Sergio Slobodrianc5477712017-06-07 11:56:56 -0400384else
385 echo -e "${lBlue}Building, the install image (this can take a while)${NC}"
386 # Create a temporary directory for all the installer files
387 mkdir tmp_installer
388 cp vmTemplate.xml tmp_installer
389 # Shut down the installer vm
390 ctr=0
391 vStat=`virsh list | grep $iVmName`
392 virsh shutdown $iVmName
393 while [ ! -z "$vStat" ];
394 do
395 echo "Waiting for $iVmName to shut down"
396 sleep 2
Sergio Slobodrian497e6e82017-07-18 15:10:38 -0400397 vStat=`virsh list | grep "$iVmName "`
Sergio Slobodrianc5477712017-06-07 11:56:56 -0400398 ctr=`expr $ctr + 1`
399 if [ $ctr -eq $shutdownTimeout ]; then
400 echo -e "${red}Tired of waiting, forcing the VM off${NC}"
401 virsh destroy $iVmName
Sergio Slobodrian497e6e82017-07-18 15:10:38 -0400402 vStat=`virsh list | grep "$iVmName "`
Sergio Slobodrianc5477712017-06-07 11:56:56 -0400403 fi
404 done
405 # Copy the install bootstrap script to the installer directory
406 cp BootstrapInstaller.sh tmp_installer
407 # Copy the private key to access the VM
408 cp key.pem tmp_installer
409 pushd tmp_installer > /dev/null 2>&1
410 # Copy the vm image to the installer directory
411 virsh vol-dumpxml $iVmName.qcow2 default | sed -e 's/<key.*key>//' | sed -e '/^[ ]*$/d' > ${iVmName}_volume.xml
412 virsh pool-create-as installer --type dir --target `pwd`
413 virsh vol-create-from installer ${iVmName}_volume.xml $iVmName.qcow2 --inputpool default
414 virsh pool-destroy installer
415 # The image is copied in as root. It needs to have ownership changed
416 # this will result in a password prompt.
417 sudo chown `whoami`.`whoami` $iVmName.qcow2
418 # Now create the installer tar file
419 tar cjf ../$installerArchive .
420 popd > /dev/null 2>&1
421 # Clean up
422 rm -fr tmp_installer
423 # Final location for the installer
424 rm -fr $installerDirectory
425 mkdir $installerDirectory
Sergio Slobodrian36e16552017-06-19 11:00:45 -0400426 cp deployInstaller.sh $installerDirectory
Sergio Slobodrianc5477712017-06-07 11:56:56 -0400427 # Check the image size and determine if it needs to be split.
428 # To be safe, split the image into chunks smaller than 2G so that
429 # it will fit on a FAT32 volume.
430 fSize=`ls -l $installerArchive | awk '{print $5'}`
431 if [ $fSize -gt 2000000000 ]; then
432 echo -e "${lBlue}Installer file too large, breaking into parts${NC}"
433 # The file is too large, breaking it up into parts
434 sPos=0
435 fnn="00"
436 while dd if=$installerArchive of=${installerDirectory}/${installerPart}$fnn \
437 bs=1900MB count=1 skip=$sPos > /dev/null 2>&1
438 do
439 sPos=`expr $sPos + 1`
440 if [ ! -s ${installerDirectory}/${installerPart}$fnn ]; then
441 rm -f ${installerDirectory}/${installerPart}$fnn
442 break
443 fi
444 if [ $sPos -lt 10 ]; then
445 fnn="0$sPos"
446 else
447 fnn="$sPos"
448 fi
449 done
450 else
451 cp $installerArchive $installerDirectory
452 fi
453 # Clean up
454 rm $installerArchive
Sergio Slobodrianf74fa072017-06-28 09:33:24 -0400455 echo -e "${lBlue}The install image is built and can be found in ${lCyan}$installerDirectory${NC}"
456 echo -e "${lBlue}Copy all the files in ${lCyan}$installerDirectory${lBlue} to the traasnport media${NC}"
Sergio Slobodrianc5477712017-06-07 11:56:56 -0400457fi