blob: 6a45a374dd639d766b2eb3cb1273abe8e170eafc [file] [log] [blame]
#!/bin/bash

# Names of the installer VM and the voltha build VM (libvirt domain names).
iVmName="vInstaller"
vVmName="voltha_voltha"
# Vanilla Ubuntu base image that the installer VM is cloned from.
baseImage="Ubuntu1604LTS"
iVmNetwork="vagrant-libvirt"
# Archive (and split-part prefix) that make up the shippable installer image.
installerArchive="installer.tar.bz2"
installerDirectory="volthaInstaller"
installerPart="installer.part"
# Iteration limits when waiting for a VM to shut down / report an IP address.
shutdownTimeout=5
ipTimeout=10

# Command line argument variables
testMode="no"
rebuildVoltha="no"

# ANSI color escape codes used for console output.
lBlue='\033[1;34m'
green='\033[0;32m'
orange='\033[0;33m'
NC='\033[0m'
red='\033[0;31m'
yellow='\033[1;33m'
dGrey='\033[1;30m'
lGrey='\033[1;37m'
lCyan='\033[1;36m'

# $(...) instead of backticks: nests cleanly and is easier to read.
uId=$(id -u)
wd=$(pwd)
# Parse the script's command line arguments.
# Recognized keywords:
#   test    - enable test mode (sets testMode="yes")
#   rebuild - force a rebuild of the voltha VM (sets rebuildVoltha="yes")
# Unrecognized arguments are silently ignored.
parse_args()
{
    # "$@" (quoted) keeps each argument intact; the original unquoted $@
    # would word-split arguments containing whitespace.
    for i in "$@"
    do
        case "$i" in
        "test" )
            testMode="yes"
            echo -e "${lBlue}Test mode is ${green}enabled${NC}"
            ;;
        "rebuild" )
            rebuildVoltha="yes"
            echo -e "${lBlue}Voltha rebuild is ${green}enabled${NC}"
            ;;
        esac
    done
}


######################################
# MAIN MAIN MAIN MAIN MAIN MAIN MAIN #
######################################
parse_args "$@"
# Validate that vagrant is installed.
echo -e "${lBlue}Ensure that ${lCyan}vagrant${lBlue} is installed${NC}"
vInst=$(which vagrant)

if [ -z "$vInst" ]; then
    # BUG FIX: the downloaded package version (1.9.5) must match the
    # version that gets installed and removed below; the original
    # installed/removed 1.8.5 which was never downloaded.
    wget https://releases.hashicorp.com/vagrant/1.9.5/vagrant_1.9.5_x86_64.deb
    sudo dpkg -i vagrant_1.9.5_x86_64.deb
    rm vagrant_1.9.5_x86_64.deb
fi
unset vInst

# Validate that ansible is installed
echo -e "${lBlue}Ensure that ${lCyan}ansible${lBlue} is installed${NC}"
aInst=$(which ansible)

if [ -z "$aInst" ]; then
    sudo apt-get install -y software-properties-common
    sudo apt-add-repository ppa:ansible/ansible
    sudo apt-get update
    sudo apt-get install -y ansible
fi
# BUG FIX: the original unset vInst here (already unset); aInst was leaked.
unset aInst
77
# Verify if this is intended to be a test environment, if so
# configure the 3 VMs which will be started later to emulate
# the production installation cluster.
if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Test mode ${green}enabled${lBlue}, configure the ${lCyan}ha-serv${lBlue} VMs${NC}"
    # Update the vagrant settings file: make VM names and the docker
    # registry hostname unique per user so multiple test installs can
    # coexist on one host.
    sed -i -e '/server_name/s/.*/server_name: "ha-serv'${uId}'-"/' settings.vagrant.yaml
    sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall'${uId}':5000"/' ansible/group_vars/all
    sed -i -e "/vinstall/s/vinstall/vinstall${uId}/" ../ansible/roles/docker/templates/daemon.json

    # Set the insecure registry configuration based on the installer hostname
    echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${uId}${NC}"
    echo '{' > ansible/roles/voltha/templates/daemon.json
    echo '"insecure-registries" : ["vinstall'${uId}':5000"]' >> ansible/roles/voltha/templates/daemon.json
    echo '}' >> ansible/roles/voltha/templates/daemon.json

    # Change the installer name
    iVmName="vInstaller${uId}"
else
    rm -fr .test
    # Clean out the install config file keeping only the commented lines
    # which serve as documentation.
    sed -i -e '/^#/!d' install.cfg
    # Set the insecure registry configuration based on the installer hostname
    # (typo "inescure" fixed in the message below).
    echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${NC}"
    sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall:5000"/' ansible/group_vars/all
    echo '{' > ansible/roles/voltha/templates/daemon.json
    echo '"insecure-registries" : ["vinstall:5000"]' >> ansible/roles/voltha/templates/daemon.json
    echo '}' >> ansible/roles/voltha/templates/daemon.json
fi
108
# Check to make sure that the vagrant-libvirt network is both defined and started
echo -e "${lBlue}Verify that the ${lCyan}vagrant-libvirt${lBlue} network is defined and started${NC}"
# grep -q replaces "grep … > /dev/null"; exit status alone tells us
# whether the network appears in the virsh listing.
if ! virsh net-list | grep -q "vagrant-libvirt"; then
    # The network isn't running, check if it's defined
    if ! virsh net-list --all | grep -q "vagrant-libvirt"; then
        # Not defined either — define it from the local XML description.
        echo -e "${lBlue}Defining the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
        virsh net-define vagrant-libvirt.xml
    fi
    # Defined (either just now or previously) but not started.
    echo -e "${lBlue}Starting the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
    virsh net-start vagrant-libvirt
else
    echo -e "${lBlue}The ${lCyan}vagrant-libvirt${lBlue} network is ${green} running${NC}"
fi
131
Sergio Slobodrian7c483622017-06-13 15:51:34 -0400132
# Shut down the domain in case it's running.
echo -e "${lBlue}Shut down the ${lCyan}$iVmName${lBlue} VM if running${NC}"
ctr=0
# CONSISTENCY FIX: the trailing space in the grep pattern (used by the
# later greps in this loop) ensures an exact domain-name match so e.g.
# "vInstaller" does not match "vInstaller1000"; the initial grep was
# missing it.
vStat=$(virsh list | grep "$iVmName ")
virsh shutdown $iVmName
while [ ! -z "$vStat" ];
do
    echo "Waiting for $iVmName to shut down"
    sleep 2
    vStat=$(virsh list | grep "$iVmName ")
    ctr=$((ctr + 1))
    if [ $ctr -eq $shutdownTimeout ]; then
        # Graceful shutdown took too long — pull the virtual plug.
        echo -e "${red}Tired of waiting, forcing the VM off${NC}"
        virsh destroy $iVmName
        vStat=$(virsh list | grep "$iVmName ")
    fi
done


# Delete the VM and ignore any errors should they occur
echo -e "${lBlue}Undefining the ${lCyan}$iVmName${lBlue} domain${NC}"
virsh undefine $iVmName

# Remove the associated volume
echo -e "${lBlue}Removing the ${lCyan}$iVmName.qcow2${lBlue} volume${NC}"
virsh vol-delete "${iVmName}.qcow2" default
159
# Clone the base vanilla ubuntu install
echo -e "${lBlue}Cloning the ${lCyan}$baseImage.qcow2${lBlue} to ${lCyan}$iVmName.qcow2${NC}"
virsh vol-clone "${baseImage}.qcow2" "${iVmName}.qcow2" default

# Create the xml file and define the VM for virsh
echo -e "${lBlue}Defining the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
# One sed invocation with two expressions replaces the original
# "cat | sed | sed" pipeline (useless cat, one fewer process).
sed -e "s/{{ VMName }}/$iVmName/g" -e "s/{{ VMNetwork }}/$iVmNetwork/g" vmTemplate.xml > tmp.xml

virsh define tmp.xml

rm tmp.xml

# Start the VM, if it's already running just ignore the error
echo -e "${lBlue}Starting the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
virsh start $iVmName > /dev/null 2>&1

# Generate a keypair for communicating with the VM
echo -e "${lBlue}Generating the key-pair for communication with the VM${NC}"
ssh-keygen -f ./key -t rsa -N ''

mv key key.pem

# Clone BashLogin.sh and add the public key to it for later use.
# The generated script installs the public key on the VM at first login,
# then removes itself and logs out.
echo -e "${lBlue}Creating the pre-configuration script${NC}"
cp BashLogin.sh bash_login.sh
{
    echo "cat <<HERE > .ssh/authorized_keys"
    cat key.pub
    echo "HERE"
    echo "chmod 400 .ssh/authorized_keys"
    echo "rm .bash_login"
    echo "logout"
} >> bash_login.sh
rm key.pub
192
193
194
# Get the VM's IP address
ctr=0
ipAddr=""
while [ -z "$ipAddr" ];
do
    echo -e "${lBlue}Waiting for the VM's IP address${NC}"
    # Strip the header lines from virsh output, take the address column,
    # and drop the trailing /prefix-length.
    ipAddr=$(virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
    sleep 3
    if [ $ctr -eq $ipTimeout ]; then
        echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
        # BUG FIX: exit non-zero so callers can detect the failure
        # (a bare "exit" here returned status 0).
        exit 1
    fi
    ctr=$((ctr + 1))
done

echo -e "${lBlue}The IP address is: ${lCyan}$ipAddr${NC}"

# Copy the pre-config file to the VM
echo -e "${lBlue}Transfering pre-configuration script to the VM${NC}"
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no bash_login.sh vinstall@$ipAddr:.bash_login

rm bash_login.sh

# Run the pre-config file on the VM
echo -e "${lBlue}Running the pre-configuration script on the VM${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no vinstall@$ipAddr
221
# If we're in test mode, change the hostname of the installer vm
# also start the 3 vagrant target VMs
if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Test mode, change the installer host name to ${lCyan}vinstall${uId}${NC}"
    # Make the installer hostname unique per user id so multiple test
    # installs can coexist on the same libvirt host.
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
        sudo hostnamectl set-hostname vinstall${uId}
    # Restart networking on the VM so the new hostname takes effect.
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
        sudo service networking restart

    # Recreate the three ha-serv target VMs from scratch.
    echo -e "${lBlue}Testing, start the ${lCyan}ha-serv${lBlue} VMs${NC}"
    vagrant destroy ha-serv${uId}-{1,2,3}
    vagrant up ha-serv${uId}-{1,2,3}
    # presumably refreshes the install target host list — confirm
    # against devSetHostList.sh.
    ./devSetHostList.sh
fi
236
# Ensure that the voltha VM is running so that images can be secured
echo -e "${lBlue}Ensure that the ${lCyan}voltha VM${lBlue} is running${NC}"
# CONSISTENCY FIX: use the vVmName variable instead of repeating the
# literal domain name "voltha_voltha".
vVm=$(virsh list | grep "${vVmName}${uId}")
#echo "vVm: $vVm"
#echo "rebuildVoltha: $rebuildVoltha"


# Build when the VM isn't running, or when a rebuild was requested.
# [[ ]] with || replaces the deprecated and ambiguous [ … -o … ] form.
if [[ -z "$vVm" || "$rebuildVoltha" == "yes" ]]; then
    if [ "$testMode" == "yes" ]; then
        ./BuildVoltha.sh $1
        rtrn=$?
    else
        # Default to installer mode
        ./BuildVoltha.sh install
        rtrn=$?
    fi
    if [ $rtrn -ne 0 ]; then
        # MESSAGE FIX: the original message ended with a stray
        # "… is running" copy-paste remnant.
        echo -e "${red}Voltha build failed!! ${lCyan}Please review the log and correct${NC}"
        exit 1
    fi
fi
258
# Extract all the image names and tags from the running voltha VM
# when running in test mode. This will provide the entire suite
# of available containers to the VM cluster.

if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Extracting the docker image list from the voltha VM${NC}"
    volIpAddr=$(virsh domifaddr $vVmName${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ../.vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$volIpAddr "docker image ls" > images.tmp
    # Drop images already tagged for the local ":5000" registry and the
    # header line, then emit " - name:tag" YAML list entries.
    # (grep reads the file directly; the original piped it through cat.)
    grep -v 5000 images.tmp | tail -n +2 | awk '{printf(" - %s:%s\n", $1, $2)}' > image-list.cfg
    rm -f images.tmp
    # Replace any existing voltha_containers list in the ansible vars
    # with the freshly extracted one.
    sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
    echo "voltha_containers:" >> ansible/group_vars/all
    cat image-list.cfg >> ansible/group_vars/all
    rm -f image-list.cfg
else
    echo -e "${lBlue}Set up the docker image list from ${lCyan}containers.cfg${NC}"
    sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
    cat containers.cfg >> ansible/group_vars/all
fi
Sergio Slobodrianba9cbd82017-06-22 11:45:49 -0400278
Sergio Slobodrian9d9c8442017-07-25 07:55:42 -0400279
# Install python which is required for ansible
echo -e "${lBlue}Installing ${lCyan}Python${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y install python

# Move all the python deb files to their own directory so they can be installed first
echo -e "${lBlue}Caching ${lCyan}Python${lBlue} install${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr mkdir python-deb
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr "sudo mv /var/cache/apt/archives/*.deb /home/vinstall/python-deb"
# FIX: "user:group" is the standard chown separator; the "user.group"
# dot form is deprecated GNU syntax.
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr "sudo chown -R vinstall:vinstall /home/vinstall/python-deb"
290
Sergio Slobodrianee4b2bc2017-06-05 10:08:59 -0400291
# Create the docker.cfg file in the ansible tree using the VMs IP address
echo 'DOCKER_OPTS="$DOCKER_OPTS --insecure-registry '$ipAddr':5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=http://'$ipAddr':5001"' > ansible/roles/docker/templates/docker.cfg

# Add the voltha vm's information to the ansible tree
echo -e "${lBlue}Add the voltha vm and key to the ansible accessible hosts${NC}"
# CONSISTENCY FIX: use $vVmName rather than the literal "voltha_voltha".
vIpAddr=$(virsh domifaddr ${vVmName}${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
echo "[voltha]" > ansible/hosts/voltha
echo $vIpAddr >> ansible/hosts/voltha
echo "ansible_ssh_private_key_file: $wd/../.vagrant/machines/voltha${uId}/libvirt/private_key" > ansible/host_vars/$vIpAddr


# Prepare to launch the ansible playbook to configure the installer VM
echo -e "${lBlue}Prepare to launch the ansible playbook to configure the VM${NC}"
echo "[installer]" > ansible/hosts/installer
echo "$ipAddr" >> ansible/hosts/installer
echo "ansible_ssh_private_key_file: $wd/key.pem" > ansible/host_vars/$ipAddr
308
# Launch the ansible playbooks

echo -e "${lBlue}Launching the ${lCyan}volthainstall${lBlue} ansible playbook on the installer vm${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/installer
rtrn=$?
if [ $rtrn -ne 0 ]; then
    echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
    # BUG FIX: a bare "exit" here returned status 0 (the status of the
    # preceding echo), hiding the playbook failure from callers.
    exit 1
fi


echo -e "${lBlue}Launching the ${lCyan}volthainstall${lBlue} ansible playbook on the voltha vm${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/voltha
rtrn=$?
if [ $rtrn -ne 0 ]; then
    echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
    exit 1
fi
327
if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Testing, the install image ${red}WILL NOT${lBlue} be built${NC}"


    # Reboot the installer
    echo -e "${lBlue}Rebooting the installer${NC}"
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo telinit 6
    # Wait for the host to shut down
    sleep 5

    # Wait for the rebooted VM to come back up and report an IP address.
    ctr=0
    ipAddr=""
    while [ -z "$ipAddr" ];
    do
        echo -e "${lBlue}Waiting for the VM's IP address${NC}"
        ipAddr=$(virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
        sleep 3
        if [ $ctr -eq $ipTimeout ]; then
            echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
            # BUG FIX: exit non-zero so the timeout is detectable.
            exit 1
        fi
        ctr=$((ctr + 1))
    done

    # Stage a one-shot .bash_login that runs the installer on next
    # login, then removes itself.
    echo -e "${lBlue}Running the installer${NC}"
    echo "~/installer.sh" > tmp_bash_login
    echo "rm ~/.bash_login" >> tmp_bash_login
    echo "logout" >> tmp_bash_login
    scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem tmp_bash_login vinstall@$ipAddr:.bash_login
    rm -f tmp_bash_login
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr
else
    echo -e "${lBlue}Building, the install image (this can take a while)${NC}"
    # Create a temporary directory for all the installer files
    mkdir tmp_installer
    cp vmTemplate.xml tmp_installer
    # Shut down the installer vm
    ctr=0
    # Trailing space => exact domain-name match (consistent with the
    # greps inside the loop, which already had it).
    vStat=$(virsh list | grep "$iVmName ")
    virsh shutdown $iVmName
    while [ ! -z "$vStat" ];
    do
        echo "Waiting for $iVmName to shut down"
        sleep 2
        vStat=$(virsh list | grep "$iVmName ")
        ctr=$((ctr + 1))
        if [ $ctr -eq $shutdownTimeout ]; then
            echo -e "${red}Tired of waiting, forcing the VM off${NC}"
            virsh destroy $iVmName
            vStat=$(virsh list | grep "$iVmName ")
        fi
    done
    # Copy the install bootstrap script to the installer directory
    cp BootstrapInstaller.sh tmp_installer
    # Copy the private key to access the VM
    cp key.pem tmp_installer
    pushd tmp_installer > /dev/null 2>&1
    # Copy the vm image to the installer directory: dump the volume XML
    # (dropping the host-specific <key> element and blank lines), then
    # use a throwaway dir-backed pool to copy the volume here.
    virsh vol-dumpxml $iVmName.qcow2 default | sed -e 's/<key.*key>//' | sed -e '/^[ ]*$/d' > ${iVmName}_volume.xml
    virsh pool-create-as installer --type dir --target $(pwd)
    virsh vol-create-from installer ${iVmName}_volume.xml $iVmName.qcow2 --inputpool default
    virsh pool-destroy installer
    # The image is copied in as root. It needs to have ownership changed
    # this will result in a password prompt.
    # FIX: "user:group" chown separator (dot form is deprecated).
    sudo chown $(whoami):$(whoami) $iVmName.qcow2
    # Now create the installer tar file
    tar cjf ../$installerArchive .
    popd > /dev/null 2>&1
    # Clean up
    rm -fr tmp_installer
    # Final location for the installer
    rm -fr $installerDirectory
    mkdir $installerDirectory
    cp deployInstaller.sh $installerDirectory
    # Check the image size and determine if it needs to be split.
    # To be safe, split the image into chunks smaller than 2G so that
    # it will fit on a FAT32 volume.
    # QUOTING FIX: the awk program was written as '{print $5'} — it
    # happened to work by string concatenation, but the brace belongs
    # inside the quotes.
    fSize=$(ls -l $installerArchive | awk '{print $5}')
    if [ $fSize -gt 2000000000 ]; then
        echo -e "${lBlue}Installer file too large, breaking into parts${NC}"
        # The file is too large, breaking it up into parts
        sPos=0
        fnn="00"
        while dd if=$installerArchive of=${installerDirectory}/${installerPart}$fnn \
            bs=1900MB count=1 skip=$sPos > /dev/null 2>&1
        do
            sPos=$((sPos + 1))
            # dd succeeds with an empty output file once past EOF;
            # remove the empty part and stop.
            if [ ! -s ${installerDirectory}/${installerPart}$fnn ]; then
                rm -f ${installerDirectory}/${installerPart}$fnn
                break
            fi
            # Zero-pad single-digit part numbers so parts sort correctly.
            if [ $sPos -lt 10 ]; then
                fnn="0$sPos"
            else
                fnn="$sPos"
            fi
        done
    else
        cp $installerArchive $installerDirectory
    fi
    # Clean up
    rm $installerArchive
    echo -e "${lBlue}The install image is built and can be found in ${lCyan}$installerDirectory${NC}"
    echo -e "${lBlue}Copy all the files in ${lCyan}$installerDirectory${lBlue} to the transport media${NC}"
fi