blob: 14244f4d6a870e3a4c492762b864dc88cec25976 [file] [log] [blame]
#!/bin/bash

# --- Installer configuration -------------------------------------------------
iVmName="vInstaller"            # libvirt domain name of the installer VM
vVmName="voltha_voltha"         # libvirt domain name of the voltha build VM
baseImage="Ubuntu1604LTS"       # base qcow2 volume the installer VM is cloned from
iVmNetwork="vagrant-libvirt"    # libvirt network the installer VM attaches to
installerArchive="installer.tar.bz2"
installerDirectory="volthaInstaller"
installerPart="installer.part"  # prefix for archive chunks when split for FAT32
shutdownTimeout=5               # iterations (2s each) to wait for clean VM shutdown
ipTimeout=10                    # iterations (3s each) to wait for the VM's IP

# Command line argument variables
testMode="no"
rebuildVoltha="no"



# ANSI colour codes used by the progress messages below.
lBlue='\033[1;34m'
green='\033[0;32m'
orange='\033[0;33m'
NC='\033[0m'
red='\033[0;31m'
yellow='\033[1;33m'
dGrey='\033[1;30m'
lGrey='\033[1;37m'
lCyan='\033[1;36m'

uId=$(id -u)   # uniquifies VM / host names when several users share the host
wd=$(pwd)      # remembered so ansible host_vars can reference absolute paths
#######################################
# Parse the command line arguments.
# Recognized words:
#   test    - enable test mode (sets testMode="yes")
#   rebuild - force a rebuild of the voltha VM (sets rebuildVoltha="yes")
# Unrecognized words are silently ignored.
# Globals: testMode, rebuildVoltha (written); colour codes (read)
#######################################
parse_args()
{
	# "$@" (quoted) so arguments survive word-splitting intact.
	for i in "$@"
	do
		case "$i" in
		"test" )
			testMode="yes"
			echo -e "${lBlue}Test mode is ${green}enabled${NC}"
			;;
		"rebuild" )
			rebuildVoltha="yes"
			echo -e "${lBlue}Voltha rebuild is ${green}enabled${NC}"
			;;
		esac
	done
}


######################################
# MAIN MAIN MAIN MAIN MAIN MAIN MAIN #
######################################
parse_args "$@"
# Validate that vagrant is installed.
echo -e "${lBlue}Ensure that ${lCyan}vagrant${lBlue} is installed${NC}"
vInst=$(command -v vagrant)

if [ -z "$vInst" ]; then
	# NOTE: the downloaded and installed versions must match — this
	# previously fetched 1.9.5 but installed/removed 1.8.5.
	wget https://releases.hashicorp.com/vagrant/1.9.5/vagrant_1.9.5_x86_64.deb
	sudo dpkg -i vagrant_1.9.5_x86_64.deb
	rm vagrant_1.9.5_x86_64.deb
fi
unset vInst

# Validate that ansible is installed
echo -e "${lBlue}Ensure that ${lCyan}ansible${lBlue} is installed${NC}"
aInst=$(command -v ansible)

if [ -z "$aInst" ]; then
	sudo apt-get install -y software-properties-common
	sudo apt-add-repository ppa:ansible/ansible
	sudo apt-get update
	sudo apt-get install -y ansible
fi
unset aInst   # was mistakenly 'unset vInst'
77
# Verify if this is intended to be a test environment, if so
# configure the 3 VMs which will be started later to emulate
# the production installation cluster.
if [ "$testMode" == "yes" ]; then
	echo -e "${lBlue}Test mode ${green}enabled${lBlue}, configure the ${lCyan}ha-serv${lBlue} VMs${NC}"
	# Update the vagrant settings file; names are suffixed with the user id
	# so multiple users can run test clusters on the same host.
	sed -i -e '/server_name/s/.*/server_name: "ha-serv'${uId}'-"/' settings.vagrant.yaml
	sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall'${uId}':5000"/' ansible/group_vars/all
	sed -i -e "/vinstall/s/vinstall/vinstall${uId}/" ../ansible/roles/docker/templates/daemon.json

	# Set the insecure registry configuration based on the installer hostname
	echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${uId}${NC}"
	echo '{' > ansible/roles/voltha/templates/daemon.json
	echo '"insecure-registries" : ["vinstall'${uId}':5000"]' >> ansible/roles/voltha/templates/daemon.json
	echo '}' >> ansible/roles/voltha/templates/daemon.json

	# Check to make sure that the vagrant-libvirt network is both defined and started
	echo -e "${lBlue}Verify that the ${lCyan}vagrant-libvirt${lBlue} network is defined and started${NC}"
	virsh net-list | grep "vagrant-libvirt" > /dev/null
	rtrn=$?
	if [ $rtrn -eq 1 ]; then
		# The network isn't running, check if it's defined
		virsh net-list --all | grep "vagrant-libvirt" > /dev/null
		rtrn=$?
		if [ $rtrn -eq 1 ]; then
			# Not defined either
			echo -e "${lBlue}Defining the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
			virsh net-define vagrant-libvirt.xml
			echo -e "${lBlue}Starting the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
			virsh net-start vagrant-libvirt
		else
			# Defined but not started
			echo -e "${lBlue}Starting the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
			virsh net-start vagrant-libvirt
		fi
	else
		echo -e "${lBlue}The ${lCyan}vagrant-libvirt${lBlue} network is ${green}running${NC}"
	fi

	# Change the installer name so each user gets a private installer VM
	iVmName="vInstaller${uId}"
else
	rm -fr .test
	# Clean out the install config file keeping only the commented lines
	# which serve as documentation.
	sed -i -e '/^#/!d' install.cfg
	# Set the insecure registry configuration based on the installer hostname
	echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${NC}"
	sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall:5000"/' ansible/group_vars/all
	echo '{' > ansible/roles/voltha/templates/daemon.json
	echo '"insecure-registries" : ["vinstall:5000"]' >> ansible/roles/voltha/templates/daemon.json
	echo '}' >> ansible/roles/voltha/templates/daemon.json
fi
131
Sergio Slobodrian7c483622017-06-13 15:51:34 -0400132
# Shut down the domain in case it's running.
echo -e "${lBlue}Shut down the ${lCyan}$iVmName${lBlue} VM if running${NC}"
ctr=0
# The trailing space in the grep pattern avoids matching other domains that
# merely share the prefix (e.g. vInstaller1 vs vInstaller10); the initial
# check previously lacked it while the in-loop checks had it.
vStat=$(virsh list | grep "$iVmName ")
virsh shutdown $iVmName
while [ ! -z "$vStat" ];
do
	echo "Waiting for $iVmName to shut down"
	sleep 2
	vStat=$(virsh list | grep "$iVmName ")
	ctr=$((ctr + 1))
	if [ $ctr -eq $shutdownTimeout ]; then
		echo -e "${red}Tired of waiting, forcing the VM off${NC}"
		virsh destroy $iVmName
		vStat=$(virsh list | grep "$iVmName ")
	fi
done
150
151
# Delete the VM and ignore any errors should they occur
echo -e "${lBlue}Undefining the ${lCyan}$iVmName${lBlue} domain${NC}"
virsh undefine $iVmName

# Remove the associated volume
echo -e "${lBlue}Removing the ${lCyan}$iVmName.qcow2${lBlue} volume${NC}"
virsh vol-delete "${iVmName}.qcow2" default

# Clone the base vanilla ubuntu install
echo -e "${lBlue}Cloning the ${lCyan}$baseImage.qcow2${lBlue} to ${lCyan}$iVmName.qcow2${NC}"
virsh vol-clone "${baseImage}.qcow2" "${iVmName}.qcow2" default

# Create the xml file and define the VM for virsh.
# A single sed call replaces the former 'cat | sed | sed' pipeline.
echo -e "${lBlue}Defining the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
sed -e "s/{{ VMName }}/$iVmName/g" -e "s/{{ VMNetwork }}/$iVmNetwork/g" vmTemplate.xml > tmp.xml

virsh define tmp.xml

rm tmp.xml

# Start the VM, if it's already running just ignore the error
echo -e "${lBlue}Starting the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
virsh start $iVmName > /dev/null 2>&1

# Generate a keypair for communicating with the VM
echo -e "${lBlue}Generating the key-pair for communication with the VM${NC}"
ssh-keygen -f ./key -t rsa -N ''

mv key key.pem

# Clone BashLogin.sh and add the public key to it for later use.
# The resulting .bash_login installs the key, removes itself and logs out.
echo -e "${lBlue}Creating the pre-configuration script${NC}"
cp BashLogin.sh bash_login.sh
echo "cat <<HERE > .ssh/authorized_keys" >> bash_login.sh
cat key.pub >> bash_login.sh
echo "HERE" >> bash_login.sh
echo "chmod 400 .ssh/authorized_keys" >> bash_login.sh
echo "rm .bash_login" >> bash_login.sh
echo "logout" >> bash_login.sh
rm key.pub
192
193
194
# Get the VM's IP address; poll virsh until DHCP has handed one out.
ctr=0
ipAddr=""
while [ -z "$ipAddr" ];
do
	echo -e "${lBlue}Waiting for the VM's IP address${NC}"
	# Column 4 of 'virsh domifaddr' is "addr/prefix"; strip the prefix.
	ipAddr=$(virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
	sleep 3
	if [ $ctr -eq $ipTimeout ]; then
		echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
		exit 1
	fi
	ctr=$((ctr + 1))
done

echo -e "${lBlue}The IP address is: ${lCyan}$ipAddr${NC}"

# Copy the pre-config file to the VM
echo -e "${lBlue}Transferring pre-configuration script to the VM${NC}"
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no bash_login.sh vinstall@$ipAddr:.bash_login

rm bash_login.sh

# Run the pre-config file on the VM. The .bash_login runs on login,
# installs the public key, then logs out again.
echo -e "${lBlue}Running the pre-configuration script on the VM${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no vinstall@$ipAddr
221
# If we're in test mode, change the hostname of the installer vm
# also start the 3 vagrant target VMs
if [ "$testMode" == "yes" ]; then
	# Shared ssh options: no host-key persistence/checking, installer key.
	sshOpts=(-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem)
	echo -e "${lBlue}Test mode, change the installer host name to ${lCyan}vinstall${uId}${NC}"
	ssh "${sshOpts[@]}" vinstall@$ipAddr \
		sudo hostnamectl set-hostname vinstall${uId}
	ssh "${sshOpts[@]}" vinstall@$ipAddr \
		sudo service networking restart

	echo -e "${lBlue}Testing, start the ${lCyan}ha-serv${lBlue} VMs${NC}"
	vagrant destroy ha-serv${uId}-{1,2,3}
	vagrant up ha-serv${uId}-{1,2,3}
	./devSetHostList.sh
fi
236
# Ensure that the voltha VM is running so that images can be secured
echo -e "${lBlue}Ensure that the ${lCyan}voltha VM${lBlue} is running${NC}"
vVm=$(virsh list | grep "voltha_voltha${uId}")
#echo "vVm: $vVm"
#echo "rebuildVoltha: $rebuildVoltha"


# (Re)build the voltha VM when it isn't running or a rebuild was requested.
# '||' between two tests replaces the deprecated '-o' operator.
if [ -z "$vVm" ] || [ "$rebuildVoltha" == "yes" ]; then
	if [ "$testMode" == "yes" ]; then
		./BuildVoltha.sh $1
		rtrn=$?
	else
		# Default to installer mode
		./BuildVoltha.sh install
		rtrn=$?
	fi
	if [ $rtrn -ne 0 ]; then
		echo -e "${red}Voltha build failed!! ${lCyan}Please review the log and correct the problem${NC}"
		exit 1
	fi
fi
258
# Extract all the image names and tags from the running voltha VM
# when running in test mode. This will provide the entire suite
# of available containers to the VM cluster.

if [ "$testMode" == "yes" ]; then
	echo -e "${lBlue}Extracting the docker image list from the voltha VM${NC}"
	volIpAddr=$(virsh domifaddr $vVmName${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ../.vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$volIpAddr "docker image ls" > images.tmp
	# Drop images already tagged for the local (:5000) registry and the
	# header line, then emit " - name:tag" YAML list entries.
	grep -v 5000 images.tmp | tail -n +2 | awk '{printf(" - %s:%s\n", $1, $2)}' > image-list.cfg
	rm -f images.tmp
	# Replace everything from 'voltha_containers:' to EOF with the new list.
	sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
	echo "voltha_containers:" >> ansible/group_vars/all
	cat image-list.cfg >> ansible/group_vars/all
	rm -f image-list.cfg
else
	echo -e "${lBlue}Set up the docker image list from ${lCyan}containers.cfg${NC}"
	sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
	cat containers.cfg >> ansible/group_vars/all
fi
Sergio Slobodrianba9cbd82017-06-22 11:45:49 -0400278
# Install python which is required for ansible
echo -e "${lBlue}Installing python${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y install python

# Make sure the VM is up-to-date
echo -e "${lBlue}Ensure that the VM is up-to-date${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y upgrade


# Create the docker.cfg file in the ansible tree using the VMs IP address
echo 'DOCKER_OPTS="$DOCKER_OPTS --insecure-registry '$ipAddr':5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=http://'$ipAddr':5001"' > ansible/roles/docker/templates/docker.cfg

# Add the voltha vm's information to the ansible tree
echo -e "${lBlue}Add the voltha vm and key to the ansible accessible hosts${NC}"
# Use the configured VM name instead of duplicating the 'voltha_voltha' literal.
vIpAddr=$(virsh domifaddr ${vVmName}${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
echo "[voltha]" > ansible/hosts/voltha
echo $vIpAddr >> ansible/hosts/voltha
echo "ansible_ssh_private_key_file: $wd/../.vagrant/machines/voltha${uId}/libvirt/private_key" > ansible/host_vars/$vIpAddr


# Prepare to launch the ansible playbook to configure the installer VM
echo -e "${lBlue}Prepare to launch the ansible playbook to configure the VM${NC}"
echo "[installer]" > ansible/hosts/installer
echo "$ipAddr" >> ansible/hosts/installer
echo "ansible_ssh_private_key_file: $wd/key.pem" > ansible/host_vars/$ipAddr
306
# Launch the ansible playbooks

echo -e "${lBlue}Launching the ${lCyan}volthainstall${lBlue} ansible playbook on the installer vm${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/installer
rtrn=$?
if [ $rtrn -ne 0 ]; then
	echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
	exit 1
fi


echo -e "${lBlue}Launching the ${lCyan}volthainstall${lBlue} ansible playbook on the voltha vm${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/voltha
rtrn=$?
if [ $rtrn -ne 0 ]; then
	echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
	exit 1
fi
325
if [ "$testMode" == "yes" ]; then
	echo -e "${lBlue}Testing, the install image ${red}WILL NOT${lBlue} be built${NC}"


	# Reboot the installer
	echo -e "${lBlue}Rebooting the installer${NC}"
	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo telinit 6
	# Wait for the host to shut down
	sleep 5

	# Poll for the VM's IP address to come back after the reboot.
	ctr=0
	ipAddr=""
	while [ -z "$ipAddr" ];
	do
		echo -e "${lBlue}Waiting for the VM's IP address${NC}"
		ipAddr=$(virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
		sleep 3
		if [ $ctr -eq $ipTimeout ]; then
			echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
			exit 1
		fi
		ctr=$((ctr + 1))
	done

	# Stage a one-shot .bash_login that runs the installer on next login.
	echo -e "${lBlue}Running the installer${NC}"
	echo "~/installer.sh" > tmp_bash_login
	echo "rm ~/.bash_login" >> tmp_bash_login
	echo "logout" >> tmp_bash_login
	scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem tmp_bash_login vinstall@$ipAddr:.bash_login
	rm -f tmp_bash_login
	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr

else
	echo -e "${lBlue}Building the install image (this can take a while)${NC}"
	# Create a temporary directory for all the installer files
	mkdir tmp_installer
	cp vmTemplate.xml tmp_installer
	# Shut down the installer vm
	ctr=0
	vStat=$(virsh list | grep "$iVmName ")
	virsh shutdown $iVmName
	while [ ! -z "$vStat" ];
	do
		echo "Waiting for $iVmName to shut down"
		sleep 2
		vStat=$(virsh list | grep "$iVmName ")
		ctr=$((ctr + 1))
		if [ $ctr -eq $shutdownTimeout ]; then
			echo -e "${red}Tired of waiting, forcing the VM off${NC}"
			virsh destroy $iVmName
			vStat=$(virsh list | grep "$iVmName ")
		fi
	done
	# Copy the install bootstrap script to the installer directory
	cp BootstrapInstaller.sh tmp_installer
	# Copy the private key to access the VM
	cp key.pem tmp_installer
	pushd tmp_installer > /dev/null 2>&1
	# Copy the vm image to the installer directory; strip the host-specific
	# <key> element and blank lines from the volume XML before re-creating it.
	virsh vol-dumpxml $iVmName.qcow2 default | sed -e 's/<key.*key>//' | sed -e '/^[ ]*$/d' > ${iVmName}_volume.xml
	virsh pool-create-as installer --type dir --target $(pwd)
	virsh vol-create-from installer ${iVmName}_volume.xml $iVmName.qcow2 --inputpool default
	virsh pool-destroy installer
	# The image is copied in as root. It needs to have ownership changed
	# this will result in a password prompt.
	sudo chown $(whoami):$(whoami) $iVmName.qcow2
	# Now create the installer tar file
	tar cjf ../$installerArchive .
	popd > /dev/null 2>&1
	# Clean up
	rm -fr tmp_installer
	# Final location for the installer
	rm -fr $installerDirectory
	mkdir $installerDirectory
	cp deployInstaller.sh $installerDirectory
	# Check the image size and determine if it needs to be split.
	# To be safe, split the image into chunks smaller than 2G so that
	# it will fit on a FAT32 volume.
	fSize=$(ls -l $installerArchive | awk '{print $5}')
	if [ $fSize -gt 2000000000 ]; then
		echo -e "${lBlue}Installer file too large, breaking into parts${NC}"
		# The file is too large, breaking it up into parts
		sPos=0
		fnn="00"
		# dd exits non-zero past EOF, terminating the loop; an empty last
		# chunk (file size an exact multiple of bs) is removed below.
		while dd if=$installerArchive of=${installerDirectory}/${installerPart}$fnn \
			bs=1900MB count=1 skip=$sPos > /dev/null 2>&1
		do
			sPos=$((sPos + 1))
			if [ ! -s ${installerDirectory}/${installerPart}$fnn ]; then
				rm -f ${installerDirectory}/${installerPart}$fnn
				break
			fi
			if [ $sPos -lt 10 ]; then
				fnn="0$sPos"
			else
				fnn="$sPos"
			fi
		done
	else
		cp $installerArchive $installerDirectory
	fi
	# Clean up
	rm $installerArchive
	echo -e "${lBlue}The install image is built and can be found in ${lCyan}$installerDirectory${NC}"
	echo -e "${lBlue}Copy all the files in ${lCyan}$installerDirectory${lBlue} to the transport media${NC}"
fi
Sergio Slobodrianc5477712017-06-07 11:56:56 -0400431fi