blob: 4a79b370514365215610043db8a6a3f8113a5531 [file] [log] [blame]
#!/bin/bash
# Builds the Voltha installer VM and (optionally) a test cluster.
# Configuration constants — names of the VMs, images, networks and
# archive artifacts used throughout the script.

iVmName="vInstaller"             # libvirt domain name of the installer VM
vVmName="voltha_voltha"          # libvirt domain name of the voltha VM
baseImage="Ubuntu1604LTS"        # base qcow2 volume cloned for the installer
iVmNetwork="vagrant-libvirt"     # libvirt network the installer attaches to
installerArchive="installer.tar.bz2"
installerDirectory="volthaInstaller"
installerPart="installer.part"   # prefix for split archive chunks (FAT32 safety)
shutdownTimeout=5                # polls (2s each) before forcing a VM off
ipTimeout=10                     # polls (3s each) waiting for a VM IP address

# Command line argument variables
testMode="no"

# ANSI color codes used by the progress messages.
lBlue='\033[1;34m'
green='\033[0;32m'
orange='\033[0;33m'
NC='\033[0m'
red='\033[0;31m'
yellow='\033[1;33m'
dGrey='\033[1;30m'
lGrey='\033[1;37m'
lCyan='\033[1;36m'

uId=$(id -u)    # used to namespace VM/host names per user in test mode
wd=$(pwd)
31
#######################################
# Parse the command line arguments.
# The only recognized argument is "test", which switches the script into
# test mode (build/verify against local vagrant VMs instead of producing
# a distributable installer image).
# Globals:   testMode (written), lBlue/green/NC (read, for messages)
# Arguments: all command line arguments
#######################################
parse_args()
{
    # Quote "$@" so arguments survive intact (unquoted $@ word-splits).
    for i in "$@"
    do
        case "$i" in
        "test" )
            testMode="yes"
            echo -e "${lBlue}Test mode is ${green}enabled${NC}"
            ;;
        esac
    done
}
44
45
######################################
# MAIN MAIN MAIN MAIN MAIN MAIN MAIN #
######################################
parse_args "$@"
# Validate that vagrant is installed.
echo -e "${lBlue}Ensure that ${lCyan}vagrant${lBlue} is installed${NC}"
vInst=$(command -v vagrant)

if [ -z "$vInst" ]; then
    # BUGFIX: the downloaded and installed package versions must match.
    # The original downloaded 1.9.5 but ran dpkg/rm on 1.8.5, so a fresh
    # host never actually got vagrant installed.
    wget https://releases.hashicorp.com/vagrant/1.9.5/vagrant_1.9.5_x86_64.deb
    sudo dpkg -i vagrant_1.9.5_x86_64.deb
    rm vagrant_1.9.5_x86_64.deb
fi
unset vInst

# Validate that ansible is installed
echo -e "${lBlue}Ensure that ${lCyan}ansible${lBlue} is installed${NC}"
aInst=$(command -v ansible)

if [ -z "$aInst" ]; then
    sudo apt-get install -y software-properties-common
    sudo apt-add-repository ppa:ansible/ansible
    sudo apt-get update
    sudo apt-get install -y ansible
fi
# BUGFIX: was "unset vInst" — the ansible probe variable is aInst.
unset aInst
72
# Verify if this is intended to be a test environment, if so
# configure the 3 VMs which will be started later to emulate
# the production installation cluster.
if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Test mode ${green}enabled${lBlue}, configure the ${lCyan}ha-serv${lBlue} VMs${NC}"
    # Update the vagrant settings file — namespace the server and registry
    # names with the user id so multiple users can test concurrently.
    sed -i -e '/server_name/s/.*/server_name: "ha-serv'${uId}'-"/' settings.vagrant.yaml
    sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall'${uId}':5000"/' ansible/group_vars/all
    sed -i -e "/vinstall/s/vinstall/vinstall${uId}/" ../ansible/roles/docker/templates/daemon.json

    # Set the insecure registry configuration based on the installer hostname
    echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${uId}${NC}"
    echo '{' > ansible/roles/voltha/templates/daemon.json
    echo '"insecure-registries" : ["vinstall'${uId}':5000"]' >> ansible/roles/voltha/templates/daemon.json
    echo '}' >> ansible/roles/voltha/templates/daemon.json

    # Check to make sure that the vagrant-libvirt network is both defined and started
    echo -e "${lBlue}Verify that the ${lCyan}vagrant-libvirt${lBlue} network is defined and started${NC}"
    virsh net-list | grep "vagrant-libvirt" > /dev/null
    if [ $? -eq 1 ]; then
        # The network isn't running, check if it's defined
        virsh net-list --all | grep "vagrant-libvirt" > /dev/null
        if [ $? -eq 1 ]; then
            # Not defined either — create it from the bundled template first.
            echo -e "${lBlue}Defining the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
            virsh net-define vagrant-libvirt.xml
        fi
        # Defined (either just now or previously) but not started.
        echo -e "${lBlue}Starting the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
        virsh net-start vagrant-libvirt
    else
        echo -e "${lBlue}The ${lCyan}vagrant-libvirt${lBlue} network is ${green} running${NC}"
    fi

    # Change the installer name so it is namespaced per user as well.
    iVmName="vInstaller${uId}"
else
    rm -fr .test
    # Clean out the install config file keeping only the commented lines
    # which serve as documentation.
    sed -i -e '/^#/!d' install.cfg
    # Set the insecure registry configuration based on the installer hostname
    echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${uId}${NC}"
    sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall:5000"/' ansible/group_vars/all
    echo '{' > ansible/roles/voltha/templates/daemon.json
    echo '"insecure-registries" : ["vinstall:5000"]' >> ansible/roles/voltha/templates/daemon.json
    echo '}' >> ansible/roles/voltha/templates/daemon.json
fi
126
Sergio Slobodrian7c483622017-06-13 15:51:34 -0400127
# Shut down the domain in case it's running.
echo -e "${lBlue}Shut down the ${lCyan}$iVmName${lBlue} VM if running${NC}"
ctr=0
vStat=$(virsh list | grep "$iVmName")
virsh shutdown "$iVmName"
while [ ! -z "$vStat" ];
do
    echo "Waiting for $iVmName to shut down"
    sleep 2
    vStat=$(virsh list | grep "$iVmName")
    ctr=$((ctr + 1))
    # Graceful shutdown may hang (e.g. no guest agent); force off after
    # shutdownTimeout polls.
    if [ $ctr -eq $shutdownTimeout ]; then
        echo -e "${red}Tired of waiting, forcing the VM off${NC}"
        virsh destroy "$iVmName"
        vStat=$(virsh list | grep "$iVmName")
    fi
done

# Delete the VM and ignore any errors should they occur
echo -e "${lBlue}Undefining the ${lCyan}$iVmName${lBlue} domain${NC}"
virsh undefine "$iVmName"

# Remove the associated volume
echo -e "${lBlue}Removing the ${lCyan}$iVmName.qcow2${lBlue} volume${NC}"
virsh vol-delete "${iVmName}.qcow2" default

# Clone the base vanilla ubuntu install
echo -e "${lBlue}Cloning the ${lCyan}$baseImage.qcow2${lBlue} to ${lCyan}$iVmName.qcow2${NC}"
virsh vol-clone "${baseImage}.qcow2" "${iVmName}.qcow2" default

# Create the xml file and define the VM for virsh.
# (Single sed with two expressions replaces the old cat|sed|sed pipeline.)
echo -e "${lBlue}Defining the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
sed -e "s/{{ VMName }}/$iVmName/g" -e "s/{{ VMNetwork }}/$iVmNetwork/g" vmTemplate.xml > tmp.xml

virsh define tmp.xml

rm tmp.xml
166
# Start the VM; if it's already running just ignore the error.
echo -e "${lBlue}Starting the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
virsh start $iVmName > /dev/null 2>&1

# Generate a keypair for communicating with the VM
echo -e "${lBlue}Generating the key-pair for communication with the VM${NC}"
ssh-keygen -f ./key -t rsa -N ''

mv key key.pem

# Clone BashLogin.sh and append commands that install the freshly
# generated public key, then self-destruct on first login.
echo -e "${lBlue}Creating the pre-configuration script${NC}"
cp BashLogin.sh bash_login.sh
{
    echo "cat <<HERE > .ssh/authorized_keys"
    cat key.pub
    echo "HERE"
    echo "chmod 400 .ssh/authorized_keys"
    echo "rm .bash_login"
    echo "logout"
} >> bash_login.sh
rm key.pub
187
188
189
# Get the VM's IP address — libvirt assigns it via DHCP, so poll until
# domifaddr reports one or we give up.
ctr=0
ipAddr=""
while [ -z "$ipAddr" ];
do
    echo -e "${lBlue}Waiting for the VM's IP address${NC}"
    ipAddr=$(virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
    sleep 3
    if [ $ctr -eq $ipTimeout ]; then
        echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
        # BUGFIX: exit non-zero on timeout; a bare "exit" returned the
        # status of the preceding echo (0), reporting success on failure.
        exit 1
    fi
    ctr=$((ctr + 1))
done

echo -e "${lBlue}The IP address is: ${lCyan}$ipAddr${NC}"

# Copy the pre-config file to the VM
echo -e "${lBlue}Transfering pre-configuration script to the VM${NC}"
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no bash_login.sh vinstall@$ipAddr:.bash_login

rm bash_login.sh

# Run the pre-config file on the VM: logging in triggers .bash_login,
# which installs the key and removes itself.
echo -e "${lBlue}Running the pre-configuration script on the VM${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no vinstall@$ipAddr
216
# If we're in test mode, change the hostname of the installer vm
# also start the 3 vagrant target VMs
if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Test mode, change the installer host name to ${lCyan}vinstall${uId}${NC}"
    # Rename the installer host so per-user registries (vinstall<uid>:5000)
    # configured earlier resolve to this VM.
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
        sudo hostnamectl set-hostname vinstall${uId}
    # Restart networking so the new hostname is picked up.
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
        sudo service networking restart

    # Recreate the three ha-serv target VMs from scratch, then publish
    # their addresses for the installer (devSetHostList.sh).
    echo -e "${lBlue}Testing, start the ${lCyan}ha-serv${lBlue} VMs${NC}"
    vagrant destroy ha-serv${uId}-{1,2,3}
    vagrant up ha-serv${uId}-{1,2,3}
    ./devSetHostList.sh
fi
231
# Ensure that the voltha VM is running so that images can be secured
echo -e "${lBlue}Ensure that the ${lCyan}voltha VM${lBlue} is running${NC}"
# CONSISTENCY: use the vVmName variable rather than repeating the literal
# "voltha_voltha" (the variable is already used for domifaddr below).
vVM=$(virsh list | grep "${vVmName}${uId}")

if [ -z "$vVM" ]; then
    if [ "$testMode" == "yes" ]; then
        ./BuildVoltha.sh "$1"
        rtrn=$?
    else
        # Default to installer mode
        ./BuildVoltha.sh install
        rtrn=$?
    fi
    if [ $rtrn -ne 0 ]; then
        echo -e "${red}Voltha build failed!! ${lCyan}Please review the log and correct${lBlue} is running${NC}"
        exit 1
    fi
fi
250
# Extract all the image names and tags from the running voltha VM
# when running in test mode. This will provide the entire suite
# of available containers to the VM cluster.

if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Extracting the docker image list from the voltha VM${NC}"
    volIpAddr=$(virsh domifaddr ${vVmName}${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ../.vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$volIpAddr "docker image ls" > images.tmp
    # Skip the header row and any locally pushed (":5000") images, then
    # format as a YAML list entry per image.  (grep reads the file
    # directly — no useless cat.)
    grep -v 5000 images.tmp | tail -n +2 | awk '{printf(" - %s:%s\n", $1, $2)}' > image-list.cfg
    rm -f images.tmp
    # Replace any stale voltha_containers section in the ansible vars.
    sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
    echo "voltha_containers:" >> ansible/group_vars/all
    cat image-list.cfg >> ansible/group_vars/all
    rm -f image-list.cfg
else
    echo -e "${lBlue}Set up the docker image list from ${lCyan}containers.cfg${NC}"
    sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
    cat containers.cfg >> ansible/group_vars/all
fi
Sergio Slobodrianba9cbd82017-06-22 11:45:49 -0400270
# Install python which is required for ansible
echo -e "${lBlue}Installing python${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y install python

# Make sure the VM is up-to-date
echo -e "${lBlue}Ensure that the VM is up-to-date${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y upgrade


# Create the docker.cfg file in the ansible tree using the VMs IP address
echo 'DOCKER_OPTS="$DOCKER_OPTS --insecure-registry '$ipAddr':5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=http://'$ipAddr':5001"' > ansible/roles/docker/templates/docker.cfg

# Add the voltha vm's information to the ansible tree
echo -e "${lBlue}Add the voltha vm and key to the ansible accessible hosts${NC}"
# CONSISTENCY: use ${vVmName} rather than the repeated literal.
vIpAddr=$(virsh domifaddr ${vVmName}${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
echo "[voltha]" > ansible/hosts/voltha
echo $vIpAddr >> ansible/hosts/voltha
echo "ansible_ssh_private_key_file: $wd/../.vagrant/machines/voltha${uId}/libvirt/private_key" > ansible/host_vars/$vIpAddr


# Prepare to launch the ansible playbook to configure the installer VM
echo -e "${lBlue}Prepare to launch the ansible playbook to configure the VM${NC}"
echo "[installer]" > ansible/hosts/installer
echo "$ipAddr" >> ansible/hosts/installer
echo "ansible_ssh_private_key_file: $wd/key.pem" > ansible/host_vars/$ipAddr
298
# Launch the ansible playbooks

echo -e "${lBlue}Launching the ${lCyan}volthainstall${lBlue} ansible playbook on the installer vm${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/installer
rtrn=$?
if [ $rtrn -ne 0 ]; then
    echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
    # BUGFIX: exit non-zero; a bare "exit" returned the echo's status (0),
    # so callers saw success even when the playbook failed.
    exit 1
fi


echo -e "${lBlue}Launching the ${lCyan}volthainstall${lBlue} ansible playbook on the voltha vm${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/voltha
rtrn=$?
if [ $rtrn -ne 0 ]; then
    echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
    exit 1
fi
317
if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Testing, the install image ${red}WILL NOT${lBlue} be built${NC}"


    # Reboot the installer so the new hostname/config take full effect.
    echo -e "${lBlue}Rebooting the installer${NC}"
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo telinit 6
    # Wait for the host to shut down
    sleep 5

    # Re-acquire the VM's IP address after the reboot.
    ctr=0
    ipAddr=""
    while [ -z "$ipAddr" ];
    do
        echo -e "${lBlue}Waiting for the VM's IP address${NC}"
        ipAddr=$(virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
        sleep 3
        if [ $ctr -eq $ipTimeout ]; then
            echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
            # BUGFIX: exit non-zero on timeout (bare "exit" returned 0).
            exit 1
        fi
        ctr=$((ctr + 1))
    done

    # Stage a one-shot .bash_login that runs the installer on next login,
    # then log in to trigger it.
    echo -e "${lBlue}Running the installer${NC}"
    echo "~/installer.sh" > tmp_bash_login
    echo "rm ~/.bash_login" >> tmp_bash_login
    echo "logout" >> tmp_bash_login
    scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem tmp_bash_login vinstall@$ipAddr:.bash_login
    rm -f tmp_bash_login
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr

else
    echo -e "${lBlue}Building, the install image (this can take a while)${NC}"
    # Create a temporary directory for all the installer files
    mkdir tmp_installer
    cp vmTemplate.xml tmp_installer
    # Shut down the installer vm so its disk image is quiescent.
    ctr=0
    vStat=$(virsh list | grep "$iVmName")
    virsh shutdown "$iVmName"
    while [ ! -z "$vStat" ];
    do
        echo "Waiting for $iVmName to shut down"
        sleep 2
        vStat=$(virsh list | grep "$iVmName")
        ctr=$((ctr + 1))
        if [ $ctr -eq $shutdownTimeout ]; then
            echo -e "${red}Tired of waiting, forcing the VM off${NC}"
            virsh destroy "$iVmName"
            vStat=$(virsh list | grep "$iVmName")
        fi
    done
    # Copy the install bootstrap script to the installer directory
    cp BootstrapInstaller.sh tmp_installer
    # Copy the private key to access the VM
    cp key.pem tmp_installer
    pushd tmp_installer > /dev/null 2>&1
    # Copy the vm image to the installer directory: dump the volume XML
    # (stripping the host-specific <key> element and blank lines), then
    # use a throwaway dir-backed pool to clone the volume here.
    virsh vol-dumpxml $iVmName.qcow2 default | sed -e 's/<key.*key>//' | sed -e '/^[ ]*$/d' > ${iVmName}_volume.xml
    virsh pool-create-as installer --type dir --target "$(pwd)"
    virsh vol-create-from installer ${iVmName}_volume.xml $iVmName.qcow2 --inputpool default
    virsh pool-destroy installer
    # The image is copied in as root. It needs to have ownership changed
    # this will result in a password prompt.
    # (user:group is the POSIX chown separator; "user.user" is deprecated.)
    sudo chown "$(whoami):$(whoami)" $iVmName.qcow2
    # Now create the installer tar file
    tar cjf ../$installerArchive .
    popd > /dev/null 2>&1
    # Clean up
    rm -fr tmp_installer
    # Final location for the installer
    rm -fr $installerDirectory
    mkdir $installerDirectory
    cp deployInstaller.sh $installerDirectory
    # Check the image size and determine if it needs to be split.
    # To be safe, split the image into chunks smaller than 2G so that
    # it will fit on a FAT32 volume.
    # BUGFIX: awk program quoting was "'{print $5'}" — it only worked by
    # accidental shell string concatenation; quote the whole program.
    fSize=$(ls -l $installerArchive | awk '{print $5}')
    if [ $fSize -gt 2000000000 ]; then
        echo -e "${lBlue}Installer file too large, breaking into parts${NC}"
        # The file is too large, breaking it up into parts
        sPos=0
        fnn="00"
        while dd if=$installerArchive of=${installerDirectory}/${installerPart}$fnn \
            bs=1900MB count=1 skip=$sPos > /dev/null 2>&1
        do
            sPos=$((sPos + 1))
            # dd creates an empty file one block past the end; remove it
            # and stop.
            if [ ! -s ${installerDirectory}/${installerPart}$fnn ]; then
                rm -f ${installerDirectory}/${installerPart}$fnn
                break
            fi
            # Zero-pad single digit part numbers so parts sort correctly.
            if [ $sPos -lt 10 ]; then
                fnn="0$sPos"
            else
                fnn="$sPos"
            fi
        done
    else
        cp $installerArchive $installerDirectory
    fi
    # Clean up
    rm $installerArchive
    echo -e "${lBlue}The install image is built and can be found in ${lCyan}$installerDirectory${NC}"
    echo -e "${lBlue}Copy all the files in ${lCyan}$installerDirectory${lBlue} to the transport media${NC}"
fi