#!/bin/bash

iVmName="vInstaller"
vVmName="voltha_voltha"
baseImage="Ubuntu1604LTS"
iVmNetwork="vagrant-libvirt"
installerArchive="installer.tar.bz2"
installerDirectory="volthaInstaller"
installerPart="installer.part"
shutdownTimeout=5
ipTimeout=10

# Command line argument variables
testMode="no"
rebuildVoltha="no"
useKubernetes="no"

lBlue='\033[1;34m'
green='\033[0;32m'
orange='\033[0;33m'
NC='\033[0m'
red='\033[0;31m'
yellow='\033[1;33m'
dGrey='\033[1;30m'
lGrey='\033[1;37m'
lCyan='\033[1;36m'

uId=`id -u`
wd=`pwd`

parse_args()
{
    for i in $@
    do
        case "$i" in
        "test" )
            testMode="yes"
            echo -e "${lBlue}Test mode is ${green}enabled${NC}"
            ;;
        "rebuild" )
            rebuildVoltha="yes"
            echo -e "${lBlue}Voltha rebuild is ${green}enabled${NC}"
            ;;
        "k8s" )
            useKubernetes="yes"
            echo -e "${lBlue}Kubernetes framework is ${green}enabled${NC}"
            ;;
        esac
    done
}
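
# Usage note: the script accepts any combination of the keywords "test",
# "rebuild" and "k8s" as positional arguments, e.g.
#   ./CreateInstaller.sh test k8s
# (the script's file name is assumed here; invoke it as named in your tree)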

######################################
# MAIN MAIN MAIN MAIN MAIN MAIN MAIN #
######################################
parse_args $@
# Validate that vagrant is installed.
echo -e "${lBlue}Ensure that ${lCyan}vagrant${lBlue} is installed${NC}"
vInst=`which vagrant`

if [ -z "$vInst" ]; then
    wget https://releases.hashicorp.com/vagrant/1.9.5/vagrant_1.9.5_x86_64.deb
    sudo dpkg -i vagrant_1.9.5_x86_64.deb
    rm vagrant_1.9.5_x86_64.deb
fi
unset vInst

# Validate that ansible is installed
echo -e "${lBlue}Ensure that ${lCyan}ansible${lBlue} is installed${NC}"
aInst=`which ansible`

if [ -z "$aInst" ]; then
    sudo apt-get install -y software-properties-common
    sudo apt-add-repository ppa:ansible/ansible
    sudo apt-get update
    sudo apt-get install -y ansible
fi
unset aInst

# Check whether this is intended to be a test environment; if so,
# configure the 3 VMs which will be started later to emulate
# the production installation cluster.
if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Test mode ${green}enabled${lBlue}, configure the ${lCyan}ha-serv${lBlue} VMs${NC}"
    # Update the vagrant settings file
    sed -i -e '/server_name/s/.*/server_name: "ha-serv'${uId}'-"/' settings.vagrant.yaml
    sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall'${uId}':5000"/' ansible/group_vars/all
    sed -i -e "/vinstall/s/vinstall/vinstall${uId}/" ../ansible/roles/docker/templates/daemon.json

    # Set the insecure registry configuration based on the installer hostname
    echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${uId}${NC}"
    echo '{' > ansible/roles/voltha/templates/daemon.json
    echo '"insecure-registries" : ["vinstall'${uId}':5000"]' >> ansible/roles/voltha/templates/daemon.json
    echo '}' >> ansible/roles/voltha/templates/daemon.json

    # Change the installer name
    iVmName="vInstaller${uId}"
else
    rm -fr .test
    # Clean out the install config file keeping only the commented lines
    # which serve as documentation.
    sed -i -e '/^#/!d' install.cfg
    # Set the insecure registry configuration based on the installer hostname
    echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${NC}"
    sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall:5000"/' ansible/group_vars/all
    echo '{' > ansible/roles/voltha/templates/daemon.json
    echo '"insecure-registries" : ["vinstall:5000"]' >> ansible/roles/voltha/templates/daemon.json
    echo '}' >> ansible/roles/voltha/templates/daemon.json
fi
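
# The daemon.json generated above amounts to (hostname varies with mode):
#   { "insecure-registries" : ["vinstall:5000"] }
# letting cluster nodes pull images from the installer's plain-HTTP registry.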

# Check to make sure that the vagrant-libvirt network is both defined and started
echo -e "${lBlue}Verify that the ${lCyan}vagrant-libvirt${lBlue} network is defined and started${NC}"
virsh net-list --all | grep "vagrant-libvirt" > /dev/null
rtrn=$?
if [ $rtrn -eq 1 ]; then
    # Not defined
    echo -e "${lBlue}Defining the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
    virsh net-define vagrant-libvirt.xml
    echo -e "${lBlue}Starting the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
    virsh net-start vagrant-libvirt
else
    virsh net-list | grep "vagrant-libvirt" > /dev/null
    rtrn=$?
    if [ $rtrn -eq 1 ]; then
        # Defined but not started
        echo -e "${lBlue}Starting the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
        virsh net-start vagrant-libvirt
    else
        # Defined and running
        echo -e "${lBlue}The ${lCyan}vagrant-libvirt${lBlue} network is ${green}running${NC}"
    fi
fi
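
# Note: grep exits 0 on a match and 1 on no match, so rtrn -eq 1 above means
# the network was absent from the listing; `net-list --all` also shows
# inactive networks while plain `net-list` lists only active ones.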

# Check that the default storage pool exists and create it if it doesn't
virsh pool-list --all | grep default > /dev/null
rtrn=$?
if [ $rtrn -eq 1 ]; then
    # Not defined
    echo -e "${lBlue}Defining the ${lCyan}default${lBlue} storage pool${NC}"
    virsh pool-define-as --name default --type dir --target /var/lib/libvirt/images/
    virsh pool-autostart default
    echo -e "${lBlue}Starting the ${lCyan}default${lBlue} storage pool${NC}"
    virsh pool-start default
else
    virsh pool-list | grep default > /dev/null
    rtrn=$?
    if [ $rtrn -eq 1 ]; then
        # Defined but not started
        echo -e "${lBlue}Starting the ${lCyan}default${lBlue} storage pool${NC}"
        virsh pool-start default
    else
        # Defined and running
        echo -e "${lBlue}The ${lCyan}default${lBlue} storage pool is ${green}running${NC}"
    fi
fi

# Shut down the domain in case it's running.
echo -e "${lBlue}Shut down the ${lCyan}$iVmName${lBlue} VM if running${NC}"
ctr=0
vStat=`virsh list | grep "$iVmName "`
virsh shutdown $iVmName
while [ ! -z "$vStat" ];
do
    echo "Waiting for $iVmName to shut down"
    sleep 2
    vStat=`virsh list | grep "$iVmName "`
    ctr=`expr $ctr + 1`
    if [ $ctr -eq $shutdownTimeout ]; then
        echo -e "${red}Tired of waiting, forcing the VM off${NC}"
        virsh destroy $iVmName
        vStat=`virsh list | grep "$iVmName "`
    fi
done
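
# The trailing space in `grep "$iVmName "` keeps a short name such as
# vInstaller1 from also matching vInstaller10 in the virsh listing.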

# Delete the VM and ignore any errors should they occur
echo -e "${lBlue}Undefining the ${lCyan}$iVmName${lBlue} domain${NC}"
virsh undefine $iVmName

# Remove the associated volume
echo -e "${lBlue}Removing the ${lCyan}$iVmName.qcow2${lBlue} volume${NC}"
virsh vol-delete "${iVmName}.qcow2" default

# Clone the base vanilla ubuntu install
echo -e "${lBlue}Cloning the ${lCyan}$baseImage.qcow2${lBlue} to ${lCyan}$iVmName.qcow2${NC}"
virsh vol-clone "${baseImage}.qcow2" "${iVmName}.qcow2" default

# Create the xml file and define the VM for virsh
echo -e "${lBlue}Defining the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
cat vmTemplate.xml | sed -e "s/{{ VMName }}/$iVmName/g" | sed -e "s/{{ VMNetwork }}/$iVmNetwork/g" > tmp.xml

virsh define tmp.xml

rm tmp.xml

# Start the VM; if it's already running just ignore the error
echo -e "${lBlue}Starting the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
virsh start $iVmName > /dev/null 2>&1

# Generate a keypair for communicating with the VM
echo -e "${lBlue}Generating the key-pair for communication with the VM${NC}"
ssh-keygen -f ./key -t rsa -N ''

mv key key.pem

# Clone BashLogin.sh and add the public key to it for later use.
echo -e "${lBlue}Creating the pre-configuration script${NC}"
cp BashLogin.sh bash_login.sh
echo "cat <<HERE > .ssh/authorized_keys" >> bash_login.sh
cat key.pub >> bash_login.sh
echo "HERE" >> bash_login.sh
echo "chmod 400 .ssh/authorized_keys" >> bash_login.sh
echo "rm .bash_login" >> bash_login.sh
echo "logout" >> bash_login.sh
rm key.pub
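
# The modified bash_login.sh is installed on the VM as ~/.bash_login below,
# so it runs once at the first password-based login: it installs the public
# key, removes itself, and logs out, leaving key-only access via key.pem.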

# Get the VM's IP address
ctr=0
ipAddr=""
while [ -z "$ipAddr" ];
do
    echo -e "${lBlue}Waiting for the VM's IP address${NC}"
    ipAddr=`virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
    sleep 3
    if [ $ctr -eq $ipTimeout ]; then
        echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
        exit
    fi
    ctr=`expr $ctr + 1`
done

echo -e "${lBlue}The IP address is: ${lCyan}$ipAddr${NC}"
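
# domifaddr prints a 2-line header (hence `tail -n +3`); column 4 holds the
# address in CIDR form (e.g. 192.168.121.42/24) and the sed strips the
# /prefix, leaving the bare IP.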

# Copy the pre-config file to the VM
echo -e "${lBlue}Transferring pre-configuration script to the VM${NC}"
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no bash_login.sh vinstall@$ipAddr:.bash_login

rm bash_login.sh

# Run the pre-config file on the VM
echo -e "${lBlue}Running the pre-configuration script on the VM${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no vinstall@$ipAddr

# If we're in test mode, change the hostname of the installer vm
# and also start the 3 vagrant target VMs
if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Test mode, change the installer host name to ${lCyan}vinstall${uId}${NC}"
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
        sudo hostnamectl set-hostname vinstall${uId}
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
        sudo service networking restart

    echo -e "${lBlue}Testing, start the ${lCyan}ha-serv${lBlue} VMs${NC}"
    vagrant destroy ha-serv${uId}-{1,2,3}
    vagrant up ha-serv${uId}-{1,2,3}
    ./devSetHostList.sh

    if [ "$useKubernetes" == "yes" ]; then
        ./devSetKubernetes.sh
    fi
fi
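
# Suffixing the VM and host names with the caller's numeric uid (${uId})
# presumably lets several developers run independent test clusters on the
# same build host without libvirt domain or registry name collisions.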

# Ensure that the voltha VM is running so that images can be secured
echo -e "${lBlue}Ensure that the ${lCyan}voltha VM${lBlue} is running${NC}"
vVm=`virsh list | grep "voltha_voltha${uId}"`
#echo "vVm: $vVm"
#echo "rebuildVoltha: $rebuildVoltha"

if [ -z "$vVm" -o "$rebuildVoltha" == "yes" ]; then
    if [ "$testMode" == "yes" ]; then
        ./BuildVoltha.sh "test"
        rtrn=$?
    else
        # Default to installer mode
        ./BuildVoltha.sh "install"
        rtrn=$?
    fi
    if [ $rtrn -ne 0 ]; then
        echo -e "${red}Voltha build failed!! ${lCyan}Please review the log and correct the problem${NC}"
        exit 1
    fi

    if [ "$useKubernetes" == "yes" ]; then
        # Load required k8s libraries on the voltha instance
        ./preloadKubernetes.sh
    fi
fi
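
# BuildVoltha.sh (not shown here) is expected to bring the voltha VM up via
# vagrant; the "test" / "install" argument selects the build variant.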

# Extract all the image names and tags from the running voltha VM
# when running in test mode. This will provide the entire suite
# of available containers to the VM cluster.

if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Extracting the docker image list from the voltha VM${NC}"
    volIpAddr=`virsh domifaddr $vVmName${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ../.vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$volIpAddr "docker image ls" > images.tmp
    # Construct list of images; exclude all entries that point to the registry
    cat images.tmp | grep -v :5000 | tail -n +2 | awk '{printf(" - %s:%s\n", $1, $2)}' | grep -v "<none>" > image-list.cfg
    rm -f images.tmp
    sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
    echo "voltha_containers:" >> ansible/group_vars/all
    cat image-list.cfg >> ansible/group_vars/all
    rm -f image-list.cfg
    echo -e "${lBlue}Guessing at the cord home directory for ${yellow}`whoami`${NC}"
    sed -i -e "/cord_home:/s#.*#cord_home: `pwd | sed -e 's~/incubator/voltha/install~~'`#" ansible/group_vars/all
else
    echo -e "${lBlue}Set up the docker image list from ${lCyan}containers.cfg${NC}"
    sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all

    if [ "$useKubernetes" == "yes" ]; then
        cat containers.cfg.k8s >> ansible/group_vars/all
    else
        cat containers.cfg >> ansible/group_vars/all
    fi
fi
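
# The awk above turns each `docker image ls` row into a YAML list item of
# the form " - repo:tag", ready to be appended under the voltha_containers:
# key in ansible/group_vars/all.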

# Install python which is required for ansible
echo -e "${lBlue}Installing ${lCyan}Python${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y install python python-netaddr

# Move all the python deb files to their own directory so they can be installed first
echo -e "${lBlue}Caching ${lCyan}Python${lBlue} install${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr mkdir python-deb
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr "sudo mv /var/cache/apt/archives/*.deb /home/vinstall/python-deb"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr "sudo chown -R vinstall.vinstall /home/vinstall/python-deb"
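
# Caching the python .deb files on the installer presumably lets the offline
# installer put Python (which ansible needs on its targets) onto cluster
# hosts that have no network access.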

if [ "$useKubernetes" == "yes" ]; then
    echo -e "${lBlue}Cloning ${lCyan}Kubespray${lBlue} repository${NC}"
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr "git clone --branch v2.4.0 https://github.com/kubernetes-incubator/kubespray.git /home/vinstall/kubespray"
    #ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr "git clone https://github.com/kubernetes-incubator/kubespray.git /home/vinstall/kubespray"
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr "sudo chown -R vinstall.vinstall /home/vinstall/kubespray"
fi

# Create the docker.cfg file in the ansible tree using the VM's IP address
echo 'DOCKER_OPTS="$DOCKER_OPTS --insecure-registry '$ipAddr':5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=http://'$ipAddr':5001"' > ansible/roles/docker/templates/docker.cfg

# Add the voltha vm's information to the ansible tree
echo -e "${lBlue}Add the voltha vm and key to the ansible accessible hosts${NC}"
vIpAddr=`virsh domifaddr voltha_voltha${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
echo "[voltha]" > ansible/hosts/voltha
echo $vIpAddr >> ansible/hosts/voltha
echo "ansible_ssh_private_key_file: $wd/../.vagrant/machines/voltha${uId}/libvirt/private_key" > ansible/host_vars/$vIpAddr

# Prepare to launch the ansible playbook to configure the installer VM
echo -e "${lBlue}Prepare to launch the ansible playbook to configure the VM${NC}"
echo "[installer]" > ansible/hosts/installer
echo "$ipAddr" >> ansible/hosts/installer
echo "ansible_ssh_private_key_file: $wd/key.pem" > ansible/host_vars/$ipAddr
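
# ansible/hosts/voltha and ansible/hosts/installer are one-group INI
# inventories, and ansible/host_vars/<ip> supplies each host's SSH key, so
# the playbook below can be pointed at either VM with -i.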

# Launch the ansible playbooks

echo -e "${lBlue}Launching the ${lCyan}volthainstall${lBlue} ansible playbook on the installer vm${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/installer
rtrn=$?
if [ $rtrn -ne 0 ]; then
    echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
    exit
fi

echo -e "${lBlue}Launching the ${lCyan}volthainstall${lBlue} ansible playbook on the voltha vm${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/voltha
rtrn=$?
if [ $rtrn -ne 0 ]; then
    echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
    exit
fi

if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Testing, the install image ${red}WILL NOT${lBlue} be built${NC}"

    # Reboot the installer
    echo -e "${lBlue}Rebooting the installer${NC}"
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo telinit 6
    # Wait for the host to shut down
    sleep 5

    ctr=0
    ipAddr=""
    while [ -z "$ipAddr" ];
    do
        echo -e "${lBlue}Waiting for the VM's IP address${NC}"
        ipAddr=`virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
        sleep 3
        if [ $ctr -eq $ipTimeout ]; then
            echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
            exit
        fi
        ctr=`expr $ctr + 1`
    done

    echo -e "${lBlue}Running the installer${NC}"
    echo "~/installer.sh" > tmp_bash_login
    echo "rm ~/.bash_login" >> tmp_bash_login
    echo "logout" >> tmp_bash_login
    scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem tmp_bash_login vinstall@$ipAddr:.bash_login
    rm -f tmp_bash_login
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr

else
    echo -e "${lBlue}Building the install image (this can take a while)${NC}"
    # Create a temporary directory for all the installer files
    mkdir tmp_installer
    cp vmTemplate.xml tmp_installer
    # Shut down the installer vm
    ctr=0
    vStat=`virsh list | grep "$iVmName "`
    virsh shutdown $iVmName
    while [ ! -z "$vStat" ];
    do
        echo "Waiting for $iVmName to shut down"
        sleep 2
        vStat=`virsh list | grep "$iVmName "`
        ctr=`expr $ctr + 1`
        if [ $ctr -eq $shutdownTimeout ]; then
            echo -e "${red}Tired of waiting, forcing the VM off${NC}"
            virsh destroy $iVmName
            vStat=`virsh list | grep "$iVmName "`
        fi
    done
    # Copy the install bootstrap script to the installer directory
    cp BootstrapInstaller.sh tmp_installer
    # Copy the private key to access the VM
    cp key.pem tmp_installer
    pushd tmp_installer > /dev/null 2>&1
    # Copy the vm image to the installer directory
    virsh vol-dumpxml $iVmName.qcow2 default | sed -e 's/<key.*key>//' | sed -e '/^[ ]*$/d' > ${iVmName}_volume.xml
    virsh pool-create-as installer --type dir --target `pwd`
    virsh vol-create-from installer ${iVmName}_volume.xml $iVmName.qcow2 --inputpool default
    virsh pool-destroy installer
    # The image is copied in as root. It needs to have its ownership changed,
    # which will result in a password prompt.
    sudo chown `whoami`.`whoami` $iVmName.qcow2
    # Now create the installer tar file
    tar cjf ../$installerArchive .
    popd > /dev/null 2>&1
    # Clean up
    rm -fr tmp_installer
    # Final location for the installer
    rm -fr $installerDirectory
    mkdir $installerDirectory
    cp deployInstaller.sh $installerDirectory
    # Check the image size and determine if it needs to be split.
    # To be safe, split the image into chunks smaller than 2G so that
    # it will fit on a FAT32 volume.
    fSize=`ls -l $installerArchive | awk '{print $5}'`
    if [ $fSize -gt 2000000000 ]; then
        echo -e "${lBlue}Installer file too large, breaking into parts${NC}"
        # The file is too large, breaking it up into parts
        sPos=0
        fnn="00"
        while dd if=$installerArchive of=${installerDirectory}/${installerPart}$fnn \
            bs=1900MB count=1 skip=$sPos > /dev/null 2>&1
        do
            sPos=`expr $sPos + 1`
            if [ ! -s ${installerDirectory}/${installerPart}$fnn ]; then
                rm -f ${installerDirectory}/${installerPart}$fnn
                break
            fi
            if [ $sPos -lt 10 ]; then
                fnn="0$sPos"
            else
                fnn="$sPos"
            fi
        done
    else
        cp $installerArchive $installerDirectory
    fi
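    # The parts are written in order, so the archive can later be rebuilt
    # with e.g. `cat installer.part* > installer.tar.bz2` (deployInstaller.sh
    # is assumed to handle reassembly on the target side).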
    # Clean up
    rm $installerArchive
    echo -e "${lBlue}The install image is built and can be found in ${lCyan}$installerDirectory${NC}"
    echo -e "${lBlue}Copy all the files in ${lCyan}$installerDirectory${lBlue} to the transport media${NC}"
fi
Sergio Slobodrianc5477712017-06-07 11:56:56 -0400485fi