blob: cabc1e8c4fbb880214270cc24a00210e968e862d [file] [log] [blame]
#!/bin/bash

# Configuration for building the VOLTHA installer VM.
iVmName="vInstaller"                 # libvirt domain name of the installer VM
vVmName="voltha_voltha"              # libvirt domain name of the voltha VM
baseImage="Ubuntu1604LTS"            # base qcow2 volume the installer is cloned from
iVmNetwork="vagrant-libvirt"         # libvirt network the installer VM attaches to
installerArchive="installer.tar.bz2" # tarball produced in build (non-test) mode
installerDirectory="volthaInstaller" # directory holding the shippable installer files
installerPart="installer.part"       # filename prefix for split archive chunks
shutdownTimeout=5                    # polls (2s apart) before forcing a VM off
ipTimeout=10                         # polls (3s apart) waiting for a VM IP address

# ANSI color codes used by the progress messages below.
lBlue='\033[1;34m'
green='\033[0;32m'
orange='\033[0;33m'
NC='\033[0m'
red='\033[0;31m'
yellow='\033[1;33m'
dGrey='\033[1;30m'
lGrey='\033[1;37m'
lCyan='\033[1;36m'

# The user id suffixes VM names in test mode so several developers can
# share one build host without colliding. (idiom: $() over backticks)
uId=$(id -u)
wd=$(pwd)
26
# Validate that vagrant is installed; if not, install a pinned release.
echo -e "${lBlue}Ensure that ${lCyan}vagrant${lBlue} is installed${NC}"
vInst=$(command -v vagrant)

if [ -z "$vInst" ]; then
    # Bug fix: the script previously downloaded vagrant 1.9.5 but then ran
    # 'dpkg -i' and 'rm' on vagrant_1.8.5_x86_64.deb, so the install failed
    # and the downloaded file was left behind. Use one variable for both.
    vagrantDeb="vagrant_1.9.5_x86_64.deb"
    wget "https://releases.hashicorp.com/vagrant/1.9.5/${vagrantDeb}"
    sudo dpkg -i "$vagrantDeb"
    rm "$vagrantDeb"
fi
unset vInst
37
# Validate that ansible is installed; if not, install it from the PPA.
echo -e "${lBlue}Ensure that ${lCyan}ansible${lBlue} is installed${NC}"
aInst=$(command -v ansible)

if [ -z "$aInst" ]; then
    sudo apt-get install -y software-properties-common
    sudo apt-add-repository ppa:ansible/ansible
    sudo apt-get update
    sudo apt-get install -y ansible
fi
# Bug fix: this previously ran 'unset vInst' (copy/paste from the vagrant
# check above), leaving aInst set for the rest of the script.
unset aInst
49
# Verify if this is intended to be a test environment, if so
# configure the 3 VMs which will be started later to emulate
# the production installation cluster.
if [ $# -eq 1 ] && [ "$1" = "test" ]; then
    echo -e "${lBlue}Testing, configure the ${lCyan}ha-serv${lBlue} VMs${NC}"
    # Update the vagrant settings file so VM names carry the user id.
    sed -i -e '/server_name/s/.*/server_name: "ha-serv'${uId}'-"/' settings.vagrant.yaml
    sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall'${uId}':5000"/' ansible/group_vars/all
    sed -i -e "/vinstall/s/vinstall/vinstall${uId}/" ../ansible/roles/docker/templates/daemon.json

    # Set the insecure registry configuration based on the installer hostname.
    # (typo fix: the progress message used to read "inescure")
    echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${uId}${NC}"
    echo '{' > ansible/roles/voltha/templates/daemon.json
    echo '"insecure-registries" : ["vinstall'${uId}':5000"]' >> ansible/roles/voltha/templates/daemon.json
    echo '}' >> ansible/roles/voltha/templates/daemon.json

    # Change the installer name so per-user test instances don't collide.
    iVmName="vInstaller${uId}"
else
    rm -fr .test
    # Clean out the install config file keeping only the commented lines
    # which serve as documentation.
    sed -i -e '/^#/!d' install.cfg
    # Set the insecure registry configuration based on the installer hostname
    echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${uId}${NC}"
    sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall:5000"/' ansible/group_vars/all
    echo '{' > ansible/roles/voltha/templates/daemon.json
    echo '"insecure-registries" : ["vinstall:5000"]' >> ansible/roles/voltha/templates/daemon.json
    echo '}' >> ansible/roles/voltha/templates/daemon.json
fi
80
Sergio Slobodrian7c483622017-06-13 15:51:34 -040081
# Gracefully stop the installer domain if it is running; after
# $shutdownTimeout polls (2s apart) give up and power it off.
echo -e "${lBlue}Shut down the ${lCyan}$iVmName${lBlue} VM if running${NC}"
ctr=0
vStat=$(virsh list | grep $iVmName)
virsh shutdown $iVmName
while [ -n "$vStat" ]; do
    echo "Waiting for $iVmName to shut down"
    sleep 2
    vStat=$(virsh list | grep $iVmName)
    ctr=$((ctr + 1))
    if [ $ctr -eq $shutdownTimeout ]; then
        echo -e "${red}Tired of waiting, forcing the VM off${NC}"
        virsh destroy $iVmName
        vStat=$(virsh list | grep $iVmName)
    fi
done
99
100
# Delete the VM and ignore any errors should they occur
echo -e "${lBlue}Undefining the ${lCyan}$iVmName${lBlue} domain${NC}"
virsh undefine $iVmName

# Remove the associated volume
echo -e "${lBlue}Removing the ${lCyan}$iVmName.qcow2${lBlue} volume${NC}"
virsh vol-delete "${iVmName}.qcow2" default

# Clone the base vanilla ubuntu install
echo -e "${lBlue}Cloning the ${lCyan}$baseImage.qcow2${lBlue} to ${lCyan}$iVmName.qcow2${NC}"
virsh vol-clone "${baseImage}.qcow2" "${iVmName}.qcow2" default

# Create the xml file and define the VM for virsh.
# (idiom fix: one sed reading the template replaces 'cat | sed | sed')
echo -e "${lBlue}Defining the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
sed -e "s/{{ VMName }}/$iVmName/g" -e "s/{{ VMNetwork }}/$iVmNetwork/g" vmTemplate.xml > tmp.xml

virsh define tmp.xml

rm tmp.xml
120
# Start the VM; if it's already running just ignore the error.
echo -e "${lBlue}Starting the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
virsh start $iVmName > /dev/null 2>&1

# Generate a keypair for communicating with the VM.
echo -e "${lBlue}Generating the key-pair for communication with the VM${NC}"
ssh-keygen -f ./key -t rsa -N ''

mv key key.pem

# Clone BashLogin.sh and append a self-removing snippet that installs the
# public key into the VM's authorized_keys on first login.
echo -e "${lBlue}Creating the pre-configuration script${NC}"
cp BashLogin.sh bash_login.sh
{
    echo "cat <<HERE > .ssh/authorized_keys"
    cat key.pub
    echo "HERE"
    echo "chmod 400 .ssh/authorized_keys"
    echo "rm .bash_login"
    echo "logout"
} >> bash_login.sh
rm key.pub
141
142
143
# Poll libvirt until the installer VM reports an IP address, giving up
# after $ipTimeout polls (3s apart).
ctr=0
ipAddr=""
until [ -n "$ipAddr" ]; do
    echo -e "${lBlue}Waiting for the VM's IP address${NC}"
    ipAddr=$(virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
    sleep 3
    if [ $ctr -eq $ipTimeout ]; then
        echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
        exit
    fi
    ctr=$((ctr + 1))
done

echo -e "${lBlue}The IP address is: ${lCyan}$ipAddr${NC}"
160
# Push the pre-configuration script to the VM as its .bash_login, then log
# in once so it runs (it installs the key and removes itself).
echo -e "${lBlue}Transfering pre-configuration script to the VM${NC}"
sshOpts=(-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no)
scp "${sshOpts[@]}" bash_login.sh vinstall@$ipAddr:.bash_login

rm bash_login.sh

# Run the pre-config file on the VM
echo -e "${lBlue}Running the pre-configuration script on the VM${NC}"
ssh "${sshOpts[@]}" vinstall@$ipAddr
170
# In test mode, rename the installer VM's host and bring up the three
# vagrant target VMs that emulate the production cluster.
if [ $# -eq 1 ] && [ "$1" = "test" ]; then
    echo -e "${lBlue}Test mode, change the installer host name to ${yellow}vinstall${uId}${NC}"
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
        sudo hostnamectl set-hostname vinstall${uId}
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
        sudo service networking restart

    echo -e "${lBlue}Testing, start the ${lCyan}ha-serv${lBlue} VMs${NC}"
    vagrant destroy ha-serv${uId}-{1,2,3}
    vagrant up ha-serv${uId}-{1,2,3}
    ./devSetHostList.sh
fi
185
# Ensure that the voltha VM is running so that images can be secured.
# NOTE(review): the grep pattern always appends ${uId}; presumably
# BuildVoltha.sh names the VM with the uid suffix in both modes — confirm.
echo -e "${lBlue}Ensure that the ${lCyan}voltha VM${lBlue} is running${NC}"
vVM=$(virsh list | grep voltha_voltha${uId})

if [ -z "$vVM" ]; then
    if [ $# -eq 1 ] && [ "$1" = "test" ]; then
        ./BuildVoltha.sh $1
        rtrn=$?
    else
        # Default to installer mode
        ./BuildVoltha.sh install
        rtrn=$?
    fi
    if [ $rtrn -ne 0 ]; then
        # Bug fix: the message previously ended with a stray " is running"
        # left over from a copy/paste of the echo above.
        echo -e "${red}Voltha build failed!! ${yellow}Please review the log and correct the problem${NC}"
        exit 1
    fi
fi
204
# Extract all the image names and tags from the running voltha VM
# when running in test mode. This will provide the entire suite
# of available containers to the VM cluster.

if [ $# -eq 1 ] && [ "$1" = "test" ]; then
    echo -e "${lBlue}Extracting the docker image list from the voltha VM${NC}"
    volIpAddr=$(virsh domifaddr $vVmName${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ../.vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$volIpAddr "docker image ls" > images.tmp
    # Drop the header line and any local-registry (port 5000) entries,
    # then format as a YAML list. (idiom fix: grep reads the file
    # directly instead of the former 'cat images.tmp | grep')
    grep -v 5000 images.tmp | tail -n +2 | awk '{printf(" - %s:%s\n", $1, $2)}' > image-list.cfg
    rm -f images.tmp
    # Replace everything from 'voltha_containers:' to EOF with the new list.
    sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
    echo "voltha_containers:" >> ansible/group_vars/all
    cat image-list.cfg >> ansible/group_vars/all
    rm -f image-list.cfg
else
    echo -e "${lBlue}Set up the docker image list from ${yellow}containers.cfg${NC}"
    sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
    cat containers.cfg >> ansible/group_vars/all
fi
Sergio Slobodrianba9cbd82017-06-22 11:45:49 -0400224
# Install python on the installer VM — ansible needs it on managed hosts.
echo -e "${lBlue}Installing python${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y install python

# Bring the VM fully up-to-date.
echo -e "${lBlue}Ensure that the VM is up-to-date${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y upgrade



# Copy the apt repository to the VM because it's way too slow using ansible
#echo -e "${red}NOT COPYING${lBlue} the apt-repository to the VM, ${red}TESTING ONLY REMOVE FOR PRODUCTION${NC}"
#echo -e "${lBlue}Copy the apt-repository to the VM${NC}"
#scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem -r apt-mirror vinstall@$ipAddr:apt-mirror

# Write the docker.cfg file in the ansible tree using the VM's IP address
# for the insecure registry and registry mirror settings.
echo 'DOCKER_OPTS="$DOCKER_OPTS --insecure-registry '$ipAddr':5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=http://'$ipAddr':5001"' > ansible/roles/docker/templates/docker.cfg
244
# Record the voltha VM's address and private key in the ansible inventory.
echo -e "${lBlue}Add the voltha vm and key to the ansible accessible hosts${NC}"
vIpAddr=$(virsh domifaddr voltha_voltha${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
printf '%s\n' "[voltha]" > ansible/hosts/voltha
printf '%s\n' "$vIpAddr" >> ansible/hosts/voltha
printf '%s\n' "ansible_ssh_private_key_file: $wd/../.vagrant/machines/voltha${uId}/libvirt/private_key" > ansible/host_vars/$vIpAddr


# Do the same for the installer VM so the playbook can configure it.
echo -e "${lBlue}Prepare to launch the ansible playbook to configure the VM${NC}"
printf '%s\n' "[installer]" > ansible/hosts/installer
printf '%s\n' "$ipAddr" >> ansible/hosts/installer
printf '%s\n' "ansible_ssh_private_key_file: $wd/key.pem" > ansible/host_vars/$ipAddr
258
# Launch the ansible playbooks against the installer and voltha VMs.
echo -e "${lBlue}Launching the ansible playbook${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/installer
if [ $? -ne 0 ]; then
    echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
    # Bug fix: a bare 'exit' here followed the echo and therefore exited
    # with status 0; report the failure to the caller.
    exit 1
fi
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/voltha
if [ $? -ne 0 ]; then
    echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
    exit 1
fi
271
# In test mode, reboot the installer and run it in place; otherwise package
# the installer VM image into a (possibly split) distributable archive.
if [ $# -eq 1 ] && [ "$1" = "test" ]; then
    echo -e "${lBlue}Testing, the install image ${red}WILL NOT${lBlue} be built${NC}"


    # Reboot the installer
    echo -e "${lBlue}Rebooting the installer${NC}"
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo telinit 6
    # Wait for the host to shut down
    sleep 5

    # Wait for the VM to come back up and report an IP address.
    ctr=0
    ipAddr=""
    while [ -z "$ipAddr" ]; do
        echo -e "${lBlue}Waiting for the VM's IP address${NC}"
        ipAddr=$(virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
        sleep 3
        if [ $ctr -eq $ipTimeout ]; then
            echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
            # Bug fix: exit non-zero on timeout so callers see the failure.
            exit 1
        fi
        ctr=$((ctr + 1))
    done

    # Stage a one-shot .bash_login that runs the installer then removes itself.
    echo -e "${lBlue}Running the installer${NC}"
    echo "~/installer.sh" > tmp_bash_login
    echo "rm ~/.bash_login" >> tmp_bash_login
    echo "logout" >> tmp_bash_login
    scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem tmp_bash_login vinstall@$ipAddr:.bash_login
    rm -f tmp_bash_login
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr

else
    echo -e "${lBlue}Building, the install image (this can take a while)${NC}"
    # Create a temporary directory for all the installer files
    mkdir tmp_installer
    cp vmTemplate.xml tmp_installer
    # Shut down the installer vm
    ctr=0
    vStat=$(virsh list | grep $iVmName)
    virsh shutdown $iVmName
    while [ -n "$vStat" ]; do
        echo "Waiting for $iVmName to shut down"
        sleep 2
        vStat=$(virsh list | grep $iVmName)
        ctr=$((ctr + 1))
        if [ $ctr -eq $shutdownTimeout ]; then
            echo -e "${red}Tired of waiting, forcing the VM off${NC}"
            virsh destroy $iVmName
            vStat=$(virsh list | grep $iVmName)
        fi
    done
    # Copy the install bootstrap script to the installer directory
    cp BootstrapInstaller.sh tmp_installer
    # Copy the private key to access the VM
    cp key.pem tmp_installer
    pushd tmp_installer > /dev/null 2>&1
    # Copy the vm image to the installer directory
    virsh vol-dumpxml $iVmName.qcow2 default | sed -e 's/<key.*key>//' | sed -e '/^[ ]*$/d' > ${iVmName}_volume.xml
    virsh pool-create-as installer --type dir --target "$(pwd)"
    virsh vol-create-from installer ${iVmName}_volume.xml $iVmName.qcow2 --inputpool default
    virsh pool-destroy installer
    # The image is copied in as root. It needs to have ownership changed
    # this will result in a password prompt.
    # (fix: the 'user.group' dot separator is deprecated; use ':')
    sudo chown "$(whoami):$(whoami)" $iVmName.qcow2
    # Now create the installer tar file
    tar cjf ../$installerArchive .
    popd > /dev/null 2>&1
    # Clean up
    rm -fr tmp_installer
    # Final location for the installer
    rm -fr $installerDirectory
    mkdir $installerDirectory
    cp deployInstaller.sh $installerDirectory
    # Check the image size and determine if it needs to be split.
    # To be safe, split the image into chunks smaller than 2G so that
    # it will fit on a FAT32 volume.
    # (fix: ask stat for the size instead of parsing 'ls -l' output with
    # a misquoted awk program)
    fSize=$(stat -c %s "$installerArchive")
    if [ $fSize -gt 2000000000 ]; then
        echo -e "${lBlue}Installer file too large, breaking into parts${NC}"
        # The file is too large, breaking it up into parts
        sPos=0
        fnn="00"
        while dd if=$installerArchive of=${installerDirectory}/${installerPart}$fnn \
            bs=1900MB count=1 skip=$sPos > /dev/null 2>&1
        do
            sPos=$((sPos + 1))
            # dd happily writes an empty final chunk; remove it and stop.
            if [ ! -s ${installerDirectory}/${installerPart}$fnn ]; then
                rm -f ${installerDirectory}/${installerPart}$fnn
                break
            fi
            if [ $sPos -lt 10 ]; then
                fnn="0$sPos"
            else
                fnn="$sPos"
            fi
        done
    else
        cp $installerArchive $installerDirectory
    fi
    # Clean up
    rm $installerArchive
    echo -e "${lBlue}The install image is built and can be found in ${yellow}$installerDirectory${NC}"
    # (typo fix: "traasnport" -> "transport")
    echo -e "${lBlue}Copy all the files in ${yellow}$installerDirectory${lBlue} to the transport media${NC}"
fi