#!/bin/bash
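# Builds the voltha installer VM: clones a vanilla Ubuntu base image,
# boots and configures it with ansible, and (in non-test mode) packages
# the resulting VM image into a portable installer archive.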


iVmName="vInstaller"
vVmName="voltha_voltha"
baseImage="Ubuntu1604LTS"
iVmNetwork="vagrant-libvirt"
installerArchive="installer.tar.bz2"
installerDirectory="volthaInstaller"
installerPart="installer.part"
shutdownTimeout=5
ipTimeout=10
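# Both timeouts are loop iteration counts, not seconds: the shutdown loop
# below polls every 2 seconds and the IP-address loop polls every 3 seconds.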

lBlue='\033[1;34m'
green='\033[0;32m'
orange='\033[0;33m'
NC='\033[0m'
red='\033[0;31m'
yellow='\033[1;33m'
dGrey='\033[1;30m'
lGrey='\033[1;37m'
lCyan='\033[1;36m'

uId=`id -u`
wd=`pwd`

# Validate that vagrant is installed.
echo -e "${lBlue}Ensure that ${lCyan}vagrant${lBlue} is installed${NC}"
vInst=`which vagrant`

if [ -z "$vInst" ]; then
	wget https://releases.hashicorp.com/vagrant/1.9.5/vagrant_1.9.5_x86_64.deb
	sudo dpkg -i vagrant_1.9.5_x86_64.deb
	rm vagrant_1.9.5_x86_64.deb
fi
unset vInst

# Validate that ansible is installed
echo -e "${lBlue}Ensure that ${lCyan}ansible${lBlue} is installed${NC}"
aInst=`which ansible`

if [ -z "$aInst" ]; then
	sudo apt-get install -y software-properties-common
	sudo apt-add-repository ppa:ansible/ansible
	sudo apt-get update
	sudo apt-get install -y ansible
fi
unset aInst

# Verify whether this is intended to be a test environment; if so, start
# 3 VMs to emulate the production installation cluster.
if [ $# -eq 1 -a "$1" == "test" ]; then
	echo -e "${lBlue}Testing, create the ${lCyan}ha-serv${lBlue} VMs${NC}"
	# Update the vagrant settings file
	sed -i -e '/server_name/s/.*/server_name: "ha-serv'${uId}'-"/' settings.vagrant.yaml
	sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall'${uId}':5000"/' ansible/group_vars/all
	sed -i -e "/vinstall/s/vinstall/vinstall${uId}/" ../ansible/roles/docker/templates/daemon.json

	# Set the insecure registry configuration based on the installer hostname
	echo -e "${lBlue}Set up the insecure registry hostname ${lCyan}vinstall${uId}${NC}"
	echo '{' > ansible/roles/voltha/templates/daemon.json
	echo '"insecure-registries" : ["vinstall'${uId}':5000"]' >> ansible/roles/voltha/templates/daemon.json
	echo '}' >> ansible/roles/voltha/templates/daemon.json

	vagrant destroy ha-serv${uId}-{1,2,3}
	vagrant up ha-serv${uId}-{1,2,3}
	./devSetHostList.sh
	# Change the installer name
	iVmName="vInstaller${uId}"
else
	rm -fr .test
	# Clean out the install config file keeping only the commented lines
	# which serve as documentation.
	sed -i -e '/^#/!d' install.cfg
	# Set the insecure registry configuration based on the installer hostname
	echo -e "${lBlue}Set up the insecure registry hostname ${lCyan}vinstall${NC}"
	sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall:5000"/' ansible/group_vars/all
	echo '{' > ansible/roles/voltha/templates/daemon.json
	echo '"insecure-registries" : ["vinstall:5000"]' >> ansible/roles/voltha/templates/daemon.json
	echo '}' >> ansible/roles/voltha/templates/daemon.json
fi


# Shut down the domain in case it's running.
echo -e "${lBlue}Shut down the ${lCyan}$iVmName${lBlue} VM if running${NC}"
ctr=0
vStat=`virsh list | grep $iVmName`
virsh shutdown $iVmName
while [ ! -z "$vStat" ];
do
	echo "Waiting for $iVmName to shut down"
	sleep 2
	vStat=`virsh list | grep $iVmName`
	ctr=`expr $ctr + 1`
	if [ $ctr -eq $shutdownTimeout ]; then
		echo -e "${red}Tired of waiting, forcing the VM off${NC}"
		virsh destroy $iVmName
		vStat=`virsh list | grep $iVmName`
	fi
done


# Delete the VM and ignore any errors should they occur
echo -e "${lBlue}Undefining the ${lCyan}$iVmName${lBlue} domain${NC}"
virsh undefine $iVmName

# Remove the associated volume
echo -e "${lBlue}Removing the ${lCyan}$iVmName.qcow2${lBlue} volume${NC}"
virsh vol-delete "${iVmName}.qcow2" default

# Clone the base vanilla ubuntu install
echo -e "${lBlue}Cloning the ${lCyan}$baseImage.qcow2${lBlue} to ${lCyan}$iVmName.qcow2${NC}"
virsh vol-clone "${baseImage}.qcow2" "${iVmName}.qcow2" default

# Create the xml file and define the VM for virsh
echo -e "${lBlue}Defining the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
sed -e "s/{{ VMName }}/$iVmName/g" -e "s/{{ VMNetwork }}/$iVmNetwork/g" vmTemplate.xml > tmp.xml

virsh define tmp.xml

rm tmp.xml

# Start the VM; if it's already running just ignore the error
echo -e "${lBlue}Starting the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
virsh start $iVmName > /dev/null 2>&1

# Generate a keypair for communicating with the VM
echo -e "${lBlue}Generating the key-pair for communication with the VM${NC}"
ssh-keygen -f ./key -t rsa -N ''

mv key key.pem

# Clone BashLogin.sh and add the public key to it for later use.
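# When the vinstall user first logs in (the ssh step below), .bash_login
# runs, installs the public key, deletes itself, and logs out; all
# subsequent ssh sessions then authenticate with key.pem.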
echo -e "${lBlue}Creating the pre-configuration script${NC}"
cp BashLogin.sh bash_login.sh
echo "cat <<HERE > .ssh/authorized_keys" >> bash_login.sh
cat key.pub >> bash_login.sh
echo "HERE" >> bash_login.sh
echo "chmod 400 .ssh/authorized_keys" >> bash_login.sh
echo "rm .bash_login" >> bash_login.sh
echo "logout" >> bash_login.sh
rm key.pub



# Get the VM's IP address
ctr=0
ipAddr=""
while [ -z "$ipAddr" ];
do
	echo -e "${lBlue}Waiting for the VM's IP address${NC}"
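	# domifaddr output: skip the two header lines, take the address column,
	# and strip the /prefix-length to leave a bare IP.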
	ipAddr=`virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
	sleep 3
	if [ $ctr -eq $ipTimeout ]; then
		echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
		exit
	fi
	ctr=`expr $ctr + 1`
done

echo -e "${lBlue}The IP address is: ${lCyan}$ipAddr${NC}"

# Copy the pre-config file to the VM
echo -e "${lBlue}Transferring pre-configuration script to the VM${NC}"
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no bash_login.sh vinstall@$ipAddr:.bash_login

rm bash_login.sh

# Run the pre-config file on the VM
echo -e "${lBlue}Running the pre-configuration script on the VM${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no vinstall@$ipAddr

# If we're in test mode, change the hostname of the installer vm
if [ $# -eq 1 -a "$1" == "test" ]; then
	echo -e "${lBlue}Test mode, change the installer host name to ${yellow}vinstall${uId}${NC}"
	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
		sudo hostnamectl set-hostname vinstall${uId}
	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
		sudo service networking restart
fi

# Ensure that the voltha VM is running so that images can be secured
echo -e "${lBlue}Ensure that the ${lCyan}voltha VM${lBlue} is running${NC}"
vVM=`virsh list | grep ${vVmName}${uId}`

if [ -z "$vVM" ]; then
	if [ $# -eq 1 -a "$1" == "test" ]; then
		./BuildVoltha.sh $1
		rtrn=$?
	else
		# Default to installer mode
		./BuildVoltha.sh install
		rtrn=$?
	fi
	if [ $rtrn -ne 0 ]; then
		echo -e "${red}Voltha build failed!! ${yellow}Please review the log and correct the problem${NC}"
		exit 1
	fi
fi

# Extract all the image names and tags from the running voltha VM
# No, don't do this; it's too error-prone if the voltha VM is not
# built correctly. Going with a static list for now.
#echo -e "${lBlue}Extracting the docker image list from the voltha VM${NC}"
#volIpAddr=`virsh domifaddr $vVmName${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
#ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ../.vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$volIpAddr "docker image ls" > images.tmp
#cat images.tmp | grep -v 5000 | tail -n +2 | awk '{printf(" - %s:%s\n", $1, $2)}' > image-list.cfg
#rm -f images.tmp
#sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
#echo "voltha_containers:" >> ansible/group_vars/all
echo -e "${lBlue}Set up the docker image list from ${yellow}containers.cfg${NC}"
sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
cat containers.cfg >> ansible/group_vars/all

# Install python which is required for ansible
echo -e "${lBlue}Installing python${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y install python

# Make sure the VM is up-to-date
echo -e "${lBlue}Ensure that the VM is up-to-date${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y upgrade



# Copy the apt repository to the VM because it's way too slow using ansible
#echo -e "${red}NOT COPYING${lBlue} the apt-repository to the VM, ${red}TESTING ONLY REMOVE FOR PRODUCTION${NC}"
#echo -e "${lBlue}Copy the apt-repository to the VM${NC}"
#scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem -r apt-mirror vinstall@$ipAddr:apt-mirror

# Create the docker.cfg file in the ansible tree using the VM's IP address
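# This trusts the installer's insecure registry on port 5000, exposes the
# docker daemon on the standard TCP and unix sockets, and pulls through
# the registry mirror on port 5001.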
echo 'DOCKER_OPTS="$DOCKER_OPTS --insecure-registry '$ipAddr':5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=http://'$ipAddr':5001"' > ansible/roles/docker/templates/docker.cfg

# Add the voltha vm's information to the ansible tree
echo -e "${lBlue}Add the voltha vm and key to the ansible accessible hosts${NC}"
vIpAddr=`virsh domifaddr ${vVmName}${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
echo "[voltha]" > ansible/hosts/voltha
echo $vIpAddr >> ansible/hosts/voltha
echo "ansible_ssh_private_key_file: $wd/../.vagrant/machines/voltha${uId}/libvirt/private_key" > ansible/host_vars/$vIpAddr


# Prepare to launch the ansible playbook to configure the installer VM
echo -e "${lBlue}Prepare to launch the ansible playbook to configure the VM${NC}"
echo "[installer]" > ansible/hosts/installer
echo "$ipAddr" >> ansible/hosts/installer
echo "ansible_ssh_private_key_file: $wd/key.pem" > ansible/host_vars/$ipAddr

# Launch the ansible playbook
echo -e "${lBlue}Launching the ansible playbook${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/installer
if [ $? -ne 0 ]; then
	echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
	exit
fi
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/voltha
if [ $? -ne 0 ]; then
	echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
	exit
fi

if [ $# -eq 1 -a "$1" == "test" ]; then
	echo -e "${lBlue}Testing, the install image ${red}WILL NOT${lBlue} be built${NC}"
else
	echo -e "${lBlue}Building the install image (this can take a while)${NC}"
	# Create a temporary directory for all the installer files
	mkdir tmp_installer
	cp vmTemplate.xml tmp_installer
	# Shut down the installer vm
	ctr=0
	vStat=`virsh list | grep $iVmName`
	virsh shutdown $iVmName
	while [ ! -z "$vStat" ];
	do
		echo "Waiting for $iVmName to shut down"
		sleep 2
		vStat=`virsh list | grep $iVmName`
		ctr=`expr $ctr + 1`
		if [ $ctr -eq $shutdownTimeout ]; then
			echo -e "${red}Tired of waiting, forcing the VM off${NC}"
			virsh destroy $iVmName
			vStat=`virsh list | grep $iVmName`
		fi
	done
	# Copy the install bootstrap script to the installer directory
	cp BootstrapInstaller.sh tmp_installer
	# Copy the private key to access the VM
	cp key.pem tmp_installer
	pushd tmp_installer > /dev/null 2>&1
	# Copy the vm image to the installer directory
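	# The <key> element is stripped from the volume XML, presumably so that
	# libvirt assigns a fresh key when the volume is re-created from this
	# file on the target host.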
	virsh vol-dumpxml $iVmName.qcow2 default | sed -e 's/<key.*key>//' | sed -e '/^[ ]*$/d' > ${iVmName}_volume.xml
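	# Create a transient directory pool over the current directory so the
	# volume can be copied out of the default pool into tmp_installer.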
	virsh pool-create-as installer --type dir --target `pwd`
	virsh vol-create-from installer ${iVmName}_volume.xml $iVmName.qcow2 --inputpool default
	virsh pool-destroy installer
	# The image is copied in as root; its ownership needs to change,
	# which will result in a password prompt.
	sudo chown `whoami`:`whoami` $iVmName.qcow2
	# Now create the installer tar file
	tar cjf ../$installerArchive .
	popd > /dev/null 2>&1
	# Clean up
	rm -fr tmp_installer
	# Final location for the installer
	rm -fr $installerDirectory
	mkdir $installerDirectory
	cp deployInstaller.sh $installerDirectory
	# Check the image size and determine if it needs to be split.
	# To be safe, split the image into chunks smaller than 2G so that
	# it will fit on a FAT32 volume.
	fSize=`ls -l $installerArchive | awk '{print $5}'`
	if [ $fSize -gt 2000000000 ]; then
		echo -e "${lBlue}Installer file too large, breaking into parts${NC}"
		# The file is too large, breaking it up into parts
		sPos=0
		fnn="00"
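		# dd copies one 1900MB chunk per iteration until it runs past the
		# end of the archive and produces an empty part file; the deploy
		# script is expected to reassemble the parts in order with cat.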
		while dd if=$installerArchive of=${installerDirectory}/${installerPart}$fnn \
			bs=1900MB count=1 skip=$sPos > /dev/null 2>&1
		do
			sPos=`expr $sPos + 1`
			if [ ! -s ${installerDirectory}/${installerPart}$fnn ]; then
				rm -f ${installerDirectory}/${installerPart}$fnn
				break
			fi
			if [ $sPos -lt 10 ]; then
				fnn="0$sPos"
			else
				fnn="$sPos"
			fi
		done
	else
		cp $installerArchive $installerDirectory
	fi
	# Clean up
	rm $installerArchive
	echo -e "${lBlue}The install image is built and can be found in ${yellow}$installerDirectory${NC}"
	echo -e "${lBlue}Copy all the files in ${yellow}$installerDirectory${lBlue} to the transport media${NC}"
fi