blob: 15a5cd0b4281b73d7f68a899923061a8041fc091 [file] [log] [blame]
#!/bin/bash

# Build-time configuration for the VOLTHA installer VM.
iVmName="vInstaller"          # libvirt domain name for the installer VM (suffixed with uid in test mode)
baseImage="Ubuntu1604LTS"     # vanilla Ubuntu volume that gets cloned for the installer
iVmNetwork="vagrant-libvirt"  # libvirt network the installer VM attaches to
installerArchive="installer.tar.bz2"   # final tarball produced by this script
installerDirectory="volthaInstaller"   # directory the deliverables are copied into
installerPart="installer.part"         # filename prefix for split archive parts
shutdownTimeout=5             # polls (2s apart) to wait for graceful VM shutdown
ipTimeout=10                  # polls (3s apart) to wait for the VM's DHCP address

# ANSI color escape sequences used by the status messages below.
lBlue='\033[1;34m'
green='\033[0;32m'
orange='\033[0;33m'
NC='\033[0m'
red='\033[0;31m'
yellow='\033[1;33m'
dGrey='\033[1;30m'
lGrey='\033[1;37m'
lCyan='\033[1;36m'

# Modern $(...) command substitution instead of legacy backticks.
uId=$(id -u)
wd=$(pwd)
25
# Validate that vagrant is installed; install it from HashiCorp if missing.
echo -e "${lBlue}Ensure that ${lCyan}vagrant${lBlue} is installed${NC}"
vInst=$(which vagrant)

if [ -z "$vInst" ]; then
    # BUG FIX: the script previously downloaded vagrant 1.9.5 but ran
    # dpkg -i / rm on vagrant_1.8.5_x86_64.deb, so the install always
    # failed. A single version variable keeps the three steps in sync.
    vagrantVersion="1.9.5"
    wget "https://releases.hashicorp.com/vagrant/${vagrantVersion}/vagrant_${vagrantVersion}_x86_64.deb"
    sudo dpkg -i "vagrant_${vagrantVersion}_x86_64.deb"
    rm "vagrant_${vagrantVersion}_x86_64.deb"
fi
unset vInst
36
# Validate that ansible is installed; install it from the PPA if missing.
echo -e "${lBlue}Ensure that ${lCyan}ansible${lBlue} is installed${NC}"
aInst=$(which ansible)

if [ -z "$aInst" ]; then
    sudo apt-get install -y software-properties-common
    sudo apt-add-repository ppa:ansible/ansible
    sudo apt-get update
    sudo apt-get install -y ansible
fi
# BUG FIX: this previously ran "unset vInst" (a copy-paste leftover from
# the vagrant check above); the variable used in this section is aInst.
unset aInst
48
# Ensure that the voltha VM is running so that images can be secured.
echo -e "${lBlue}Ensure that the ${lCyan}voltha VM${lBlue} is running${NC}"
vVM=$(virsh list | grep "voltha_voltha${uId}")

# Build the voltha VM if it isn't listed as running. $1 is intentionally
# left unquoted so an absent argument stays absent for BuildVoltha.sh.
[ -n "$vVM" ] || ./BuildVoltha.sh $1
56
# Verify if this is intended to be a test environment, if so start 3 VMs
# to emulate the production installation cluster.
if [ $# -eq 1 -a "$1" == "test" ]; then
    echo -e "${lBlue}Testing, create the ${lCyan}ha-serv${lBlue} VMs${NC}"
    # Update the vagrant settings file so VM names are unique per user.
    sed -i -e '/server_name/s/.*/server_name: "ha-serv'${uId}'-"/' settings.vagrant.yaml
    sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall'${uId}':5000"/' ansible/group_vars/all
    sed -i -e "/vinstall/s/vinstall/vinstall${uId}/" ../ansible/roles/docker/templates/daemon.json

    # Set the insecure registry configuration based on the installer hostname.
    # FIX: user message typo "inescure" -> "insecure".
    echo -e "${lBlue}Set up the insecure registry hostname ${lCyan}vinstall${uId}${NC}"
    # Here-doc replaces three consecutive echo appends; output is identical.
    cat <<EOF > ansible/roles/voltha/templates/daemon.json
{
"insecure-registries" : ["vinstall${uId}:5000"]
}
EOF

    vagrant destroy ha-serv${uId}-{1,2,3}
    vagrant up ha-serv${uId}-{1,2,3}
    ./devSetHostList.sh
    # Change the installer name so test runs don't collide between users.
    iVmName="vInstaller${uId}"
else
    rm -fr .test
    # Clean out the install config file keeping only the commented lines
    # which serve as documentation.
    sed -i -e '/^#/!d' install.cfg
    # Set the insecure registry configuration based on the installer hostname.
    # FIX: user message typo "inescure" -> "insecure".
    echo -e "${lBlue}Set up the insecure registry hostname ${lCyan}vinstall${uId}${NC}"
    cat <<EOF > ansible/roles/voltha/templates/daemon.json
{
"insecure-registries" : ["vinstall:5000"]
}
EOF
fi
88
Sergio Slobodrian7c483622017-06-13 15:51:34 -040089
# Shut down the installer domain in case it's already running.
echo -e "${lBlue}Shut down the ${lCyan}$iVmName${lBlue} VM if running${NC}"
ctr=0
vStat=$(virsh list | grep "$iVmName")
virsh shutdown "$iVmName"
# Poll until the domain drops out of the running list; after
# $shutdownTimeout polls give up and force the domain off.
while [ -n "$vStat" ]; do
    echo "Waiting for $iVmName to shut down"
    sleep 2
    vStat=$(virsh list | grep "$iVmName")
    ctr=$((ctr + 1))
    if [ "$ctr" -eq "$shutdownTimeout" ]; then
        echo -e "${red}Tired of waiting, forcing the VM off${NC}"
        virsh destroy "$iVmName"
        vStat=$(virsh list | grep "$iVmName")
    fi
done
107
108
# Delete the VM and ignore any errors should they occur
echo -e "${lBlue}Undefining the ${lCyan}$iVmName${lBlue} domain${NC}"
virsh undefine "$iVmName"

# Remove the associated volume
echo -e "${lBlue}Removing the ${lCyan}$iVmName.qcow2${lBlue} volume${NC}"
virsh vol-delete "${iVmName}.qcow2" default

# Clone the base vanilla ubuntu install
echo -e "${lBlue}Cloning the ${lCyan}$baseImage.qcow2${lBlue} to ${lCyan}$iVmName.qcow2${NC}"
virsh vol-clone "${baseImage}.qcow2" "${iVmName}.qcow2" default

# Create the xml file and define the VM for virsh.
# FIX: one sed invocation with two expressions replaces the old
# "cat file | sed | sed" pipeline (useless cat, two extra processes).
echo -e "${lBlue}Defining the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
sed -e "s/{{ VMName }}/$iVmName/g" -e "s/{{ VMNetwork }}/$iVmNetwork/g" vmTemplate.xml > tmp.xml

virsh define tmp.xml

rm tmp.xml
128
# Start the VM; if it's already running just ignore the error.
echo -e "${lBlue}Starting the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
virsh start "$iVmName" > /dev/null 2>&1

# Generate a keypair for communicating with the VM.
echo -e "${lBlue}Generating the key-pair for communication with the VM${NC}"
ssh-keygen -f ./key -t rsa -N ''

mv key key.pem

# Clone BashLogin.sh and append a stanza that installs the freshly
# generated public key into the VM's authorized_keys on first login.
echo -e "${lBlue}Creating the pre-configuration script${NC}"
cp BashLogin.sh bash_login.sh
{
    echo "cat <<HERE > .ssh/authorized_keys"
    cat key.pub
    echo "HERE"
    echo "chmod 400 .ssh/authorized_keys"
    echo "rm .bash_login"
    echo "logout"
} >> bash_login.sh
rm key.pub
148rm key.pub
149
150
151
# Poll libvirt for the VM's DHCP-assigned IP address.
ctr=0
ipAddr=""
while [ -z "$ipAddr" ]; do
    echo -e "${lBlue}Waiting for the VM's IP address${NC}"
    # domifaddr output: skip the 2-line header, take column 4, strip the CIDR suffix.
    ipAddr=$(virsh domifaddr "$iVmName" | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
    sleep 3
    if [ "$ctr" -eq "$ipTimeout" ]; then
        echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
        exit
    fi
    ctr=$((ctr + 1))
done

echo -e "${lBlue}The IP address is: ${lCyan}$ipAddr${NC}"
168
# Options shared by every ssh/scp call: the VM is freshly built, so
# don't record or verify its host key.
sshOpts=(-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no)

# Copy the pre-config file to the VM.
echo -e "${lBlue}Transfering pre-configuration script to the VM${NC}"
scp "${sshOpts[@]}" bash_login.sh vinstall@$ipAddr:.bash_login

rm bash_login.sh

# Log in once so .bash_login runs: it installs the authorized key,
# removes itself, and logs back out.
echo -e "${lBlue}Running the pre-configuration script on the VM${NC}"
ssh "${sshOpts[@]}" vinstall@$ipAddr

# If we're in test mode, change the hostname of the installer vm.
if [ $# -eq 1 -a "$1" == "test" ]; then
    echo -e "${lBlue}Test mode, change the installer host name to ${yellow}vinstall${uId}${NC}"
    ssh "${sshOpts[@]}" -i key.pem vinstall@$ipAddr \
        sudo hostnamectl set-hostname vinstall${uId}
    ssh "${sshOpts[@]}" -i key.pem vinstall@$ipAddr \
        sudo service networking restart
fi
187
# Authenticated ssh options reused by every provisioning command below.
SSH_OPTS="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem"

# Install python, which is required for ansible-managed hosts.
echo -e "${lBlue}Installing python${NC}"
ssh $SSH_OPTS vinstall@$ipAddr sudo apt-get update
ssh $SSH_OPTS vinstall@$ipAddr sudo apt-get -y install python

# Make sure the VM is up-to-date.
echo -e "${lBlue}Ensure that the VM is up-to-date${NC}"
ssh $SSH_OPTS vinstall@$ipAddr sudo apt-get update
ssh $SSH_OPTS vinstall@$ipAddr sudo apt-get -y upgrade

# Copy the apt repository to the VM because it's way too slow using ansible
#echo -e "${red}NOT COPYING${lBlue} the apt-repository to the VM, ${red}TESTING ONLY REMOVE FOR PRODUCTION${NC}"
#echo -e "${lBlue}Copy the apt-repository to the VM${NC}"
#scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem -r apt-mirror vinstall@$ipAddr:apt-mirror

# Create the docker.cfg file in the ansible tree using the VM's IP address.
# The leading \$ keeps $DOCKER_OPTS literal in the generated file.
cat > ansible/roles/docker/templates/docker.cfg <<EOF
DOCKER_OPTS="\$DOCKER_OPTS --insecure-registry $ipAddr:5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=http://$ipAddr:5001"
EOF
207
# Add the voltha vm's information to the ansible tree.
echo -e "${lBlue}Add the voltha vm and key to the ansible accessible hosts${NC}"
vIpAddr=$(virsh domifaddr voltha_voltha${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~')
printf '[voltha]\n%s\n' "$vIpAddr" > ansible/hosts/voltha
echo "ansible_ssh_private_key_file: $wd/../.vagrant/machines/voltha${uId}/libvirt/private_key" > ansible/host_vars/$vIpAddr


# Prepare to launch the ansible playbook to configure the installer VM.
echo -e "${lBlue}Prepare to launch the ansible playbook to configure the VM${NC}"
printf '[installer]\n%s\n' "$ipAddr" > ansible/hosts/installer
echo "ansible_ssh_private_key_file: $wd/key.pem" > ansible/host_vars/$ipAddr
221
# Launch the ansible playbook against the installer VM, then the voltha VM.
echo -e "${lBlue}Launching the ansible playbook${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/installer
if [ $? -ne 0 ]; then
    echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
    # BUG FIX: a bare "exit" here returned the status of the preceding
    # echo (0), so a failed playbook still reported success to callers.
    exit 1
fi
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/voltha
if [ $? -ne 0 ]; then
    echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
    exit 1
fi
234
# In test mode the installer image is skipped; otherwise shut the VM down,
# export its volume, and package everything into a (possibly split) archive.
if [ $# -eq 1 -a "$1" == "test" ]; then
    echo -e "${lBlue}Testing, the install image ${red}WILL NOT${lBlue} be built${NC}"
else
    echo -e "${lBlue}Building, the install image (this can take a while)${NC}"
    # Create a temporary directory for all the installer files
    mkdir tmp_installer
    cp vmTemplate.xml tmp_installer
    # Shut down the installer vm; force it off after $shutdownTimeout polls.
    ctr=0
    vStat=$(virsh list | grep "$iVmName")
    virsh shutdown "$iVmName"
    while [ -n "$vStat" ]; do
        echo "Waiting for $iVmName to shut down"
        sleep 2
        vStat=$(virsh list | grep "$iVmName")
        ctr=$((ctr + 1))
        if [ "$ctr" -eq "$shutdownTimeout" ]; then
            echo -e "${red}Tired of waiting, forcing the VM off${NC}"
            virsh destroy "$iVmName"
            vStat=$(virsh list | grep "$iVmName")
        fi
    done
    # Copy the install bootstrap script to the installer directory
    cp BootstrapInstaller.sh tmp_installer
    # Copy the private key to access the VM
    cp key.pem tmp_installer
    pushd tmp_installer > /dev/null 2>&1
    # Export the VM volume: strip the volume key and blank lines from the
    # XML, then copy the image into a throwaway dir-backed pool.
    virsh vol-dumpxml "$iVmName.qcow2" default | sed -e 's/<key.*key>//' | sed -e '/^[ ]*$/d' > ${iVmName}_volume.xml
    virsh pool-create-as installer --type dir --target "$(pwd)"
    virsh vol-create-from installer ${iVmName}_volume.xml "$iVmName.qcow2" --inputpool default
    virsh pool-destroy installer
    # The image is copied in as root. It needs to have ownership changed;
    # this will result in a password prompt.
    # FIX: use the standard ':' owner/group separator; the '.' form is
    # a deprecated GNU extension.
    sudo chown "$(whoami):$(whoami)" "$iVmName.qcow2"
    # Now create the installer tar file
    tar cjf ../$installerArchive .
    popd > /dev/null 2>&1
    # Clean up
    rm -fr tmp_installer
    # Final location for the installer
    rm -fr $installerDirectory
    mkdir $installerDirectory
    cp installVoltha.sh $installerDirectory
    # Check the image size and determine if it needs to be split.
    # To be safe, split the image into chunks smaller than 2G so that
    # it will fit on a FAT32 volume.
    # FIX: the awk program's closing brace was outside the quotes
    # ('{print $5'}); it only worked by accidental shell concatenation.
    fSize=$(ls -l $installerArchive | awk '{print $5}')
    if [ $fSize -gt 2000000000 ]; then
        echo -e "${lBlue}Installer file too large, breaking into parts${NC}"
        # Copy out successive 1900MB chunks; dd succeeds with an empty
        # output file once we read past EOF, hence the -s check to stop.
        sPos=0
        fnn="00"
        while dd if=$installerArchive of=${installerDirectory}/${installerPart}$fnn \
            bs=1900MB count=1 skip=$sPos > /dev/null 2>&1
        do
            sPos=$((sPos + 1))
            if [ ! -s ${installerDirectory}/${installerPart}$fnn ]; then
                rm -f ${installerDirectory}/${installerPart}$fnn
                break
            fi
            if [ $sPos -lt 10 ]; then
                fnn="0$sPos"
            else
                fnn="$sPos"
            fi
        done
    else
        cp $installerArchive $installerDirectory
    fi
    # Clean up
    rm $installerArchive
    # FIX: user message typo "traasnport" -> "transport".
    echo -e "${lBlue}The install image is built and can be found in ${yellow}$installerDirectory${NC}"
    echo -e "${lBlue}Copy all the files in ${yellow}$installerDirectory${lBlue} to the transport media${NC}"
fi