#!/bin/bash

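# Configuration:
#   iVmName            - libvirt domain name of the installer VM built by this script
#   vVmName            - libvirt domain name of the vagrant-built voltha VM
#   baseImage          - vanilla Ubuntu 16.04 volume (in the default pool) that gets cloned
#   iVmNetwork         - libvirt network the installer VM is attached to
#   installerArchive/Directory/Part - names of the archive, output directory and
#                        split-file prefix produced when the install image is built
#   shutdownTimeout, ipTimeout - number of polling iterations before giving up on a VM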
iVmName="vInstaller"
vVmName="voltha_voltha"
baseImage="Ubuntu1604LTS"
iVmNetwork="vagrant-libvirt"
installerArchive="installer.tar.bz2"
installerDirectory="volthaInstaller"
installerPart="installer.part"
shutdownTimeout=5
ipTimeout=10

# Command line argument variables
testMode="no"

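# ANSI colour escape sequences used with echo -e for coloured status output.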
lBlue='\033[1;34m'
green='\033[0;32m'
orange='\033[0;33m'
NC='\033[0m'
red='\033[0;31m'
yellow='\033[1;33m'
dGrey='\033[1;30m'
lGrey='\033[1;37m'
lCyan='\033[1;36m'

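# uId is appended to VM names and hostnames in test mode so that several users
# can run test installs on the same host; wd is used to build absolute paths
# for the ansible key files.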
uId=`id -u`
wd=`pwd`

parse_args()
{
    for i in "$@"
    do
        case "$i" in
            "test" )
                testMode="yes"
                echo -e "${lBlue}Test mode is ${green}enabled${NC}"
                ;;
        esac
    done
}
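# parse_args is called with "$@"; currently only the literal argument "test"
# is recognized and switches the script into test mode.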

######################################
# MAIN MAIN MAIN MAIN MAIN MAIN MAIN #
######################################
parse_args "$@"
# Validate that vagrant is installed.
echo -e "${lBlue}Ensure that ${lCyan}vagrant${lBlue} is installed${NC}"
vInst=`which vagrant`

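# Note: only the presence of the vagrant binary is checked, not its version.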
if [ -z "$vInst" ]; then
    wget https://releases.hashicorp.com/vagrant/1.9.5/vagrant_1.9.5_x86_64.deb
    sudo dpkg -i vagrant_1.9.5_x86_64.deb
    rm vagrant_1.9.5_x86_64.deb
fi
unset vInst

# Validate that ansible is installed
echo -e "${lBlue}Ensure that ${lCyan}ansible${lBlue} is installed${NC}"
aInst=`which ansible`

if [ -z "$aInst" ]; then
    sudo apt-get install -y software-properties-common
    sudo apt-add-repository ppa:ansible/ansible
    sudo apt-get update
    sudo apt-get install -y ansible
fi
unset aInst

# Check whether this is intended to be a test environment; if so,
# configure the 3 VMs which will be started later to emulate
# the production installation cluster.
if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Test mode ${green}enabled${lBlue}, configure the ${lCyan}ha-serv${lBlue} VMs${NC}"
    # Update the vagrant settings file
    sed -i -e '/server_name/s/.*/server_name: "ha-serv'${uId}'-"/' settings.vagrant.yaml
    sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall'${uId}':5000"/' ansible/group_vars/all
    sed -i -e "/vinstall/s/vinstall/vinstall${uId}/" ../ansible/roles/docker/templates/daemon.json

    # Set the insecure registry configuration based on the installer hostname
    echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${uId}${NC}"
    echo '{' > ansible/roles/voltha/templates/daemon.json
    echo '"insecure-registries" : ["vinstall'${uId}':5000"]' >> ansible/roles/voltha/templates/daemon.json
    echo '}' >> ansible/roles/voltha/templates/daemon.json
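    # For reference, the generated ansible/roles/voltha/templates/daemon.json
    # looks like this (assuming a uId of 1000):
    # {
    # "insecure-registries" : ["vinstall1000:5000"]
    # }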

    # Check to make sure that the vagrant-libvirt network is both defined and started
    echo -e "${lBlue}Verify that the ${lCyan}vagrant-libvirt${lBlue} network is defined and started${NC}"
    virsh net-list | grep "vagrant-libvirt" > /dev/null
    rtrn=$?
    if [ $rtrn -eq 1 ]; then
        # The network isn't running, check if it's defined
        virsh net-list --all | grep "vagrant-libvirt" > /dev/null
        rtrn=$?
        if [ $rtrn -eq 1 ]; then
            # Not defined either
            echo -e "${lBlue}Defining the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
            virsh net-define vagrant-libvirt.xml
            echo -e "${lBlue}Starting the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
            virsh net-start vagrant-libvirt
        else
            # Defined but not started
            echo -e "${lBlue}Starting the ${lCyan}vagrant-libvirt${lBlue} network${NC}"
            virsh net-start vagrant-libvirt
        fi
    else
        echo -e "${lBlue}The ${lCyan}vagrant-libvirt${lBlue} network is ${green}running${NC}"
    fi

    # Change the installer name
    iVmName="vInstaller${uId}"
else
    rm -fr .test
    # Clean out the install config file keeping only the commented lines
    # which serve as documentation.
    sed -i -e '/^#/!d' install.cfg
    # Set the insecure registry configuration based on the installer hostname
    echo -e "${lBlue}Set up the insecure registry config for hostname ${lCyan}vinstall${NC}"
    sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall:5000"/' ansible/group_vars/all
    echo '{' > ansible/roles/voltha/templates/daemon.json
    echo '"insecure-registries" : ["vinstall:5000"]' >> ansible/roles/voltha/templates/daemon.json
    echo '}' >> ansible/roles/voltha/templates/daemon.json
fi

# Shut down the domain in case it's running.
echo -e "${lBlue}Shut down the ${lCyan}$iVmName${lBlue} VM if running${NC}"
ctr=0
vStat=`virsh list | grep $iVmName`
virsh shutdown $iVmName
while [ ! -z "$vStat" ];
do
    echo "Waiting for $iVmName to shut down"
    sleep 2
    vStat=`virsh list | grep $iVmName`
    ctr=`expr $ctr + 1`
    if [ $ctr -eq $shutdownTimeout ]; then
        echo -e "${red}Tired of waiting, forcing the VM off${NC}"
        virsh destroy $iVmName
        vStat=`virsh list | grep $iVmName`
    fi
done
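# Note: the loop above polls every 2 seconds, so shutdownTimeout is a number
# of polls (not seconds) before the VM is forcibly destroyed.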

# Delete the VM and ignore any errors should they occur
echo -e "${lBlue}Undefining the ${lCyan}$iVmName${lBlue} domain${NC}"
virsh undefine $iVmName

# Remove the associated volume
echo -e "${lBlue}Removing the ${lCyan}$iVmName.qcow2${lBlue} volume${NC}"
virsh vol-delete "${iVmName}.qcow2" default

# Clone the base vanilla ubuntu install
echo -e "${lBlue}Cloning the ${lCyan}$baseImage.qcow2${lBlue} to ${lCyan}$iVmName.qcow2${NC}"
virsh vol-clone "${baseImage}.qcow2" "${iVmName}.qcow2" default

# Create the xml file and define the VM for virsh
echo -e "${lBlue}Defining the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
cat vmTemplate.xml | sed -e "s/{{ VMName }}/$iVmName/g" | sed -e "s/{{ VMNetwork }}/$iVmNetwork/g" > tmp.xml

virsh define tmp.xml

rm tmp.xml
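# vmTemplate.xml contains {{ VMName }} and {{ VMNetwork }} placeholders which
# the sed pipeline above substitutes before the domain is defined.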

# Start the VM, if it's already running just ignore the error
echo -e "${lBlue}Starting the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
virsh start $iVmName > /dev/null 2>&1

# Generate a keypair for communicating with the VM
echo -e "${lBlue}Generating the key-pair for communication with the VM${NC}"
ssh-keygen -f ./key -t rsa -N ''

mv key key.pem

# Clone BashLogin.sh and add the public key to it for later use.
echo -e "${lBlue}Creating the pre-configuration script${NC}"
cp BashLogin.sh bash_login.sh
echo "cat <<HERE > .ssh/authorized_keys" >> bash_login.sh
cat key.pub >> bash_login.sh
echo "HERE" >> bash_login.sh
echo "chmod 400 .ssh/authorized_keys" >> bash_login.sh
echo "rm .bash_login" >> bash_login.sh
echo "logout" >> bash_login.sh
rm key.pub
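# How the pre-configuration works: bash_login.sh is copied to the VM as
# ~/.bash_login below. On the first interactive login it installs the public
# key into ~/.ssh/authorized_keys, deletes itself and logs out, so every
# later connection can use key.pem instead of a password.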

# Get the VM's IP address
ctr=0
ipAddr=""
while [ -z "$ipAddr" ];
do
    echo -e "${lBlue}Waiting for the VM's IP address${NC}"
    ipAddr=`virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
    sleep 3
    if [ $ctr -eq $ipTimeout ]; then
        echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
        exit
    fi
    ctr=`expr $ctr + 1`
done
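# virsh domifaddr prints a table along the lines of:
#  Name       MAC address          Protocol     Address
#  vnet0      52:54:00:xx:xx:xx    ipv4         192.168.121.45/24
# so the tail/awk/sed pipeline above keeps only the bare IPv4 address.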

echo -e "${lBlue}The IP address is: ${lCyan}$ipAddr${NC}"

# Copy the pre-config file to the VM
echo -e "${lBlue}Transferring pre-configuration script to the VM${NC}"
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no bash_login.sh vinstall@$ipAddr:.bash_login

rm bash_login.sh

# Run the pre-config file on the VM
echo -e "${lBlue}Running the pre-configuration script on the VM${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no vinstall@$ipAddr

# If we're in test mode, change the hostname of the installer vm and
# also start the 3 vagrant target VMs
if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Test mode, change the installer host name to ${lCyan}vinstall${uId}${NC}"
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
        sudo hostnamectl set-hostname vinstall${uId}
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr \
        sudo service networking restart

    echo -e "${lBlue}Testing, start the ${lCyan}ha-serv${lBlue} VMs${NC}"
    vagrant destroy ha-serv${uId}-{1,2,3}
    vagrant up ha-serv${uId}-{1,2,3}
    ./devSetHostList.sh
fi

# Ensure that the voltha VM is running so that images can be secured
echo -e "${lBlue}Ensure that the ${lCyan}voltha VM${lBlue} is running${NC}"
vVM=`virsh list | grep voltha_voltha${uId}`

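# If the voltha VM isn't listed, BuildVoltha.sh (shipped alongside this
# script) is invoked below to bring it up, in either "test" or "install" mode.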
if [ -z "$vVM" ]; then
    if [ "$testMode" == "yes" ]; then
        ./BuildVoltha.sh $1
        rtrn=$?
    else
        # Default to installer mode
        ./BuildVoltha.sh install
        rtrn=$?
    fi
    if [ $rtrn -ne 0 ]; then
        echo -e "${red}Voltha build failed!! ${lCyan}Please review the log and correct the problem${NC}"
        exit 1
    fi
fi

# Extract all the image names and tags from the running voltha VM
# when running in test mode. This will provide the entire suite
# of available containers to the VM cluster.

if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Extracting the docker image list from the voltha VM${NC}"
    volIpAddr=`virsh domifaddr $vVmName${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ../.vagrant/machines/voltha${uId}/libvirt/private_key vagrant@$volIpAddr "docker image ls" > images.tmp
    cat images.tmp | grep -v 5000 | tail -n +2 | awk '{printf(" - %s:%s\n", $1, $2)}' > image-list.cfg
    rm -f images.tmp
    sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
    echo "voltha_containers:" >> ansible/group_vars/all
    cat image-list.cfg >> ansible/group_vars/all
    rm -f image-list.cfg
else
    echo -e "${lBlue}Set up the docker image list from ${lCyan}containers.cfg${NC}"
    sed -i -e '/voltha_containers:/,$d' ansible/group_vars/all
    cat containers.cfg >> ansible/group_vars/all
fi
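# The entries appended to ansible/group_vars/all take the form:
# voltha_containers:
#  - <repository>:<tag>
#  - <repository>:<tag>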

# Install python which is required for ansible
echo -e "${lBlue}Installing python${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y install python

# Make sure the VM is up-to-date
echo -e "${lBlue}Ensure that the VM is up-to-date${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y upgrade

# Create the docker.cfg file in the ansible tree using the VM's IP address
echo 'DOCKER_OPTS="$DOCKER_OPTS --insecure-registry '$ipAddr':5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=http://'$ipAddr':5001"' > ansible/roles/docker/templates/docker.cfg

# Add the voltha vm's information to the ansible tree
echo -e "${lBlue}Add the voltha vm and key to the ansible accessible hosts${NC}"
vIpAddr=`virsh domifaddr voltha_voltha${uId} | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
echo "[voltha]" > ansible/hosts/voltha
echo $vIpAddr >> ansible/hosts/voltha
echo "ansible_ssh_private_key_file: $wd/../.vagrant/machines/voltha${uId}/libvirt/private_key" > ansible/host_vars/$vIpAddr
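# ansible/hosts/voltha now holds a [voltha] inventory group with the VM's IP
# address, and the matching host_vars file points ansible at the
# vagrant-generated private key for that host.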

# Prepare to launch the ansible playbook to configure the installer VM
echo -e "${lBlue}Prepare to launch the ansible playbook to configure the VM${NC}"
echo "[installer]" > ansible/hosts/installer
echo "$ipAddr" >> ansible/hosts/installer
echo "ansible_ssh_private_key_file: $wd/key.pem" > ansible/host_vars/$ipAddr

# Launch the ansible playbooks

echo -e "${lBlue}Launching the ${lCyan}volthainstall${lBlue} ansible playbook on the installer vm${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/installer
rtrn=$?
if [ $rtrn -ne 0 ]; then
    echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
    exit
fi

echo -e "${lBlue}Launching the ${lCyan}volthainstall${lBlue} ansible playbook on the voltha vm${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/voltha
rtrn=$?
if [ $rtrn -ne 0 ]; then
    echo -e "${red}PLAYBOOK FAILED, Exiting${NC}"
    exit
fi
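# The same volthainstall.yml playbook is applied to both inventories; the
# inventory file selects whether the installer VM or the voltha VM is
# being configured.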

if [ "$testMode" == "yes" ]; then
    echo -e "${lBlue}Testing, the install image ${red}WILL NOT${lBlue} be built${NC}"

    # Reboot the installer
    echo -e "${lBlue}Rebooting the installer${NC}"
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo telinit 6
    # Wait for the host to shut down
    sleep 5

    ctr=0
    ipAddr=""
    while [ -z "$ipAddr" ];
    do
        echo -e "${lBlue}Waiting for the VM's IP address${NC}"
        ipAddr=`virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
        sleep 3
        if [ $ctr -eq $ipTimeout ]; then
            echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
            exit
        fi
        ctr=`expr $ctr + 1`
    done

    echo -e "${lBlue}Running the installer${NC}"
    echo "~/installer.sh" > tmp_bash_login
    echo "rm ~/.bash_login" >> tmp_bash_login
    echo "logout" >> tmp_bash_login
    scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem tmp_bash_login vinstall@$ipAddr:.bash_login
    rm -f tmp_bash_login
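    # Logging in below triggers ~/.bash_login on the installer VM, which runs
    # ~/installer.sh and then removes itself before logging out.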
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr

else
    echo -e "${lBlue}Building the install image (this can take a while)${NC}"
    # Create a temporary directory for all the installer files
    mkdir tmp_installer
    cp vmTemplate.xml tmp_installer
    # Shut down the installer vm
    ctr=0
    vStat=`virsh list | grep $iVmName`
    virsh shutdown $iVmName
    while [ ! -z "$vStat" ];
    do
        echo "Waiting for $iVmName to shut down"
        sleep 2
        vStat=`virsh list | grep $iVmName`
        ctr=`expr $ctr + 1`
        if [ $ctr -eq $shutdownTimeout ]; then
            echo -e "${red}Tired of waiting, forcing the VM off${NC}"
            virsh destroy $iVmName
            vStat=`virsh list | grep $iVmName`
        fi
    done
    # Copy the install bootstrap script to the installer directory
    cp BootstrapInstaller.sh tmp_installer
    # Copy the private key to access the VM
    cp key.pem tmp_installer
    pushd tmp_installer > /dev/null 2>&1
    # Copy the vm image to the installer directory
    virsh vol-dumpxml $iVmName.qcow2 default | sed -e 's/<key.*key>//' | sed -e '/^[ ]*$/d' > ${iVmName}_volume.xml
    virsh pool-create-as installer --type dir --target `pwd`
    virsh vol-create-from installer ${iVmName}_volume.xml $iVmName.qcow2 --inputpool default
    virsh pool-destroy installer
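    # A temporary directory-backed storage pool ("installer") is created over
    # tmp_installer so that vol-create-from can copy the installer VM's disk
    # image out of the default pool into the staging area; the pool is then
    # discarded.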
    # The image is copied in as root. It needs to have its ownership changed;
    # this will result in a password prompt.
    sudo chown `whoami`.`whoami` $iVmName.qcow2
    # Now create the installer tar file
    tar cjf ../$installerArchive .
    popd > /dev/null 2>&1
    # Clean up
    rm -fr tmp_installer
    # Final location for the installer
    rm -fr $installerDirectory
    mkdir $installerDirectory
    cp deployInstaller.sh $installerDirectory
    # Check the image size and determine if it needs to be split.
    # To be safe, split the image into chunks smaller than 2G so that
    # it will fit on a FAT32 volume.
    fSize=`ls -l $installerArchive | awk '{print $5}'`
    if [ $fSize -gt 2000000000 ]; then
        echo -e "${lBlue}Installer file too large, breaking into parts${NC}"
        # The file is too large, breaking it up into parts
        sPos=0
        fnn="00"
        while dd if=$installerArchive of=${installerDirectory}/${installerPart}$fnn \
            bs=1900MB count=1 skip=$sPos > /dev/null 2>&1
        do
            sPos=`expr $sPos + 1`
            if [ ! -s ${installerDirectory}/${installerPart}$fnn ]; then
                rm -f ${installerDirectory}/${installerPart}$fnn
                break
            fi
            if [ $sPos -lt 10 ]; then
                fnn="0$sPos"
            else
                fnn="$sPos"
            fi
        done
    else
        cp $installerArchive $installerDirectory
    fi
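    # Note (assumption): if the archive was split, the numbered parts can be
    # re-assembled on the target with: cat installer.part* > installer.tar.bz2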
    # Clean up
    rm $installerArchive
    echo -e "${lBlue}The install image is built and can be found in ${lCyan}$installerDirectory${NC}"
    echo -e "${lBlue}Copy all the files in ${lCyan}$installerDirectory${lBlue} to the transport media${NC}"
fi