#!/bin/bash

baseImage="Ubuntu1604LTS"
iVmName="Ubuntu1604LTS-1"
iVmNetwork="vagrant-libvirt"
shutdownTimeout=5
ipTimeout=10
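# Note: the two timeouts above are iteration counts, not seconds. The shutdown
# loop below sleeps 2 seconds per pass and the IP-address loop sleeps 3 seconds
# per pass, so the effective waits are roughly 10s and 30s respectively.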

lBlue='\033[1;34m'
green='\033[0;32m'
orange='\033[0;33m'
NC='\033[0m'
red='\033[0;31m'
yellow='\033[1;33m'
dGrey='\033[1;30m'
lGrey='\033[1;37m'
lCyan='\033[1;36m'

wd=`pwd`

# Validate that vagrant is installed.
echo -e "${lBlue}Ensure that ${lCyan}vagrant${lBlue} is installed${NC}"
vInst=`which vagrant`

if [ -z "$vInst" ]; then
    wget https://releases.hashicorp.com/vagrant/1.9.5/vagrant_1.9.5_x86_64.deb
    sudo dpkg -i vagrant_1.9.5_x86_64.deb
    rm vagrant_1.9.5_x86_64.deb
fi
unset vInst

# Validate that ansible is installed
echo -e "${lBlue}Ensure that ${lCyan}ansible${lBlue} is installed${NC}"
aInst=`which ansible`

if [ -z "$aInst" ]; then
    sudo apt-get install software-properties-common
    sudo apt-add-repository ppa:ansible/ansible
    sudo apt-get update
    sudo apt-get install ansible
fi
unset aInst

# Ensure that the voltha VM is running so that images can be secured
echo -e "${lBlue}Ensure that the ${lCyan}voltha VM${lBlue} is running${NC}"
vVM=`virsh list | grep voltha_voltha`

if [ -z "$vVM" ]; then
    ./BuildVoltha.sh
fi
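# BuildVoltha.sh is assumed to bring up the voltha development VM (the libvirt
# domain matched as voltha_voltha above) when it is not already running,
# presumably so it can serve as the source of the images referred to above.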

# Check whether this is intended to be a test environment; if so, start 3 VMs
# to emulate the production installation cluster.
if [ $# -eq 1 -a "$1" == "test" ]; then
    echo -e "${lBlue}Testing, create the ${lCyan}ha-serv${lBlue} VMs${NC}"
    vagrant destroy ha-serv{1,2,3}
    vagrant up ha-serv{1,2,3}
    ./devSetHostList.sh
else
    rm -fr .test
fi
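# In test mode the three ha-serv VMs stand in for the production cluster nodes
# and devSetHostList.sh is assumed to record them (along with the .test marker)
# for the later install stages; a non-test run just clears any stale .test marker.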

# Shut down the domain in case it's running.
echo -e "${lBlue}Shut down the ${lCyan}$iVmName${lBlue} VM if running${NC}"
ctr=0
vStat=`virsh list | grep $iVmName`
while [ ! -z "$vStat" ];
do
    virsh shutdown $iVmName
    echo "Waiting for $iVmName to shut down"
    sleep 2
    vStat=`virsh list | grep $iVmName`
    ctr=`expr $ctr + 1`
    if [ $ctr -eq $shutdownTimeout ]; then
        echo -e "${red}Tired of waiting, forcing the VM off${NC}"
        virsh destroy $iVmName
        vStat=`virsh list | grep $iVmName`
    fi
done
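# The loop above requests a graceful shutdown every 2 seconds and, after
# $shutdownTimeout unsuccessful passes, falls back to virsh destroy (a hard
# power-off) so the rebuild below can proceed.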

# Delete the VM and ignore any errors should they occur
echo -e "${lBlue}Undefining the ${lCyan}$iVmName${lBlue} domain${NC}"
virsh undefine $iVmName

# Remove the associated volume
echo -e "${lBlue}Removing the ${lCyan}$iVmName.qcow2${lBlue} volume${NC}"
virsh vol-delete "${iVmName}.qcow2" default

# Clone the base vanilla ubuntu install
echo -e "${lBlue}Cloning the ${lCyan}$baseImage.qcow2${lBlue} to ${lCyan}$iVmName.qcow2${NC}"
virsh vol-clone "${baseImage}.qcow2" "${iVmName}.qcow2" default
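# Both vol-delete and vol-clone operate on the libvirt 'default' storage pool;
# the base volume ${baseImage}.qcow2 is assumed to already exist there (e.g.
# produced beforehand by a separate base-image build step).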

# Create the xml file and define the VM for virsh
echo -e "${lBlue}Defining the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
cat vmTemplate.xml | sed -e "s/{{VMName}}/$iVmName/g" | sed -e "s/{{VMNetwork}}/$iVmNetwork/g" > tmp.xml

virsh define tmp.xml

rm tmp.xml

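# vmTemplate.xml carries {{VMName}} and {{VMNetwork}} placeholders; the sed
# pipeline above substitutes the installer VM's name and the libvirt network
# ($iVmNetwork) before the transient tmp.xml is handed to virsh define.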
# Start the VM; if it's already running just ignore the error
echo -e "${lBlue}Starting the ${lCyan}$iVmName${lBlue} virtual machine${NC}"
virsh start $iVmName > /dev/null 2>&1

# Generate a keypair for communicating with the VM
echo -e "${lBlue}Generating the key-pair for communication with the VM${NC}"
ssh-keygen -f ./key -t rsa -N ''

mv key key.pem
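# ssh-keygen writes ./key (private) and ./key.pub (public); the private half is
# renamed key.pem and is what the ssh/scp/ansible steps below pass via -i, while
# the public half is embedded in the pre-configuration script created next.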

# Clone BashLogin.sh and add the public key to it for later use.
echo -e "${lBlue}Creating the pre-configuration script${NC}"
cp BashLogin.sh bash_login.sh
echo "cat <<HERE > .ssh/authorized_keys" >> bash_login.sh
cat key.pub >> bash_login.sh
echo "HERE" >> bash_login.sh
echo "chmod 400 .ssh/authorized_keys" >> bash_login.sh
echo "rm .bash_login" >> bash_login.sh
echo "logout" >> bash_login.sh
rm key.pub
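# The generated bash_login.sh ends up doing roughly the following on first
# login (the leading part comes from BashLogin.sh, which is assumed to create
# the .ssh directory):
#
#   cat <<HERE > .ssh/authorized_keys
#   ssh-rsa AAAA...   (the freshly generated public key)
#   HERE
#   chmod 400 .ssh/authorized_keys
#   rm .bash_login
#   logout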

# Get the VM's IP address
ctr=0
ipAddr=""
while [ -z "$ipAddr" ];
do
    echo -e "${lBlue}Waiting for the VM's IP address${NC}"
    ipAddr=`virsh domifaddr $iVmName | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
    sleep 3
    if [ $ctr -eq $ipTimeout ]; then
        echo -e "${red}Tired of waiting, please adjust the ipTimeout if the VM is slow to start${NC}"
        exit
    fi
    ctr=`expr $ctr + 1`
done
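# 'virsh domifaddr' output typically looks like the illustrative sample below;
# tail skips the two header lines, awk picks the Address column and sed strips
# the /prefix length:
#
#    Name       MAC address          Protocol     Address
#   -------------------------------------------------------------------
#    vnet0      52:54:00:xx:xx:xx    ipv4         192.168.121.42/24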

echo -e "${lBlue}The IP address is: ${lCyan}$ipAddr${NC}"

# Copy the pre-config file to the VM
echo -e "${lBlue}Transferring pre-configuration script to the VM${NC}"
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no bash_login.sh vinstall@$ipAddr:.bash_login

rm bash_login.sh

# Run the pre-config file on the VM
echo -e "${lBlue}Running the pre-configuration script on the VM${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no vinstall@$ipAddr
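# Note: the scp/ssh above run before the new key is installed, so they are
# expected to authenticate with the vinstall account's password. Simply logging
# in triggers .bash_login, which installs the public key, removes itself and
# logs straight back out; every later connection then uses -i key.pem.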

# Install python, which is required for ansible
echo -e "${lBlue}Installing python${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y install python

# Make sure the VM is up-to-date
echo -e "${lBlue}Ensure that the VM is up-to-date${NC}"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get update
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem vinstall@$ipAddr sudo apt-get -y upgrade

# Copy the apt repository to the VM because it's way too slow using ansible
#echo -e "${red}NOT COPYING${lBlue} the apt-repository to the VM, ${red}TESTING ONLY REMOVE FOR PRODUCTION${NC}"
#echo -e "${lBlue}Copy the apt-repository to the VM${NC}"
#scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i key.pem -r apt-mirror vinstall@$ipAddr:apt-mirror
# Create the docker.cfg file in the ansible tree using the VM's IP address
echo 'DOCKER_OPTS="$DOCKER_OPTS --insecure-registry '$ipAddr':5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=http://'$ipAddr':5001"' > ansible/roles/docker/templates/docker.cfg
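# With an illustrative $ipAddr of 192.168.121.42 the generated docker.cfg contains:
#
#   DOCKER_OPTS="$DOCKER_OPTS --insecure-registry 192.168.121.42:5000 -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=http://192.168.121.42:5001"
#
# i.e. the installer VM is expected to serve an insecure docker registry on
# port 5000 and a registry mirror on port 5001.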

# Add the voltha vm's information to the ansible tree
echo -e "${lBlue}Add the voltha vm and key to the ansible accessible hosts${NC}"
vIpAddr=`virsh domifaddr voltha_voltha | tail -n +3 | awk '{ print $4 }' | sed -e 's~/.*~~'`
echo "[voltha]" > ansible/hosts/voltha
echo $vIpAddr >> ansible/hosts/voltha
echo "ansible_ssh_private_key_file: $wd/../.vagrant/machines/voltha/libvirt/private_key" > ansible/host_vars/$vIpAddr

# Prepare to launch the ansible playbook to configure the installer VM
echo -e "${lBlue}Prepare to launch the ansible playbook to configure the VM${NC}"
echo "[installer]" > ansible/hosts/installer
echo "$ipAddr" >> ansible/hosts/installer
echo "ansible_ssh_private_key_file: $wd/key.pem" > ansible/host_vars/$ipAddr
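# At this point the ansible tree holds two one-host inventories plus matching
# host_vars files, roughly:
#
#   ansible/hosts/voltha        [voltha]     followed by the voltha VM's IP
#   ansible/hosts/installer     [installer]  followed by the installer VM's IP
#   ansible/host_vars/<IP>      ansible_ssh_private_key_file: <key for that host>
#
# so each playbook run below can reach its target VM over ssh with the right key.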

# Launch the ansible playbook
echo -e "${lBlue}Launching the ansible playbook${NC}"
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/installer
ansible-playbook ansible/volthainstall.yml -i ansible/hosts/voltha