#!/usr/bin/env bash
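# cord-in-a-box.sh
# Builds a complete CORD-in-a-Box environment on a single node: bootstraps the
# build tools, brings up the corddev development VM, installs this machine as
# the head node (MAAS, OpenStack, XOS, ONOS) via the gradle targets, and
# provisions a virtual compute node. Run with -h for usage.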

set -e
set -x

CORDDIR=~/opencord
VMDIR=/cord/build/
CONFIG=config/cord_in_a_box.yml

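# Tear down anything left over from a previous CiaB run: Vagrant VMs, the juju
# environment, uvt-kvm VMs, MAAS, apt-cacher-ng, the mgmtbr bridge,
# Juju/OpenStack packages, and the checked-out $CORDDIR tree.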
function cleanup_from_previous_test() {
  set +e

  echo "## Cleanup ##"

  echo "Shutting down all Vagrant VMs"
  cd $CORDDIR/build
  vagrant destroy

  echo "Destroying juju environment"
  juju destroy-environment --force -y manual

  VMS=$( sudo uvt-kvm list )
  for VM in $VMS
  do
    echo "Destroying $VM"
    sudo uvt-kvm destroy $VM
  done

  echo "Cleaning up files"
  rm -rf ~/.juju
  rm -f ~/.ssh/known_hosts
  rm -rf ~/platform-install
  rm -rf ~/cord_apps
  rm -rf ~/.ansible_async

  echo "Removing MAAS"
  [ -e /usr/local/bin/remove-maas-components ] && /usr/local/bin/remove-maas-components

  echo "Removing apt-cacher-ng"
  sudo apt-get remove -y apt-cacher-ng
  sudo rm -f /etc/apt/apt.conf.d/02apt-cacher-ng

  echo "Removing mgmtbr"
  ifconfig mgmtbr && sudo ip link set dev mgmtbr down && sudo brctl delbr mgmtbr

  echo "Removing Juju packages"
  sudo apt-get remove --purge -y $(dpkg --get-selections | grep "juju\|nova\|neutron\|keystone\|glance" | awk '{print $1}')
  sudo apt-get autoremove -y

  rm -rf $CORDDIR

  set -e
}

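# Install the build prerequisites (Vagrant, libvirt/KVM, repo), generate an
# SSH key for the local node, check out the CORD source tree with repo, and
# prepare the Vagrant libvirt provider and the ubuntu/trusty64 box.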
function bootstrap() {
  cd ~
  sudo apt-get update
  [ -e vagrant_1.8.5_x86_64.deb ] || wget https://releases.hashicorp.com/vagrant/1.8.5/vagrant_1.8.5_x86_64.deb
  sudo dpkg -i vagrant_1.8.5_x86_64.deb
  sudo apt-get -y install qemu-kvm libvirt-bin libvirt-dev curl

  [ -e ~/.ssh/id_rsa ] || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
  cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys

  # Log into the local node once to get host key
  ssh -o StrictHostKeyChecking=no localhost "ls > /dev/null"

  USER=$(whoami)
  sudo adduser $USER libvirtd

  sudo curl -o /usr/local/bin/repo https://storage.googleapis.com/git-repo-downloads/repo
  sudo chmod a+x /usr/local/bin/repo

  if [ ! -d "$CORDDIR" ]
  then
    mkdir $CORDDIR && cd $CORDDIR
    git config --global user.name 'Test User'
    git config --global user.email 'test@null.com'
    git config --global color.ui false

    repo init -u https://gerrit.opencord.org/manifest -b master -g build,onos
    repo sync

    cd $CORDDIR/build
    sed -i "s/user: 'ubuntu'/user: \"$USER\"/" $CONFIG

    # Set external interface in config file
    IFACE=$(route | grep default | awk '{print $8}' )
    sed -i "s/eth0/$IFACE/" $CONFIG
  fi

  cd $CORDDIR/build
  vagrant plugin install vagrant-libvirt --plugin-version 0.0.35
  vagrant plugin install vagrant-mutate
  vagrant box list ubuntu/trusty64 | grep virtualbox || vagrant box add ubuntu/trusty64
  vagrant box list ubuntu/trusty64 | grep libvirt || vagrant mutate ubuntu/trusty64 libvirt --input-provider virtualbox
}

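# On CloudLab only: put the libvirt image store on the extra disk and enable
# the on_cloudlab flag in the CiaB config. A no-op on other hosts.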
function cloudlab_setup() {
  if [ -e /usr/testbed/bin/mkextrafs ]
  then
    sudo /usr/testbed/bin/mkextrafs -r /dev/sdb -qf "/var/lib/libvirt/images/"

    cd $CORDDIR/build
    SRC="#- 'on_cloudlab=True'"
    DST="- 'on_cloudlab=True'"
    sed -i "s/$SRC/$DST/" config/cord_in_a_box.yml
  fi
}

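# Workarounds needed for the single-node CiaB environment; the inline comments
# describe what each one patches.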
function unfortunate_hacks() {
  cd $CORDDIR/build

  # Disable interface rename during MAAS provision
  sed -i 's/"INTERFACE_CONFIG=1"/"INTERFACE_CONFIG=0"/' maas/roles/maas/templates/automation-compose.yml.j2

  # Don't require fabric_ip
  SRC="fabric_ip | mandatory"
  DST="fabric_ip | default('manual')"
  sed -i "s/$SRC/$DST/" maas/roles/compute-node/vars/main.yml

  # Allow compute nodes to PXE boot from mgmtbr
  sed -i "s/@type='udp']/@type='udp' or @type='bridge']/" \
    ~/.vagrant.d/gems/gems/vagrant-libvirt-0.0.35/lib/vagrant-libvirt/action/set_boot_order.rb

  # Should get these keys inside the VM in another way
  cp ~/.ssh/id_rsa* $CORDDIR
}

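# Bring up the corddev development VM under the libvirt provider.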
function corddev_up() {
  cd $CORDDIR/build

  sudo su $USER -c 'vagrant up corddev --provider libvirt'
}

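# Set up the management bridge on the physical host, then drive the gradle
# fetch/buildImages/publish/deploy targets from inside corddev to install this
# machine as the CORD head node.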
function install_head_node() {
  cd $CORDDIR/build

  # Network setup to install physical server as head node
  ip addr list dev virbr2 | grep 10.100.198.201 || sudo ip addr add dev virbr2 10.100.198.201
  ifconfig mgmtbr || sudo brctl addbr mgmtbr
  sudo ifconfig mgmtbr 10.1.0.1/24 up

  # The user was added to the libvirtd group, but this shell may not have the
  # new group yet, so run vagrant via 'su $USER' to be safe
  sudo su $USER -c "vagrant ssh corddev -c \"cp /cord/id_rsa* ~/.ssh\""
  sudo su $USER -c "vagrant ssh corddev -c \"cd /cord/build; ./gradlew fetch\""
  sudo su $USER -c "vagrant ssh corddev -c \"cd /cord/build; ./gradlew buildImages\""
  sudo su $USER -c "vagrant ssh corddev -c \"cd /cord/build; ./gradlew -PdeployConfig=$VMDIR/$CONFIG -PtargetReg=10.100.198.201:5000 publish\""
  sudo su $USER -c "vagrant ssh corddev -c \"cd /cord/build; ./gradlew -PdeployConfig=$VMDIR/$CONFIG deploy\""
}

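# Give the maas user a home directory, a shell, libvirt access, and an
# authorized SSH key so that MAAS can power-cycle the virtual compute node
# through virsh.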
function set_up_maas_user() {
  # Set up MAAS user to restart nodes via libvirt
  sudo mkdir -p /home/maas
  sudo chown maas:maas /home/maas
  sudo chsh -s /bin/bash maas
  sudo adduser maas libvirtd

  sudo su maas -c 'cp ~/.ssh/id_rsa.pub ~/.ssh/authorized_keys'
}

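# Bring up the virtual compute node, wait for MAAS to see it, set its power
# type to virsh so MAAS can control it, and wait for provisioning to finish.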
function add_compute_node() {
  cd $CORDDIR/build
  sudo su $USER -c 'vagrant up compute_node --provider libvirt'

  # Sign into MAAS
  KEY=$(sudo maas-region-admin apikey --username=cord)
  maas login cord http://localhost/MAAS/api/1.0 $KEY

  NODEID=$(maas cord nodes list|jq -r '.[] | select(.status == 0).system_id')
  until [ "$NODEID" ]; do
    echo "Waiting for the compute node to transition to NEW state"
    sleep 15
    NODEID=$(maas cord nodes list|jq -r '.[] | select(.status == 0).system_id')
  done

  # Add remote power state
  maas cord node update $NODEID power_type="virsh" \
    power_parameters_power_address="qemu+ssh://maas@localhost/system" \
    power_parameters_power_id="build_compute_node"

  STATUS=$(sudo /usr/local/bin/get-node-prov-state |jq ".[] | select(.id == \"$NODEID\").status")
  until [ "$STATUS" == "2" ]; do
    if [ "$STATUS" == "3" ]; then
      echo "*** ERROR in provisioning!"
      echo "*** Check /etc/maas/ansible/logs/$NODEID.log"
      exit 1
    fi
    echo "Waiting for the compute node to be fully provisioned"
    sleep 60
    STATUS=$(sudo /usr/local/bin/get-node-prov-state |jq ".[] | select(.id == \"$NODEID\").status")
  done

  echo ""
  echo "compute_node is fully provisioned!"
}

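# Run the end-to-end test (gradle postDeployTests) from inside corddev.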
function run_e2e_test () {
  cd $CORDDIR/build

  # The user was added to the libvirtd group, but this shell may not have the
  # new group yet, so run vagrant via 'su $USER' to be safe
  sudo su $USER -c "vagrant ssh corddev -c \"cd /cord/build; ./gradlew -PdeployConfig=$VMDIR/$CONFIG postDeployTests\""
}

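# Placeholder for diagnostics collection; not yet implemented.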
function run_diagnostics() {
  echo "*** COLLECTING DIAGNOSTIC INFO NOT CURRENTLY IMPLEMENTED"
  # Need to fix up inventory to collect info from compute nodes
  # Using juju-ansible is one possibility
  #echo "*** COLLECTING DIAGNOSTIC INFO - check ~/diag-* on the head node"
  #ansible-playbook -i $INVENTORY cord-diag-playbook.yml
}

# Parse options
RUN_TEST=0
SETUP_BRANCH="master"
DIAGNOSTICS=0
CLEANUP=0

while getopts "b:cdehi:p:r:ts:" opt; do
  case ${opt} in
    b ) XOS_BRANCH=$OPTARG
      ;;
    c ) CLEANUP=1
      ;;
    d ) DIAGNOSTICS=1
      ;;
    h ) echo "Usage:"
      echo "  $0                install OpenStack and prep XOS and ONOS VMs [default]"
      echo "  $0 -b <branch>    checkout <branch> of the xos git repo"
      echo "  $0 -c             cleanup from previous test"
      echo "  $0 -d             run diagnostic collector"
      echo "  $0 -h             display this help message"
      echo "  $0 -t             do install, bring up cord-pod configuration, run E2E test"
      exit 0
      ;;
    t ) RUN_TEST=1
      ;;
    \? ) echo "Invalid option: -$OPTARG"
      exit 1
      ;;
  esac
done

# What to do
if [[ $CLEANUP -eq 1 ]]
then
  cleanup_from_previous_test
fi

set -e

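# Full CiaB build: bootstrap the host, bring up corddev, install the head
# node, and add a virtual compute node.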
bootstrap
cloudlab_setup
unfortunate_hacks
corddev_up
install_head_node
set_up_maas_user
add_compute_node

if [[ $RUN_TEST -eq 1 ]]
then
  run_e2e_test
fi

if [[ $DIAGNOSTICS -eq 1 ]]
then
  run_diagnostics
fi

exit 0