Install the CORD-in-a-Box head node into the 'prod' VM

Change-Id: I121130e21ae4268a9dbc9d0a6fb8ff8095ffdcc7
diff --git a/scripts/cord-in-a-box.sh b/scripts/cord-in-a-box.sh
index 5eecc1c..7c475da 100755
--- a/scripts/cord-in-a-box.sh
+++ b/scripts/cord-in-a-box.sh
@@ -8,48 +8,15 @@
 SSHCONFIG=~/.ssh/config
 
 function cleanup_from_previous_test() {
-  set +e
-
   echo "## Cleanup ##"
 
-  echo "Shutting down all Vagrant VMs"
+  echo "Destroying all Vagrant VMs"
   cd $CORDDIR/build
   vagrant destroy
 
-  echo "Destroying juju environment"
-  juju destroy-environment --force -y manual
-
-  VMS=$( sudo uvt-kvm list )
-  for VM in $VMS
-  do
-    echo "Destroying $VM"
-    sudo uvt-kvm destroy $VM
-  done
-
-  echo "Cleaning up files"
-  rm -rf ~/.juju
-  rm -f ~/.ssh/known_hosts
-  rm -rf ~/platform-install
-  rm -rf ~/cord_apps
-  rm -rf ~/.ansible_async
-
-  echo "Removing MAAS"
-  [ -e  /usr/local/bin/remove-maas-components ] && /usr/local/bin/remove-maas-components
-
-  echo "Remove apt-cacher-ng"
-  sudo apt-get remove -y apt-cacher-ng
-  sudo rm -f /etc/apt/apt.conf.d/02apt-cacher-ng
-
-  echo "Removing mgmtbr"
-  ifconfig mgmtbr && sudo ip link set dev mgmtbr down && sudo brctl delbr mgmtbr
-
-  echo "Removing Juju packages"
-  sudo apt-get remove --purge -y $(dpkg --get-selections | grep "juju\|nova\|neutron\|keystone\|glance" | awk '{print $1}')
-  sudo apt-get autoremove -y
-
+  echo "Removing $CORDDIR"
+  cd ~
   rm -rf $CORDDIR
-
-  set -e
 }
 
 function bootstrap() {
@@ -60,10 +27,6 @@
   sudo apt-get -y install qemu-kvm libvirt-bin libvirt-dev curl nfs-kernel-server git build-essential
 
   [ -e ~/.ssh/id_rsa ] || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
-  cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
-
-  # Log into the local node once to get host key
-  ssh -o StrictHostKeyChecking=no localhost "ls > /dev/null"
 
   USER=$(whoami)
   sudo adduser $USER libvirtd
@@ -86,15 +49,6 @@
       echo "checking out opencord gerrit branch: $gerrit_branch"
       repo download ${gerrit_branch/:/ }
     done
-
-    cd $CORDDIR/build
-    sed -i "s/user: 'ubuntu'/user: \"$USER\"/" $CONFIG
-
-    # Set external interface in config file
-    IFACE=$(route | grep default | awk '{print $8}' )
-    SRC="'eth0'"
-    DST="'"$IFACE"'"
-    sed -i "s/$SRC/$DST/" $CONFIG
   fi
 
   cd $CORDDIR/build
@@ -119,116 +73,70 @@
     [ -d /var/lib/libvirt/images/ ] && [ ! -h /var/lib/libvirt/images ] && sudo rmdir /var/lib/libvirt/images
 
     sudo mkdir -p /mnt/extra/libvirt_images
-    sudo mkdir -p /mnt/extra/docker
-    sudo mkdir -p /mnt/extra/docker-registry
-    [ ! -e /var/lib/libvirt/images ] && sudo ln -s /mnt/extra/libvirt_images /var/lib/libvirt/images
-    [ ! -e /var/lib/docker ] && sudo ln -s /mnt/extra/docker /var/lib/docker
-    [ ! -e /docker-registry ] && sudo ln -s /mnt/extra/docker-registry /docker-registry
-
-    cd $CORDDIR/build
-    SRC="#- 'on_cloudlab=True'"
-    DST="- 'on_cloudlab=True'"
-    sed -i "s/$SRC/$DST/" config/cord_in_a_box.yml
+    if [ ! -e /var/lib/libvirt/images ]
+    then
+      sudo ln -s /mnt/extra/libvirt_images /var/lib/libvirt/images
+    fi
   fi
 }
 
-function unfortunate_hacks() {
-  cd $CORDDIR/build
-
-  # Allow compute nodes to PXE boot from mgmtbr
-  sed -i "s/@type='udp']/@type='udp' or @type='bridge']/" \
-    ~/.vagrant.d/gems/gems/vagrant-libvirt-0.0.35/lib/vagrant-libvirt/action/set_boot_order.rb
-}
-
-function corddev_up() {
+function vagrant_vms_up() {
   cd $CORDDIR/build
 
   sudo su $USER -c 'vagrant up corddev --provider libvirt'
+  sudo su $USER -c 'vagrant up prod --provider libvirt'
 
   # This is a workaround for a weird issue with ARP cache timeout breaking 'vagrant ssh'
   # It allows SSH'ing to the machine via 'ssh corddev'
-  sudo su $USER -c "grep corddev $SSHCONFIG || vagrant ssh-config corddev >> $SSHCONFIG"
+  sudo su $USER -c "vagrant ssh-config corddev prod > $SSHCONFIG"
+
+  scp ~/.ssh/id_rsa* corddev:.ssh
+  ssh corddev "chmod go-r ~/.ssh/id_rsa"
 }
 
 function install_head_node() {
   cd $CORDDIR/build
 
-  # Network setup to install physical server as head node
-  BRIDGE=$( route -n | grep 10.100.198.0 | awk '{print $8}' )
-  ip addr list dev $BRIDGE | grep 10.100.198.201 || sudo ip addr add dev $BRIDGE 10.100.198.201
-  ifconfig mgmtbr || sudo brctl addbr mgmtbr
-  sudo ifconfig mgmtbr 10.1.0.1/24 up
-
   # SSH config saved earlier allows us to connect to VM without running 'vagrant'
-  scp ~/.ssh/id_rsa* corddev:.ssh
   ssh corddev "cd /cord/build; ./gradlew fetch"
   ssh corddev "cd /cord/build; ./gradlew buildImages"
-  ssh corddev "cd /cord/build; ./gradlew -PdeployConfig=$VMDIR/$CONFIG -PtargetReg=10.100.198.201:5000 publish"
+  ssh corddev "cd /cord/build; ping -c 3 prod; ./gradlew -PdeployConfig=$VMDIR/$CONFIG -PtargetReg=10.100.198.201:5000 publish"
   ssh corddev "cd /cord/build; ./gradlew -PdeployConfig=$VMDIR/$CONFIG deploy"
-
-  # SSH config was overwritten by the deploy step
-  # Will go away when head node runs in 'prod' VM
-  sudo su $USER -c "grep corddev $SSHCONFIG || vagrant ssh-config corddev >> $SSHCONFIG"
 }
 
 function set_up_maas_user() {
-  # Set up MAAS user to restart nodes via libvirt
-  sudo mkdir -p /home/maas
-  sudo chown maas:maas /home/maas
-  sudo chsh -s /bin/bash maas
+  # Set up MAAS user on server to restart nodes via libvirt
+  grep maas /etc/passwd || sudo useradd -m maas
   sudo adduser maas libvirtd
 
-  sudo su maas -c 'cp ~/.ssh/id_rsa.pub ~/.ssh/authorized_keys'
-}
+  # Copy generated public key to maas user's authorized_keys
+  sudo su maas -c "mkdir -p ~/.ssh"
+  sudo cp $HOME/.ssh/id_rsa.pub ~maas/.ssh/authorized_keys
+  sudo chown maas:maas ~maas/.ssh/authorized_keys
 
-FIRST_COMPUTE_NODE=true
+  # Copy generated private key to maas user's home dir in prod VM
+  scp $HOME/.ssh/id_rsa prod:/tmp
+  ssh prod "sudo mkdir -p ~maas/.ssh"
+  ssh prod "sudo cp /tmp/id_rsa ~maas/.ssh/id_rsa"
+  ssh prod "sudo chown -R maas:maas ~maas/.ssh"
+}
 
 function add_compute_node() {
   echo add_compute_node: $1 $2
-    
+
   cd $CORDDIR/build
   sudo su $USER -c "vagrant up $1 --provider libvirt"
 
-  if [ "$FIRST_COMPUTE_NODE" == true ]; then
-    # Change MAC address of bridge to match cord-pod service profile
-    # This change won't survive a reboot
-    BRIDGE=$( route -n | grep 10.6.1.0 | awk '{print $8}' )
-    sudo ifconfig $BRIDGE hw ether 02:42:0a:06:01:01
+  # Change MAC address of bridge to match cord-pod service profile
+  # This change won't survive a reboot
+  BRIDGE=$( route -n | grep 10.6.1.0 | awk '{print $8}' )
+  sudo ifconfig $BRIDGE hw ether 02:42:0a:06:01:01
 
-    # Add gateway IP addresses to $BRIDGE for vsg and exampleservice tests
-    # This change won't survive a reboot
-    sudo ip address add 10.6.1.129 dev $BRIDGE
-    sudo ip address add 10.6.1.193 dev $BRIDGE
+  ip addr list | grep 10.6.1.129 || sudo ip address add 10.6.1.129 dev $BRIDGE
+  ip addr list | grep 10.6.1.193 || sudo ip address add 10.6.1.193 dev $BRIDGE
 
-    FIRST_COMPUTE_NODE=false
-  fi
-
-  # Sign into MAAS
-  KEY=$(sudo maas-region-admin apikey --username=cord)
-  maas login cord http://localhost/MAAS/api/1.0 $KEY
-
-  NODEID=$(maas cord nodes list|jq -r '.[] | select(.status == 0).system_id')
-  until [ "$NODEID" ]; do
-    echo "Waiting for the compute node to transition to NEW state"
-    sleep 15
-    NODEID=$(maas cord nodes list|jq -r '.[] | select(.status == 0).system_id')
-  done
-
-  # Add remote power state
-  maas cord node update $NODEID power_type="virsh" \
-    power_parameters_power_address="qemu+ssh://maas@localhost/system" \
-    power_parameters_power_id="$2"
-
-  STATUS=$(sudo /usr/local/bin/get-node-prov-state |jq ".[] | select(.id == \"$NODEID\").status")
-  until [ "$STATUS" == "2" ]; do
-    if [ "$STATUS" == "3" ]; then
-      echo "*** [WARNING] Possible error in node provisioning process"
-      echo "*** [WARNING] Check /etc/maas/ansible/logs/$NODEID.log"
-    fi
-    echo "Waiting for the compute node to be fully provisioned"
-    sleep 60
-    STATUS=$(sudo /usr/local/bin/get-node-prov-state |jq ".[] | select(.id == \"$NODEID\").status")
-  done
+  # Set up power cycling for the compute node and wait for it to be provisioned
+  ssh prod "cd /cord/build/ansible; ansible-playbook maas-provision.yml --extra-vars \"maas_user=maas vagrant_name=$2\""
 
   echo ""
   echo "compute_node is fully provisioned!"
@@ -237,7 +145,7 @@
 function run_e2e_test () {
   cd $CORDDIR/build
 
-  # User has been added to the libvirtd group, but su $USER to be safe
+  # User has been added to the libvirtd group, but su $USER to be safe
   ssh corddev "cd /cord/build; ./gradlew -PdeployConfig=$VMDIR/$CONFIG postDeployTests"
 }
 
@@ -297,12 +205,9 @@
   cleanup_from_previous_test
 fi
 
-set -e
-
 bootstrap
 cloudlab_setup
-unfortunate_hacks
-corddev_up
+vagrant_vms_up
 
 if [[ $SETUP_ONLY -ne 0 ]]
 then
@@ -313,7 +218,7 @@
 install_head_node
 set_up_maas_user
 
-#Limiting the maximum number of compute nodes that can be supported to 2. If 
+#Limiting the maximum number of compute nodes that can be supported to 2. If
 #more than 2 compute nodes are needed, this script need to be enhanced to set
 #the environment variable NUM_COMPUTE_NODES before invoking the Vagrant commands
 if [[ $NUM_COMPUTE_NODES -gt 2 ]]