adding "new/official" Jenkinsfile

Change-Id: I79e2c7d484e4e929c4d5d8936026ad6a5ba077db
diff --git a/Jenkinsfile b/Jenkinsfile
new file mode 100644
index 0000000..7d78e14
--- /dev/null
+++ b/Jenkinsfile
@@ -0,0 +1,454 @@
+// Copyright 2017-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import groovy.json.JsonSlurperClassic
+
+def filename = "manifest-${GERRIT_BRANCH}.xml"
+def manifestUrl = 'https://gerrit.opencord.org/manifest'
+deployment_config = null
+pod_config = null
+
+node ("${devNodeName}") {
+    timeout (time: 240) {
+        stage ("Clean-up previous build") {
+            sh "make -C build clean-all || true"
+            sh "rm -rf *"
+        }
+        stage ("Parse deployment configuration file") {
+            sh returnStdout: true, script: 'rm -rf ${configRepoBaseDir}'
+            sh returnStdout: true, script: 'git clone -b ${GERRIT_BRANCH} ${configRepoUrl}'
+            deployment_config = readYaml file: "${configRepoBaseDir}${configRepoFile}"
+            pod_config = readYaml file: "${configRepoBaseDir}${deployment_config.pod_config.file_name}"
+        }
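+        // Illustrative sketch of the deployment configuration shape, inferred
+        // from the fields referenced below; keys shown with hypothetical values.
+        //
+        //   head:            { ip: 10.90.0.2, user: cord, pass: <password> }
+        //   dev_node:        { user: jenkins }
+        //   maas:            { ip: 10.90.0.2, api_key: <key>, head_system_id: <id> }
+        //   compute_nodes:   [ { ipmi: { ip: 10.90.0.10, user: admin, pass: <password> } }, ... ]
+        //   fabric_switches: [ { ip: 10.6.0.1, mac: "cc:37:ab:00:00:01", user: root,
+        //                        pass: <password>, role: leaf-1, breakout_ports: [ 5 ] }, ... ]
+        //   pod_config:      { file_name: <pod>.yml, repo_url: <git url> }
+        //   make_release:    false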
+        stage ('Remove old head node from known hosts') {
+            sh "ssh-keygen -R ${deployment_config.head.ip}"
+        }
+        stage ('Checkout cord repo') {
+            checkout changelog: false, poll: false, scm: [$class: 'RepoScm', currentBranch: true, manifestBranch: params.branch, manifestRepositoryUrl: "${manifestUrl}", quiet: true]
+        }
+
+        try {
+            dir('build') {
+                stage ("Re-deploy head node") {
+                    maasOps: {
+                        sh "maas login maas http://${deployment_config.maas.ip}/MAAS/api/2.0 ${deployment_config.maas.api_key}"
+                        sh "maas maas machine release ${deployment_config.maas.head_system_id}"
+
+                        timeout(time: 15) {
+                            waitUntil {
+                                try {
+                                    sh "maas maas machine read ${deployment_config.maas.head_system_id} | grep Ready"
+                                    return true
+                                } catch (exception) {
+                                    return false
+                                }
+                            }
+                        }
+
+                        sh 'maas maas machines allocate'
+                        sh "maas maas machine deploy ${deployment_config.maas.head_system_id}"
+
+                        timeout(time: 30) {
+                            waitUntil {
+                                try {
+                                    sh "maas maas machine read ${deployment_config.maas.head_system_id} | grep Deployed"
+                                    return true
+                                } catch (exception) {
+                                    return false
+                                }
+                            }
+                        }
+                    }
+                }
+
+                stage ("Download CORD POD configuration") {
+                    sh "cd ../orchestration/profiles; git clone -b ${GERRIT_BRANCH} ${deployment_config.pod_config.repo_url} automation"
+                }
+
+                stage ("Generate CORD configuration") {
+                    sh "make PODCONFIG_PATH=../orchestration/profiles/automation/${deployment_config.pod_config.file_name} config"
+                }
+
+                if (deployment_config.fabric_switches != null) {
+                    stage("Reserve IPs for fabric switches") {
+                        for(int i=0; i < deployment_config.fabric_switches.size(); i++) {
+                            def str = createMACIPbindingStr("fabric", i+1,
+                                                           "${deployment_config.fabric_switches[i].mac}",
+                                                           "${deployment_config.fabric_switches[i].ip}")
+                            sh "echo $str >> maas/roles/maas/files/dhcpd.reservations"
+                        }
+                    }
+                }
+
+                stage ("Deploy") {
+                    sh "make build"
+                }
+
+                if (deployment_config.compute_nodes != null) {
+                    stage ("Power cycle compute nodes") {
+                        for(int i=0; i < deployment_config.compute_nodes.size(); i++) {
+                            sh "ipmitool -U ${deployment_config.compute_nodes[i].ipmi.user} -P ${deployment_config.compute_nodes[i].ipmi.pass} -H ${deployment_config.compute_nodes[i].ipmi.ip} power cycle"
+                        }
+                    }
+
+                    stage ("Wait for compute nodes to get deployed") {
+                        sh "ssh-keygen -f /home/${deployment_config.dev_node.user}/.ssh/known_hosts -R ${deployment_config.head.ip}"
+                        def cordApiKey = runHeadNodeCmd("sudo maas-region-admin apikey --username ${deployment_config.head.user}")
+                        runHeadNodeCmd("maas login pod-maas http://${deployment_config.head.ip}/MAAS/api/1.0 ${cordApiKey}")
+                        timeout(time: 90) {
+                            waitUntil {
+                                try {
+                                    num = runHeadNodeCmd("maas pod-maas nodes list | grep substatus_name | grep -i deployed | wc -l").trim()
+                                    return num.toInteger() == deployment_config.compute_nodes.size()
+                                } catch (exception) {
+                                    return false
+                                }
+                            }
+                        }
+                    }
+
+                    stage ("Wait for compute nodes to be provisioned") {
+                        timeout(time:45) {
+                            waitUntil {
+                                try {
+                                    num = runHeadNodeCmd("cord prov list | grep -i node | grep -i complete | wc -l").trim()
+                                    return num.toInteger() == deployment_config.compute_nodes.size()
+                                } catch (exception) {
+                                    return false
+                                }
+                            }
+                        }
+                    }
+                }
+
+                if (deployment_config.fabric_switches != null) {
+                    stage ("Wait for fabric switches to get deployed") {
+                        runFabricSwitchCmdAll("sudo onl-onie-boot-mode install")
+                        runFabricSwitchCmdAll("sudo reboot")
+                        // Ensure that switches get provisioned after ONIE reinstall.
+                        // Delete them if they were provisioned earlier. If the switches are
+                        // not present in 'cord prov list', this command has no effect.
+                        for(int i=0; i < deployment_config.fabric_switches.size(); i++) {
+                            runHeadNodeCmd("cord prov delete ${deployment_config.fabric_switches[i].mac}")
+                        }
+                        timeout(time: 45) {
+                            waitUntil {
+                                try {
+                                    def harvestCompleted = runHeadNodeCmd("cord harvest list | grep -i fabric | wc -l").trim()
+                                    return harvestCompleted.toInteger() == deployment_config.fabric_switches.size()
+                                } catch (exception) {
+                                    return false
+                                }
+                            }
+                        }
+                    }
+
+                    stage ("Wait for fabric switches to be provisioned") {
+                        timeout(time:45) {
+                            waitUntil {
+                                try {
+                                    def provCompleted = 0
+                                    for(int i=0; i < deployment_config.fabric_switches.size(); i++) {
+                                        def count = runHeadNodeCmd("cord prov list | grep -i ${deployment_config.fabric_switches[i].ip} | grep -i complete | wc -l").trim()
+                                        provCompleted = provCompleted + count.toInteger()
+                                    }
+                                    return provCompleted == deployment_config.fabric_switches.size()
+                                } catch (exception) {
+                                    return false
+                                }
+                            }
+                        }
+                    }
+
+                    // Post-installation configuration starts here
+                    fabricIpPrefix = pod_config.fabric_ip.split(/\.\d+\.\d+\/24/)[0]
+                    xosUser = "xosadmin@opencord.org"
+                    xosPass = runHeadNodeCmd("cat /opt/credentials/xosadmin@opencord.org").trim()
+                    stage ("Connect fabric switches and compute nodes to ONOS") {
+                        // Configure breakout ports
+                        for(int i=0; i < deployment_config.fabric_switches.size(); i++) {
+                            if (deployment_config.fabric_switches[i].breakout_ports != null) {
+                                for(int j=0; j < deployment_config.fabric_switches[i].breakout_ports.size(); j++) {
+                                    runFabricSwitchCmd("${deployment_config.fabric_switches[i].ip}",
+                                                       "${deployment_config.fabric_switches[i].user}",
+                                                       "${deployment_config.fabric_switches[i].pass}",
+                                                       "sed -i -e 's/#port_mode_${deployment_config.fabric_switches[i].breakout_ports[j]}=/port_mode_${deployment_config.fabric_switches[i].breakout_ports[j]}=/g' /etc/accton/ofdpa.conf")
+                                }
+                            }
+                        }
+                        connectFabricAndComputeToOnos()
+                    }
+                    stage ("Configure the compute nodes") {
+                        leafSwitchNum = 0
+                        for(int i=0; i < deployment_config.fabric_switches.size(); i++) {
+                            if(deployment_config.fabric_switches[i].containsKey("role")) {
+                                if(deployment_config.fabric_switches[i].role.toLowerCase().contains("leaf")) {
+                                    leafSwitchNum += 1
+                                }
+                            }
+                        }
+                        for(int i=1; i<=leafSwitchNum; i++) {
+                            // Figure out which compute node connects to which switch
+                            leafName = "leaf-" + i.toString()
+                            computeNames = getComputeNames(leafName)
+                            echo "Compute nodes connected to " + leafName + " switch:"
+                            for(name in computeNames) { echo "${name}" }
+                            index = 1
+                            for(name in computeNames) {
+                                if(i>1) {
+                                    //Update fabric IP of compute nodes
+                                    index += 1
+                                    fabricIp = runComputeNodeCmd("${name}", "ip a | grep -o '${fabricIpPrefix}.[1-9][0-9]*.[0-9]*/24'").trim()
+                                    if (fabricIp != "") {
+                                        nodeId = sh(returnStdout: true, script: "curl -u ${xosUser}:${xosPass} -X GET http://${deployment_config.head.ip}/xosapi/v1/core/nodes | jq '.[\"items\"][] | select(.dataPlaneIp==\"${fabricIp}\") | .id'").trim()
+                                        newFabricIp = "${fabricIpPrefix}." + i.toString() + "." + index.toString() + "/24"
+                                        out = sh(returnStdout: true, script: "curl -u ${xosUser}:${xosPass} -X PUT -d '{\"dataPlaneIp\":\"${newFabricIp}\"}' http://${deployment_config.head.ip}/xosapi/v1/core/nodes/${nodeId}").trim()
+                                        // Wait until the new fabric IP gets configured
+                                        timeout(time: 5) {
+                                            waitUntil {
+                                                try {
+                                                    num = runComputeNodeCmd("${name}", "ip a | grep " + newFabricIp + " | wc -l").trim()
+                                                    return num.toInteger() == 1
+                                                } catch (exception) {
+                                                    return false
+                                                }
+                                            }
+                                        }
+                                    }
+                                    else echo "Cannot find fabric IP matching pattern ${fabricIpPrefix}.[1-9][0-9]*.[0-9]*"
+                                }
+                                //Add routes to fabric subnets
+                                for(int j=1; j<=leafSwitchNum; j++) {
+                                    if(j!=i) {
+                                        runComputeNodeCmd("${name}", "sudo ip route add ${fabricIpPrefix}." + j.toString() + ".0/24 via ${fabricIpPrefix}." + i.toString() + ".254 || echo route already exists")
+                                    }
+                                }
+                            }
+                        }
+                    }
+                    stage ("Generate and load network configuration") {
+                        // Reconnect compute nodes to update the fabric IP in ONOS
+                        connectFabricAndComputeToOnos()
+                        // Refresh fabric configurations
+                        sh "make fabric-refresh"
+                    }
+                }
+            }
+
+            if (deployment_config.make_release == true) {
+                stage ("Trigger Build") {
+                    url = 'https://jenkins.opencord.org/job/release-build/job/' + params.branch + '/build'
+                    httpRequest authentication: 'auto-release', httpMode: 'POST', url: url, validResponseCodes: '201'
+                }
+            }
+
+            currentBuild.result = 'SUCCESS'
+        } catch (err) {
+            currentBuild.result = 'FAILURE'
+            step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${notificationEmail}", sendToIndividuals: false])
+        }
+        echo "RESULT: ${currentBuild.result}"
+    }
+}
+
+/**
+ * Returns a dhcpd host reservation entry that binds the given MAC address to
+ * the given IP address.
+ *
+ * @param word    the word used to generate the host name
+ * @param counter the counter used to generate the host name
+ * @param mac     the MAC address to substitute
+ * @param ip      the IP address to substitute
+ */
+def createMACIPbindingStr(word, counter, mac, ip) {
+    return """host ${word}-${counter} {'\n'hardware ethernet ${mac}';''\n'fixed-address ${ip}';''\n'}"""
+}
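+// For example, createMACIPbindingStr("fabric", 1, "cc:37:ab:00:00:01", "10.6.0.1")
+// (hypothetical values) is intended to yield a dhcpd reservation of the form:
+//
+//   host fabric-1 {
+//       hardware ethernet cc:37:ab:00:00:01;
+//       fixed-address 10.6.0.1;
+//   }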
+
+/**
+ * Runs a command on a remote host using sshpass.
+ *
+ * @param ip      the node IP address
+ * @param user    the node user name
+ * @param pass    the node password
+ * @param command the command to run
+ * @param sshArgs arguments for the ssh command
+ * @return the output of the command
+ */
+def runCmd(ip, user, pass, command, sshArgs="") {
+    return sh(returnStdout: true, script: "sshpass -p ${pass} ssh ${sshArgs} -oStrictHostKeyChecking=no -l ${user} ${ip} \"${command}\"")
+}
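+// Minimal usage sketch (hypothetical host and credentials):
+//   runCmd("10.90.0.10", "ubuntu", "ubuntu", "uptime")
+// returns the stdout of `uptime` run on 10.90.0.10 over ssh.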
+
+/**
+ * Runs a command on the head node.
+ *
+ * @param command the command to run
+ * @param sshArgs arguments for the ssh command
+ * @return the output of the command
+ */
+def runHeadNodeCmd(command, sshArgs="") {
+    return sh(returnStdout: true, script: "sshpass -p ${deployment_config.head.pass} ssh ${sshArgs} -oStrictHostKeyChecking=no -l ${deployment_config.head.user} ${deployment_config.head.ip} \"${command}\"")
+}
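+// Example: runHeadNodeCmd("cord prov list") returns the provisioning status
+// listing from the head node defined in the deployment configuration.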
+
+/**
+ * Runs a command on a fabric switch.
+ *
+ * @param ip      the mgmt IP of the fabric switch, reachable from the head node
+ * @param user    the mgmt user name of the fabric switch
+ * @param pass    the mgmt password of the fabric switch
+ * @param command the command to run on the fabric switch
+ * @param sshArgs arguments for the ssh command
+ * @return the output of the command
+ */
+def runFabricSwitchCmd(ip, user, pass, command, sshArgs="") {
+    return sh(returnStdout: true, script: "sshpass -p ${deployment_config.head.pass} ssh ${sshArgs} -oStrictHostKeyChecking=no -l ${deployment_config.head.user} ${deployment_config.head.ip} \"sshpass -p ${pass} ssh ${sshArgs} -oStrictHostKeyChecking=no -l ${user} ${ip} ${command}\"")
+}
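+// Example (hypothetical switch credentials):
+//   runFabricSwitchCmd("10.6.0.1", "root", "onl", "uptime")
+// hops through the head node and runs `uptime` on the switch at 10.6.0.1.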
+
+/**
+ * Runs a command on all fabric switches.
+ *
+ * @param command the command to run on the fabric switches
+ * @param sshArgs arguments for the ssh command
+ */
+def runFabricSwitchCmdAll(command, sshArgs="") {
+    for(int i=0; i < deployment_config.fabric_switches.size(); i++) {
+        runFabricSwitchCmd("${deployment_config.fabric_switches[i].ip}",
+                           "${deployment_config.fabric_switches[i].user}",
+                           "${deployment_config.fabric_switches[i].pass}",
+                           "${command}",
+                           "${sshArgs}")
+    }
+}
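+// Example: runFabricSwitchCmdAll("sudo reboot") reboots every switch listed
+// under fabric_switches in the deployment configuration.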
+
+/**
+ * Runs a command on a compute node.
+ *
+ * @param name    the name of the compute node
+ * @param command the command to run on the compute node
+ * @param sshArgs arguments for the ssh command
+ * @return the output of the command
+ */
+def runComputeNodeCmd(name, command, sshArgs="") {
+    return sh(returnStdout: true, script: "sshpass -p ${deployment_config.head.pass} ssh ${sshArgs} -oStrictHostKeyChecking=no -l ${deployment_config.head.user} ${deployment_config.head.ip} \"ssh ${sshArgs} ubuntu@${name} ${command}\"")
+}
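+// Example (hypothetical node name): runComputeNodeCmd("node-1", "ip a")
+// runs `ip a` on that compute node, via the head node, and returns its output.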
+
+/**
+ * Runs a command on all compute nodes.
+ *
+ * @param command the command to run on the compute nodes
+ * @param sshArgs arguments for the ssh command
+ */
+def runComputeNodeCmdAll(command, sshArgs="") {
+    computeNamesAll = getComputeNames()
+    for (name in computeNamesAll) {
+        runComputeNodeCmd("${name}", "${command}", "${sshArgs}")
+    }
+}
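+// Example: runComputeNodeCmdAll("uptime") runs `uptime` on every compute node
+// returned by getComputeNames().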
+
+/**
+ * Runs an ONOS CLI command.
+ *
+ * @param name    the ONOS node name, reachable from the head node
+ * @param port    the port used to log in to the ONOS CLI
+ * @param user    the user name used to log in to the ONOS CLI
+ * @param pass    the password used to log in to the ONOS CLI
+ * @param command the command to run in the ONOS CLI
+ * @return the output of the command
+ */
+def runOnosCliCmd(name, port, user, pass, command) {
+    return sh(returnStdout: true, script: "sshpass -p ${deployment_config.head.pass} ssh -oStrictHostKeyChecking=no -l ${deployment_config.head.user} ${deployment_config.head.ip} \"sshpass -p ${pass} ssh -oStrictHostKeyChecking=no -l ${user} -p ${port} ${name} ${command}\"")
+}
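+// Example: runOnosCliCmd("onos-fabric", "8101", "onos", "rocks", "devices")
+// lists the switches known to the onos-fabric instance.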
+
+/**
+ * Returns a list of compute node names. When "role" is specified, returns only
+ * the names of compute nodes connected to the switch that has that role.
+ *
+ * @param role    the switch role, e.g. "leaf-1"
+ */
+def getComputeNames(role="") {
+    computeNamesAll = runHeadNodeCmd("cord prov list | grep node | awk '{print \\\$2}' | sed -e \\\"s/.*/'&'/\\\"").trim()
+    computeNamesAll = "${computeNamesAll}".split()
+    computeNamesAll = "${computeNamesAll}".replaceAll("'", "\"")
+    computeNamesAll = new JsonSlurperClassic().parseText("${computeNamesAll}")
+    if ("${role}" == "") return computeNamesAll
+    computeNames = []
+    switchMac = ""
+    for(int i=0; i < deployment_config.fabric_switches.size(); i++) {
+        if(deployment_config.fabric_switches[i].containsKey("role")) {
+            if ("${deployment_config.fabric_switches[i].role}" == "${role}")
+                switchMac = "${deployment_config.fabric_switches[i].mac}"
+        }
+    }
+    if ("${switchMac}" != "") {
+        switchMac = switchMac.toLowerCase().replaceAll(':','')
+        // Get fabric IPs of compute nodes connected to the switch
+        try {
+            computeFabricIps = runHeadNodeCmd("sshpass -p rocks ssh -q -oStrictHostKeyChecking=no -l onos -p 8101 onos-fabric hosts -j | jq '.[] | select(.locations[].elementId | contains(\\\"${switchMac}\\\")) | .ipAddresses' | grep -o '\\\"${fabricIpPrefix}.[1-9][0-9]*.[0-9]*\\\"'",
+                                              "-q").trim()
+        } catch (exception) {
+            return computeNames
+        }
+        computeFabricIps = "${computeFabricIps}".split()
+        computeFabricIps = new JsonSlurperClassic().parseText("${computeFabricIps}")
+        // Figure out which compute node connects to the switch
+        for (name in computeNamesAll) {
+            fabricIp = runComputeNodeCmd("${name}", "ip a | grep -o '${fabricIpPrefix}.[1-9][0-9]*.[0-9]*'").trim()
+            if (fabricIp in computeFabricIps) {
+                computeNames.add("${name}")
+            }
+        }
+    }
+    return computeNames
+}
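+// Example: getComputeNames("leaf-1") returns only the compute nodes attached to
+// the switch whose role is "leaf-1"; getComputeNames() returns all of them.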
+
+/**
+ * Connects all fabric switches and compute nodes to ONOS.
+ */
+def connectFabricAndComputeToOnos() {
+    // Kill existing switch connections
+    runFabricSwitchCmdAll("./killit || echo no ofagentapp running")
+    // Clean stale ONOS data
+    runOnosCliCmd("onos-fabric", "8101", "onos", "rocks", "wipe-out -r -j please")
+    // Connect switches to ONOS
+    for(int i=0; i < deployment_config.fabric_switches.size(); i++) {
+        runFabricSwitchCmd("${deployment_config.fabric_switches[i].ip}",
+                           "${deployment_config.fabric_switches[i].user}",
+                           "${deployment_config.fabric_switches[i].pass}",
+                           "./connect -bg 2>&1 > ${deployment_config.fabric_switches[i].ip}.log",
+                           "-qftn")
+    }
+    // Verify ONOS has recognized the switches
+    timeout(time: 5) {
+        waitUntil {
+            try {
+                num = runHeadNodeCmd("\"sshpass -p rocks ssh -q -oStrictHostKeyChecking=no -l onos -p 8101 onos-fabric devices | grep available=true | wc -l\"").trim()
+                return num.toInteger() == deployment_config.fabric_switches.size()
+            } catch (exception) {
+                return false
+            }
+        }
+    }
+    // Connect compute nodes to ONOS
+    runComputeNodeCmdAll("ping -c 1 ${fabricIpPrefix}.1.254", "-qftn")
+    // Verify ONOS has recognized the hosts
+    timeout(time: 5) {
+        waitUntil {
+            try {
+                num = runHeadNodeCmd("\"sshpass -p rocks ssh -q -oStrictHostKeyChecking=no -l onos -p 8101 onos-fabric hosts | grep id= | wc -l\"").trim()
+                return num.toInteger() >= deployment_config.compute_nodes.size()
+            } catch (exception) {
+                return false
+            }
+        }
+    }
+}