[VOL-3454] Add Groovy pipeline for soak test jobs, update JJB templates
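
Introduce jjb/pipeline/voltha-physical-soak-tests.groovy, a dedicated
pipeline for the Menlo soak pod, and split the soak job into a
functional run and a failure/recovery run, each triggered every three
days. A new test-type template variable (exposed to the pipeline as the
testType parameter) selects which Robot suites a given run executes.

As a rough illustration, a soak job entry now carries the new fields
like this (values mirror the Menlo soak pod entries added below):

    - 'build_voltha_pod_soak_test':
        config-pod: 'onf-soak-pod'
        test-type: 'Functional'
        pipeline-script: 'voltha-physical-soak-tests.groovy'
        time-trigger: "H H */3 * *"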

Change-Id: If6406a6e9bf72c0911d93e84ab8199dcdd0994c2
diff --git a/jjb/cord-test/voltha.yaml b/jjb/cord-test/voltha.yaml
index 92905bb..2242053 100644
--- a/jjb/cord-test/voltha.yaml
+++ b/jjb/cord-test/voltha.yaml
@@ -16,6 +16,7 @@
     num-of-onos: '1'
     num-of-atomix: '0'
     reinstall-olt: true
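+    # test-type selects which soak suites (Functional or Failure) voltha-physical-soak-tests.groovy runs; empty for non-soak jobs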
+    test-type: ''
 
     jobs:
       # flex OCP pod with olt/onu - manual test job, voltha master build job
@@ -345,7 +346,9 @@
           config-pod: 'onf-soak-pod'
           release: 'master'
           branch: 'master'
+          profile: '1T8GEM'
           test-repo: 'voltha-system-tests'
+          name-extension: '_DT'
+          work-flow: 'DT'
           Jenkinsfile: 'Jenkinsfile-voltha-build'
           configurePod: true
-          profile: 'Default'
@@ -361,16 +365,33 @@
           Jenkinsfile: 'Jenkinsfile-voltha-test'
 
       # ONF Menlo Soak POD test job - voltha-master branch
-      # Run tests every two days
+      # Run tests every three days
+      - 'build_voltha_pod_soak_test':
+          build-node: 'menlo-soak-pod'
+          config-pod: 'onf-soak-pod'
+          profile: '1T8GEM'
+          branch: 'master'
+          release: 'master'
+          work-flow: 'DT'
+          name-extension: '_soak_func'
+          test-type: 'Functional'
+          test-repo: 'voltha-system-tests'
+          pipeline-script: 'voltha-physical-soak-tests.groovy'
+          time-trigger: "H H */3 * *"
+
+      # ONF Menlo Soak POD test job - voltha-master branch
+      # Run tests every three days
       - 'build_voltha_pod_soak_test':
           build-node: 'menlo-soak-pod'
           config-pod: 'onf-soak-pod'
           profile: 'Default'
           branch: 'master'
           release: 'master'
-          name-extension: '_soak'
+          work-flow: 'DT'
+          name-extension: '_soak_fail'
+          test-type: 'Failure'
           test-repo: 'voltha-system-tests'
-          Jenkinsfile: 'Jenkinsfile-voltha-test'
+          pipeline-script: 'voltha-physical-soak-tests.groovy'
           time-trigger: "H H */3 * *"
 
       # Infosys pod with olt/onu - Default tech profile and timer based job
diff --git a/jjb/pipeline/voltha-physical-soak-tests.groovy b/jjb/pipeline/voltha-physical-soak-tests.groovy
new file mode 100644
index 0000000..0ca9a48
--- /dev/null
+++ b/jjb/pipeline/voltha-physical-soak-tests.groovy
@@ -0,0 +1,239 @@
+// Copyright 2017-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+node {
+  // Need this so that deployment_config has global scope when it's read later
+  deployment_config = null
+}
+
+pipeline {
+  /* executor label is taken from the buildNode parameter supplied by JJB */
+  agent {
+    label "${params.buildNode}"
+  }
+  options {
+    timeout(time: 280, unit: 'MINUTES')
+  }
+
+  environment {
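+    // Pod-specific kubeconfig and voltctl config; $WORKSPACE/bin (added to PATH) receives the voltctl and kail binaries installed during Initialize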
+    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
+    VOLTCONFIG="$HOME/.volt/config-minimal"
+    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+  }
+
+  stages {
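+    // Initialize: clone the pod configs and kind-voltha, sync the VOLTHA source tree via repo,
+    // install voltctl and kail into $WORKSPACE/bin, and quiet down noisy component logs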
+    stage ('Initialize') {
+      steps {
+        step([$class: 'WsCleanup'])
+        sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
+        sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/kind-voltha"
+        script {
+          deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
+        }
+        // This checkout allows us to show changes in Jenkins
+        checkout(changelog: true,
+          poll: false,
+          scm: [$class: 'RepoScm',
+            manifestRepositoryUrl: "${params.manifestUrl}",
+            manifestBranch: "${params.branch}",
+            currentBranch: true,
+            destinationDir: 'voltha',
+            forceSync: true,
+            resetFirst: true,
+            quiet: true,
+            jobs: 4,
+            showAllChanges: true]
+          )
+        sh returnStdout: false, script: """
+        cd voltha
+        git clone -b master ${cordRepoUrl}/cord-tester
+        mkdir -p $WORKSPACE/bin
+        bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
+        cd $WORKSPACE
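+        # Released branches source pinned component versions from kind-voltha and a fixed voltctl;
+        # master installs the latest voltctl release published on GitHub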
+        if [ "${params.branch}" != "master" ]; then
+           cd $WORKSPACE/kind-voltha
+           source releases/${params.branch}
+           VC_VERSION=1.1.8
+        else
+           VC_VERSION=\$(curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')
+        fi
+
+        HOSTOS=\$(uname -s | tr "[:upper:]" "[:lower:]")
+        HOSTARCH=\$(uname -m | tr "[:upper:]" "[:lower:]")
+        if [ \$HOSTARCH == "x86_64" ]; then
+            HOSTARCH="amd64"
+        fi
+        curl -o $WORKSPACE/bin/voltctl -sSL https://github.com/opencord/voltctl/releases/download/v\${VC_VERSION}/voltctl-\${VC_VERSION}-\${HOSTOS}-\${HOSTARCH}
+        chmod 755 $WORKSPACE/bin/voltctl
+        voltctl version --clientonly
+
+        if [ "${params.branch}" == "master" ]; then
+        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
+        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
+        # We should change this. In the meantime here is a workaround.
+           set +e
+
+        # Remove noise from voltha-core logs
+           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
+           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
+        # Remove noise from openolt logs
+           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
+           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
+           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
+        fi
+        """
+      }
+    }
+
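+    // Functional soak run: executes the sanity/functional suites only when the job's testType is 'Functional';
+    // PowerSwitch-tagged tests are included or excluded based on the powerSwitch parameter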
+    stage('Functional Tests') {
+      environment {
+        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
+        ROBOT_FILE="Voltha_PODTests.robot"
+        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FunctionalTests"
+      }
+      steps {
+        sh """
+        cd $WORKSPACE/voltha/kind-voltha/scripts
+        ./log-collector.sh > /dev/null &
+        ./log-combine.sh > /dev/null &
+
+        mkdir -p $ROBOT_LOGS_DIR
+        if [ "${params.testType}" == "Functional" ]; then
+            if ( ${powerSwitch} ); then
+                export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+            else
+                export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+            fi
+            make -C $WORKSPACE/voltha/voltha-system-tests voltha-test || true
+        fi
+        """
+      }
+    }
+
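+    // Failure/recovery soak run: executes Voltha_FailureScenarios.robot only when testType is 'Failure'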
+    stage('Failure/Recovery Tests') {
+      environment {
+        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
+        ROBOT_FILE="Voltha_FailureScenarios.robot"
+        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FailureScenarios"
+      }
+      steps {
+        sh """
+        mkdir -p $ROBOT_LOGS_DIR
+        if [ "${params.testType}" == "Failure" ]; then
+           if ( ${powerSwitch} ); then
+              export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+           else
+              export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+           fi
+           make -C $WORKSPACE/voltha/voltha-system-tests voltha-test || true
+        fi
+        """
+      }
+    }
+
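+    // Dataplane tests run as part of the 'Failure' soak job, using the dataplane-tagged cases in Voltha_PODTests.robot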
+    stage('Dataplane Tests') {
+      environment {
+        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
+        ROBOT_FILE="Voltha_PODTests.robot"
+        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/DataplaneTests"
+      }
+      steps {
+        sh """
+        mkdir -p $ROBOT_LOGS_DIR
+        if [ "${params.testType}" == "Failure" ]; then
+           export ROBOT_MISC_ARGS="--removekeywords wuks -i dataplane -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+           make -C $WORKSPACE/voltha/voltha-system-tests voltha-test || true
+        fi
+        """
+      }
+    }
+
+  }
+  post {
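+    // Always collect container images, pod and OLT logs, and Robot results; notify by email on unstable builds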
+    always {
+      sh returnStdout: false, script: '''
+      set +e
+      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}{end}" | sort | uniq
+      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}{end}" | sort | uniq
+      kubectl get nodes -o wide
+      kubectl get pods -n voltha -o wide
+
+      sleep 60 # Wait for log-collector and log-combine to complete
+
+      # Clean up "announcer" pod used by the tests if present
+      kubectl delete pod announcer || true
+
+      ## Pull out errors from log files
+      extract_errors_go() {
+        echo
+        echo "Error summary for $1:"
+        grep '"level":"error"' $WORKSPACE/voltha/kind-voltha/scripts/logger/combined/$1*
+        echo
+      }
+
+      extract_errors_python() {
+        echo
+        echo "Error summary for $1:"
+        grep 'ERROR' $WORKSPACE/voltha/kind-voltha/scripts/logger/combined/$1*
+        echo
+      }
+
+      extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
+      extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
+      extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
+      extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
+      extract_errors_python onos >> $WORKSPACE/error-report.log
+
+      cd $WORKSPACE/voltha/kind-voltha/scripts/logger/combined/
+      tar czf $WORKSPACE/container-logs.tgz *
+
+      cd $WORKSPACE
+      gzip *-combined.log || true
+
+      # collect ETCD cluster logs
+      mkdir -p $WORKSPACE/etcd
+      printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I@ bash -c "kubectl logs @ > $WORKSPACE/etcd/@.log"
+      '''
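+      // Copy OLT device logs from every OLT listed in the deployment config, stripping ANSI escape sequences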
+      script {
+        deployment_config.olts.each { olt ->
+          sh returnStdout: false, script: """
+          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
+          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
+          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
+          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
+          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
+          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
+          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
+          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
+          """
+        }
+      }
+      step([$class: 'RobotPublisher',
+        disableArchiveOutput: false,
+        logFileName: '**/log*.html',
+        otherFiles: '',
+        outputFileName: '**/output*.xml',
+        outputPath: 'RobotLogs',
+        passThreshold: 100,
+        reportFileName: '**/report*.html',
+        unstableThreshold: 0
+        ]);
+      archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log'
+    }
+    unstable {
+      step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${notificationEmail}", sendToIndividuals: false])
+    }
+  }
+}
diff --git a/jjb/voltha-test/voltha-nightly-jobs.yaml b/jjb/voltha-test/voltha-nightly-jobs.yaml
index a28f805..f007915 100644
--- a/jjb/voltha-test/voltha-nightly-jobs.yaml
+++ b/jjb/voltha-test/voltha-nightly-jobs.yaml
@@ -366,6 +366,11 @@
           default: '{power-switch}'
           description: "Indicate whether POD has power switch to reboot ONUs/OLT remotely"
 
+      - string:
+          name: testType
+          default: '{test-type}'
+          description: 'Test category (Functional or Failure) passed to the soak pipeline script'
+
     concurrent: true
 
     project-type: pipeline