Merge "Add cord helm repo in chart-api-test-helm.groovy"
diff --git a/jjb/cord-test/voltha.yaml b/jjb/cord-test/voltha.yaml
index a8a1828..8145a78 100644
--- a/jjb/cord-test/voltha.yaml
+++ b/jjb/cord-test/voltha.yaml
@@ -256,3 +256,29 @@
           released: false
           test-repo: 'voltha-system-tests'
           profile: 'Default'
+
+
+      # Berlin pod with olt/onu - released versions, Default tech profile, timer-based job
+      - 'build_voltha_pod_release':
+          testvm: 'dt-berlin-community-pod'
+          config-pod: 'dt-berlin-pod'
+          release: 'release'
+          branch: 'master'
+          test-repo: 'voltha-system-tests'
+          Jenkinsfile: 'Jenkinsfile-voltha-bal31-build'
+          oltDebVersion: 'openolt_asfvolt16.deb'
+          configurePod: true
+          released: true
+          profile: 'Default'
+          time: '9'
+
+      # Berlin POD test job - released versions: uses tech profile on voltha branch
+      - 'build_voltha_pod_test':
+          testvm: 'dt-berlin-community-pod'
+          config-pod: 'dt-berlin-pod'
+          release: 'release'
+          branch: 'master'
+          released: true
+          test-repo: 'voltha-system-tests'
+          profile: 'Default'
+
diff --git a/jjb/pipeline/voltha-bbsim-tests.groovy b/jjb/pipeline/voltha-bbsim-tests.groovy
index a6f7b3d..a3033c1 100644
--- a/jjb/pipeline/voltha-bbsim-tests.groovy
+++ b/jjb/pipeline/voltha-bbsim-tests.groovy
@@ -23,13 +23,28 @@
     label "${params.buildNode}"
   }
   options {
-      timeout(time: 90, unit: 'MINUTES')
+    timeout(time: 90, unit: 'MINUTES')
+  }
+  environment {
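+    // kind-voltha deployment options shared by all stages (previously exported or passed inline in each stage)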
+    KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
+    VOLTCONFIG="$HOME/.volt/config-minimal"
+    PATH="$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+    TYPE="minimal"
+    FANCY=0
+    WITH_SIM_ADAPTERS="n"
+    WITH_RADIUS="y"
+    WITH_BBSIM="y"
+    DEPLOY_K8S="y"
+    VOLTHA_LOG_LEVEL="DEBUG"
+    CONFIG_SADIS="n"
+    ROBOT_MISC_ARGS="-d $WORKSPACE/RobotLogs -v teardown_device:False"
   }
 
   stages {
 
     stage('Repo') {
       steps {
+        step([$class: 'WsCleanup'])
         checkout(changelog: false, \
           poll: false,
           scm: [$class: 'RepoScm', \
@@ -60,7 +75,8 @@
         sh """
            git clone https://github.com/ciena/kind-voltha.git
            cd kind-voltha/
-           DEPLOY_K8S=y JUST_K8S=y FANCY=0 ./voltha up
+           JUST_K8S=y ./voltha up
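+           # install kail (tails logs from multiple Kubernetes pods) into the workspace bin for the log-capture steps below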
+           bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/kind-voltha/bin"
            """
       }
     }
@@ -82,10 +98,6 @@
            if ! [[ "${gerritProject}" =~ ^(voltha-helm-charts|voltha-system-tests)\$ ]]; then
              export GOROOT=/usr/local/go
              export GOPATH=\$(pwd)
-             export TYPE=minimal
-             export KUBECONFIG="$(./bin/kind get kubeconfig-path --name="voltha-minimal")"
-             export VOLTCONFIG="/home/jenkins/.volt/config-minimal"
-             export PATH=$WORKSPACE/kind-voltha/bin:$PATH
              docker images | grep citest
              for image in \$(docker images -f "reference=*/*citest" --format "{{.Repository}}"); do echo "Pushing \$image to nodes"; kind load docker-image \$image:citest --name voltha-\$TYPE --nodes voltha-\$TYPE-worker,voltha-\$TYPE-worker2; done
            fi
@@ -95,39 +107,32 @@
     stage('Deploy Voltha') {
       steps {
         sh '''
-           HELM_FLAG="${extraHelmFlags} "
+           export EXTRA_HELM_FLAGS="--set log_agent.enabled=False ${extraHelmFlags} "
 
+           IMAGES=""
            if [ "${gerritProject}" = "voltha-go" ]; then
-             HELM_FLAG+="--set images.rw_core.tag=citest,images.rw_core.pullPolicy=Never,images.ro_core.tag=citest,images.ro_core.pullPolicy=Never "
-           fi
-
-           if [ "${gerritProject}" = "voltha-onos" ]; then
-             HELM_FLAG+="--set images.onos.tag=citest,images.onos.pullPolicy=Never "
-           fi
-
-           if [ "${gerritProject}" = "ofagent-py" ]; then
-             HELM_FLAG+="--set images.ofagent.tag=citest,images.ofagent.pullPolicy=Never "
-           fi
-
-           if [ "${gerritProject}" = "voltha-openolt-adapter" ]; then
-             HELM_FLAG+="--set images.adapter_open_olt.tag=citest,images.adapter_open_olt.pullPolicy=Never "
-           fi
-
-           if [ "${gerritProject}" = "voltha-openonu-adapter" ]; then
-             HELM_FLAG+="--set images.adapter_open_onu.tag=citest,images.adapter_open_onu.pullPolicy=Never "
-           fi
-
-           if [ "${gerritProject}" = "bbsim" ]; then
-             HELM_FLAG+="--set images.bbsim.tag=citest,images.bbsim.pullPolicy=Never "
-           fi
-
-           if [ "${gerritProject}" = "voltha-api-server" ]; then
-             HELM_FLAG+="--set images.afrouter.tag=citest,images.afrouter.pullPolicy=Never,images.afrouterd.tag=citest,images.afrouterd.pullPolicy=Never "
+             IMAGES="rw_core ro_core "
+           elif [ "${gerritProject}" = "ofagent-py" ]; then
+             IMAGES="ofagent "
+           elif [ "${gerritProject}" = "voltha-onos" ]; then
+             IMAGES="onos "
+           elif [ "${gerritProject}" = "voltha-openolt-adapter" ]; then
+             IMAGES="adapter_open_olt "
+           elif [ "${gerritProject}" = "voltha-openonu-adapter" ]; then
+             IMAGES="adapter_open_onu "
+           elif [ "${gerritProject}" = "voltha-api-server" ]; then
+             IMAGES="afrouter afrouterd "
+           elif [ "${gerritProject}" = "bbsim" ]; then
+             IMAGES="bbsim "
            else
-             # afrouter only has master branch at present
-             HELM_FLAG+="--set images.afrouter.tag=master,images.afrouterd.tag=master "
+             echo "No images to push"
            fi
 
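+           # use the locally built citest image for the project under test and never pull it from a registry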
+           for I in \$IMAGES
+           do
+             EXTRA_HELM_FLAGS+="--set images.\$I.tag=citest,images.\$I.pullPolicy=Never "
+           done
+
            if [ "${gerritProject}" = "voltha-helm-charts" ]; then
              export CHART_PATH=$WORKSPACE/voltha/voltha-helm-charts
              export VOLTHA_CHART=\$CHART_PATH/voltha
@@ -139,8 +144,9 @@
            fi
 
            cd $WORKSPACE/kind-voltha/
-           echo \$HELM_FLAG
-           EXTRA_HELM_FLAGS=\$HELM_FLAG VOLTHA_LOG_LEVEL=DEBUG TYPE=minimal WITH_RADIUS=y WITH_BBSIM=y INSTALL_ONOS_APPS=y CONFIG_SADIS=n FANCY=0 WITH_SIM_ADAPTERS=n ./voltha up
+           echo \$EXTRA_HELM_FLAGS
+           kail -n voltha -n default > $WORKSPACE/onos-voltha-combined.log &
+           ./voltha up
            '''
       }
     }
@@ -148,11 +154,7 @@
     stage('Run E2E Tests') {
       steps {
         sh '''
-           cd kind-voltha/
-           export KUBECONFIG="$(./bin/kind get kubeconfig-path --name="voltha-minimal")"
-           export VOLTCONFIG="/home/jenkins/.volt/config-minimal"
-           export PATH=$WORKSPACE/kind-voltha/bin:$PATH
-           export ROBOT_MISC_ARGS="-v teardown_device:False"
+           mkdir -p $WORKSPACE/RobotLogs
            make -C $WORKSPACE/voltha/voltha-system-tests sanity-kind || true
            '''
       }
@@ -163,41 +165,38 @@
     always {
       sh '''
          set +e
-         # copy robot logs
-         if [ -d RobotLogs ]; then rm -r RobotLogs; fi; mkdir RobotLogs
-         cp -r $WORKSPACE/voltha/voltha-system-tests/tests/*/*.html ./RobotLogs || true
-         cp -r $WORKSPACE/voltha/voltha-system-tests/tests/*/*.xml ./RobotLogs || true
-         cd kind-voltha/
-         cp install-minimal.log $WORKSPACE/
-         export KUBECONFIG="$(./bin/kind get kubeconfig-path --name="voltha-minimal")"
-         export VOLTCONFIG="/home/jenkins/.volt/config-minimal"
-         export PATH=$WORKSPACE/kind-voltha/bin:$PATH
+         cp $WORKSPACE/kind-voltha/install-minimal.log $WORKSPACE/
          kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\t'}{.imageID}{'\\n'}" | sort | uniq -c
          kubectl get nodes -o wide
          kubectl get pods -o wide
          kubectl get pods -n voltha -o wide
-         ## get default pod logs
-         for pod in \$(kubectl get pods --no-headers | awk '{print \$1}');
-         do
-           if [[ \$pod == *"onos"* && \$pod != *"onos-service"* ]]; then
-             kubectl logs \$pod onos> $WORKSPACE/\$pod.log;
-           else
-             kubectl logs \$pod> $WORKSPACE/\$pod.log;
-           fi
-         done
-         ## get voltha pod logs
-         for pod in \$(kubectl get pods --no-headers -n voltha | awk '{print \$1}');
-         do
-           if [[ \$pod == *"-api-"* ]]; then
-             kubectl logs \$pod arouter -n voltha > $WORKSPACE/\$pod.log;
-           else
-             kubectl logs \$pod -n voltha > $WORKSPACE/\$pod.log;
-           fi
-         done
-         ## clean up node
-	 FANCY=0 WAIT_ON_DOWN=y ./voltha down
-	 cd $WORKSPACE/
-	 rm -rf kind-voltha/ voltha/ || true
+
+         sync
+         pkill kail || true
+
+         ## Pull out errors from log files
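+         #  - Go components log JSON: keep level=error entries and print the msg field with jq
+         #  - Python components log plain text: keep lines containing ERROR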
+         extract_errors_go() {
+           echo
+           echo "Error summary for $1:"
+           grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
+           echo
+         }
+
+         extract_errors_python() {
+           echo
+           echo "Error summary for $1:"
+           grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
+           echo
+         }
+
+         extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
+         extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
+         extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
+         extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
+
+         ## shut down kind-voltha
+         cd $WORKSPACE/kind-voltha
+         WAIT_ON_DOWN=y ./voltha down
          '''
          step([$class: 'RobotPublisher',
             disableArchiveOutput: false,
diff --git a/jjb/pipeline/voltha-go-multi-tests.groovy b/jjb/pipeline/voltha-go-multi-tests.groovy
index a562fd5..6950583 100644
--- a/jjb/pipeline/voltha-go-multi-tests.groovy
+++ b/jjb/pipeline/voltha-go-multi-tests.groovy
@@ -37,7 +37,7 @@
     DEPLOY_K8S="y"
     VOLTHA_LOG_LEVEL="DEBUG"
     CONFIG_SADIS="n"
-    EXTRA_HELM_FLAGS="${params.extraHelmFlags}"
+    EXTRA_HELM_FLAGS="--set log_agent.enabled=False ${params.extraHelmFlags}"
     ROBOT_MISC_ARGS="${params.extraRobotArgs} -d $WORKSPACE/RobotLogs"
   }
   stages {
@@ -73,6 +73,8 @@
       steps {
         sh """
            cd kind-voltha/
+           JUST_K8S=y ./voltha up
+           kail -n voltha -n default > $WORKSPACE/onos-voltha-combined.log &
            ./voltha up
            """
       }
@@ -86,10 +88,6 @@
              kubectl -n voltha exec $(kubectl -n voltha get pods -lapp=etcd -o=name) -- sh -c "ETCDCTL_API=3 etcdctl del --prefix $1"
            }
 
-           wait_for_state_change() {
-             timeout 60 bash -c "until voltctl device list -f Type=$2,AdminState=$3,OperStatus=$4,ConnectStatus=$5 -q | wc -l | grep -q 1; do echo Waiting for $1 to change states; voltctl device list; echo; sleep 5; done"
-           }
-
            mkdir -p $WORKSPACE/RobotLogs
            git clone https://gerrit.opencord.org/voltha-system-tests
            cd kind-voltha
@@ -101,15 +99,8 @@
              if [[ \$i -lt ${testRuns} ]]
              then
                # For testing multiple back-to-back runs
-               # Doing some manual cleanup to work around known issues in BBSim and ONOS apps
-
-               helm delete --purge bbsim # VOL-2342
-               helm delete --purge onos # VOL-2343, VOL-2363
-               clear_etcd service/voltha/resource_manager
-               clear_etcd service/voltha/openolt
-               clear_etcd service/voltha/devices
-               sleep 30
-               DEPLOY_K8S=no ./voltha up  # Will just re-deploy BBSim and ONOS
+               # Work around a known issue in BBSim
+               kubectl -n voltha delete pod -lapp=bbsim
              fi
            done
            '''
@@ -121,31 +112,37 @@
     always {
       sh '''
          set +e
-         cd kind-voltha/
-         cp install-minimal.log $WORKSPACE/
+         cp $WORKSPACE/kind-voltha/install-minimal.log $WORKSPACE/
          kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\t'}{.imageID}{'\\n'}" | sort | uniq -c
          kubectl get nodes -o wide
          kubectl get pods -o wide
          kubectl get pods -n voltha -o wide
-         ## get default pod logs
-         for pod in \$(kubectl get pods --no-headers | awk '{print \$1}');
-         do
-           if [[ \$pod == *"onos"* && \$pod != *"onos-service"* ]]; then
-             kubectl logs \$pod onos> $WORKSPACE/\$pod.log;
-           else
-             kubectl logs \$pod> $WORKSPACE/\$pod.log;
-           fi
-         done
-         ## get voltha pod logs
-         for pod in \$(kubectl get pods --no-headers -n voltha | awk '{print \$1}');
-         do
-           if [[ \$pod == *"-api-"* ]]; then
-             kubectl logs \$pod arouter -n voltha > $WORKSPACE/\$pod.log;
-           else
-             kubectl logs \$pod -n voltha > $WORKSPACE/\$pod.log;
-           fi
-         done
+
+         sync
+         pkill kail || true
+
+         ## Pull out errors from log files
+         extract_errors_go() {
+           echo
+           echo "Error summary for $1:"
+           grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
+           echo
+         }
+
+         extract_errors_python() {
+           echo
+           echo "Error summary for $1:"
+           grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
+           echo
+         }
+
+         extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
+         extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
+         extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
+         extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
+
          ## shut down voltha
+         cd $WORKSPACE/kind-voltha/
          WAIT_ON_DOWN=y ./voltha down
          '''
          step([$class: 'RobotPublisher',
diff --git a/jjb/pipeline/voltha-go-tests.groovy b/jjb/pipeline/voltha-go-tests.groovy
index 7e1227e..d737988 100644
--- a/jjb/pipeline/voltha-go-tests.groovy
+++ b/jjb/pipeline/voltha-go-tests.groovy
@@ -37,7 +37,7 @@
     DEPLOY_K8S="y"
     VOLTHA_LOG_LEVEL="DEBUG"
     CONFIG_SADIS="n"
-    EXTRA_HELM_FLAGS="${params.extraHelmFlags}"
+    EXTRA_HELM_FLAGS="--set log_agent.enabled=False ${params.extraHelmFlags}"
     ROBOT_MISC_ARGS="${params.extraRobotArgs} -d $WORKSPACE/RobotLogs"
   }
   stages {
@@ -73,6 +73,8 @@
       steps {
         sh """
            cd kind-voltha/
+           JUST_K8S=y ./voltha up
+           kail -n voltha -n default > $WORKSPACE/onos-voltha-combined.log &
            ./voltha up
            """
       }
@@ -81,9 +83,10 @@
     stage('Run E2E Tests') {
       steps {
         sh '''
+           set +e
            mkdir -p $WORKSPACE/RobotLogs
            git clone https://gerrit.opencord.org/voltha-system-tests
-           make -C $WORKSPACE/voltha-system-tests ${makeTarget}
+           make -C $WORKSPACE/voltha-system-tests ${makeTarget} || true
            '''
       }
     }
@@ -93,31 +96,37 @@
     always {
       sh '''
          set +e
-         cd kind-voltha/
-         cp install-minimal.log $WORKSPACE/
+         cp $WORKSPACE/kind-voltha/install-minimal.log $WORKSPACE/
          kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\t'}{.imageID}{'\\n'}" | sort | uniq -c
          kubectl get nodes -o wide
          kubectl get pods -o wide
          kubectl get pods -n voltha -o wide
-         ## get default pod logs
-         for pod in \$(kubectl get pods --no-headers | awk '{print \$1}');
-         do
-           if [[ \$pod == *"onos"* && \$pod != *"onos-service"* ]]; then
-             kubectl logs \$pod onos> $WORKSPACE/\$pod.log;
-           else
-             kubectl logs \$pod> $WORKSPACE/\$pod.log;
-           fi
-         done
-         ## get voltha pod logs
-         for pod in \$(kubectl get pods --no-headers -n voltha | awk '{print \$1}');
-         do
-           if [[ \$pod == *"-api-"* ]]; then
-             kubectl logs \$pod arouter -n voltha > $WORKSPACE/\$pod.log;
-           else
-             kubectl logs \$pod -n voltha > $WORKSPACE/\$pod.log;
-           fi
-         done
+
+         sync
+         pkill kail || true
+
+         ## Pull out errors from log files
+         extract_errors_go() {
+           echo
+           echo "Error summary for $1:"
+           grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
+           echo
+         }
+
+         extract_errors_python() {
+           echo
+           echo "Error summary for $1:"
+           grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
+           echo
+         }
+
+         extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
+         extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
+         extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
+         extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
+
          ## shut down voltha
+         cd $WORKSPACE/kind-voltha/
          WAIT_ON_DOWN=y ./voltha down
          '''
          step([$class: 'RobotPublisher',
diff --git a/jjb/pipeline/voltha-physical-build-and-tests.groovy b/jjb/pipeline/voltha-physical-build-and-tests.groovy
index 190cfc3..65e229c 100644
--- a/jjb/pipeline/voltha-physical-build-and-tests.groovy
+++ b/jjb/pipeline/voltha-physical-build-and-tests.groovy
@@ -153,7 +153,7 @@
         script {
           if ( params.withPatchset ) {
             sh returnStdout: false, script: """
-            export EXTRA_HELM_FLAGS='-f ${localKindVolthaValuesFile} '
+            export EXTRA_HELM_FLAGS='--set log_agent.enabled=False -f ${localKindVolthaValuesFile} '
 
             IMAGES=""
             if [ "${gerritProject}" = "voltha-go" ]; then
@@ -179,13 +179,15 @@
 
             cd $WORKSPACE/kind-voltha/
             echo \$EXTRA_HELM_FLAGS
+            kail -n voltha -n default > $WORKSPACE/onos-voltha-combined.log &
             ./voltha up
             """
           } else {
             sh returnStdout: false, script: """
-            export EXTRA_HELM_FLAGS='-f ${localKindVolthaValuesFile} '
+            export EXTRA_HELM_FLAGS='--set log_agent.enabled=False -f ${localKindVolthaValuesFile} '
             cd $WORKSPACE/kind-voltha/
             echo \$EXTRA_HELM_FLAGS
+            kail -n voltha -n default > $WORKSPACE/onos-voltha-combined.log &
             ./voltha up
             """
           }
@@ -295,6 +297,9 @@
     }
 
     stage('After-Test Delay') {
+      when {
+        expression { params.withPatchset }
+      }
       steps {
         sh returnStdout: false, script: """
         # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
@@ -307,39 +312,43 @@
 
   post {
     always {
-      sh returnStdout: false, script: """
+      sh returnStdout: false, script: '''
       set +e
       cp kind-voltha/install-minimal.log $WORKSPACE/
       kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\t'}{.imageID}{'\\n'}" | sort | uniq -c
       kubectl get nodes -o wide
       kubectl get pods -o wide
       kubectl get pods -n voltha -o wide
-      ## get default pod logs
-      for pod in \$(kubectl get pods --no-headers | awk '{print \$1}');
-      do
-        if [[ \$pod == *"onos"* && \$pod != *"onos-service"* ]]; then
-          kubectl logs \$pod onos> $WORKSPACE/\$pod.log;
-        else
-          kubectl logs \$pod> $WORKSPACE/\$pod.log;
-        fi
-      done
-      ## get voltha pod logs
-      for pod in \$(kubectl get pods --no-headers -n voltha | awk '{print \$1}');
-      do
-        if [[ \$pod == *"-api-"* ]]; then
-          kubectl logs \$pod arouter -n voltha > $WORKSPACE/\$pod.log;
-        elif [[ \$pod == "bbsim-"* ]]; then
-          kubectl logs \$pod -n voltha -p > $WORKSPACE/\$pod.log;
-        else
-          kubectl logs \$pod -n voltha > $WORKSPACE/\$pod.log;
-        fi
-      done
+
+      sync
+      pkill kail || true
+
+      ## Pull out errors from log files
+      extract_errors_go() {
+        echo
+        echo "Error summary for $1:"
+        grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
+        echo
+      }
+
+      extract_errors_python() {
+        echo
+        echo "Error summary for $1:"
+        grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
+        echo
+      }
+
+      extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
+      extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
+      extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
+      extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
+
       ## collect events, the chart should be running by now
       kubectl get pods | grep -i voltha-kafka-dump | grep -i running
-      if [[ \$? == 0 ]]; then
+      if [[ $? == 0 ]]; then
          kubectl exec -it `kubectl get pods | grep -i voltha-kafka-dump | grep -i running | cut -f1 -d " "` ./voltha-dump-events.sh > $WORKSPACE/voltha-events.log
       fi
-      """
+      '''
       script {
         deployment_config.olts.each { olt ->
           sh returnStdout: false, script: """
diff --git a/jjb/pipeline/voltha-physical-functional-tests.groovy b/jjb/pipeline/voltha-physical-functional-tests.groovy
index 8aa703e..f419402 100644
--- a/jjb/pipeline/voltha-physical-functional-tests.groovy
+++ b/jjb/pipeline/voltha-physical-functional-tests.groovy
@@ -29,7 +29,7 @@
   environment {
     KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
     VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$WORKSPACE/kind-voltha/bin"
+    PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$WORKSPACE/bin"
   }
 
   stages {
@@ -57,7 +57,8 @@
         sh returnStdout: false, script: """
         cd voltha
         git clone -b ${branch} ${cordRepoUrl}/cord-tester
-        git clone -b ${branch} ${cordRepoUrl}/voltha # NOTE do we need the voltha source code??
+        mkdir -p $WORKSPACE/bin
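+        # install kail into $WORKSPACE/bin (added to PATH above) so the test stage can capture a combined pod log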
+        bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
         """
       }
     }
@@ -74,6 +75,7 @@
         else
             export ROBOT_MISC_ARGS="--removekeywords wuks -e bbsim -e notready -d $WORKSPACE/RobotLogs -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir}"
         fi
+        kail -n voltha -n default --since=20m > $WORKSPACE/onos-voltha-combined.log &
         make -C $WORKSPACE/voltha/voltha-system-tests voltha-test || true
         """
       }
@@ -82,30 +84,35 @@
 
   post {
     always {
-      sh returnStdout: false, script: """
+      sh returnStdout: false, script: '''
       set +e
       kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\t'}{.imageID}{'\\n'}" | sort | uniq -c
       kubectl get nodes -o wide
       kubectl get pods -n voltha -o wide
-      ## get default pod logs
-      for pod in \$(kubectl get pods --no-headers | awk '{print \$1}');
-      do
-        if [[ \$pod == *"onos"* && \$pod != *"onos-service"* ]]; then
-          kubectl logs \$pod onos> $WORKSPACE/\$pod.log;
-        else
-          kubectl logs \$pod> $WORKSPACE/\$pod.log;
-        fi
-      done
-      ## get voltha pod logs
-      for pod in \$(kubectl get pods --no-headers -n voltha | awk '{print \$1}');
-      do
-        if [[ \$pod == *"-api-"* ]]; then
-          kubectl logs \$pod arouter -n voltha > $WORKSPACE/\$pod.log;
-        else
-          kubectl logs \$pod -n voltha > $WORKSPACE/\$pod.log;
-        fi
-      done
-      """
+
+      sync
+      pkill kail || true
+
+      ## Pull out errors from log files
+      extract_errors_go() {
+        echo
+        echo "Error summary for $1:"
+        grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
+        echo
+      }
+
+      extract_errors_python() {
+        echo
+        echo "Error summary for $1:"
+        grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
+        echo
+      }
+
+      extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
+      extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
+      extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
+      extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
+      '''
       script {
         deployment_config.olts.each { olt ->
           sh returnStdout: false, script: """
diff --git a/jjb/pipeline/voltha-system-test-bbsim.groovy b/jjb/pipeline/voltha-system-test-bbsim.groovy
index ba61757..68f96d7 100644
--- a/jjb/pipeline/voltha-system-test-bbsim.groovy
+++ b/jjb/pipeline/voltha-system-test-bbsim.groovy
@@ -16,6 +16,40 @@
 // uses kind-voltha to deploy voltha-2.X
 // uses bbsim to simulate OLT/ONUs
 
+
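+  // helper that dumps cluster state and per-pod logs, prefixing each output file with the given stage name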
+  def logKubernetes(prefix) {
+      sh """
+         set +e
+         cd kind-voltha/
+         cp install-minimal.log $WORKSPACE/${prefix}_install-minimal.log
+         kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\t'}{.imageID}{'\\n'}" | sort | uniq -c
+         kubectl get nodes -o wide
+         kubectl get pods -o wide
+         kubectl get pods -n voltha -o wide
+         ## get default pod logs
+         for pod in \$(kubectl get pods --no-headers | awk '{print \$1}');
+         do
+           if [[ \$pod == *"onos"* && \$pod != *"onos-service"* ]]; then
+             kubectl logs \$pod onos> $WORKSPACE/${prefix}_\$pod.log;
+           else
+             kubectl logs \$pod> $WORKSPACE/${prefix}_\$pod.log;
+           fi
+         done
+         ## get voltha pod logs
+         for pod in \$(kubectl get pods --no-headers -n voltha | awk '{print \$1}');
+         do
+           if [[ \$pod == *"-api-"* ]]; then
+             kubectl logs \$pod arouter -n voltha > $WORKSPACE/${prefix}_\$pod.log;
+           elif [[ \$pod == "bbsim-"* ]]; then
+             kubectl logs \$pod -n voltha -p > $WORKSPACE/${prefix}_\$pod.log;
+           else
+             kubectl logs \$pod -n voltha > $WORKSPACE/${prefix}_\$pod.log;
+           fi
+         done
+         """
+  }
+
+
 pipeline {
 
   /* no label, executor is determined by JJB */
@@ -23,7 +57,7 @@
     label "${params.buildNode}"
   }
   options {
-      timeout(time: 40, unit: 'MINUTES')
+      timeout(time: 80, unit: 'MINUTES')
   }
   environment {
     KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
@@ -41,7 +75,6 @@
     ROBOT_MISC_ARGS="-d $WORKSPACE/RobotLogs"
   }
   stages {
-
     stage('Download kind-voltha') {
       steps {
         sh """
@@ -65,67 +98,79 @@
         sh '''
            rm -rf $WORKSPACE/RobotLogs; mkdir -p $WORKSPACE/RobotLogs
            git clone https://gerrit.opencord.org/voltha-system-tests
-           make -C $WORKSPACE/voltha-system-tests ${makeTarget} || true
+           make ROBOT_DEBUG_LOG_OPT="-l sanity_log.html -r sanity_result.html -o sanity_result.xml" -C $WORKSPACE/voltha-system-tests ${makeTarget}
            '''
       }
     }
 
+    stage('Collect Kubernetes logs after sanity test') {
+      steps {
+        logKubernetes('sanity_test')
+      }
+    }
     // Remove this stage once https://jira.opencord.org/browse/VOL-1977 is resolved
-    stage('Deploy Voltha Again') {
+    stage('Deploy Voltha Again for Functional Tests') {
       steps {
         sh """
            pushd kind-voltha/
-           ./voltha down
-           ./voltha up
+           WAIT_ON_DOWN=yes DEPLOY_K8S=no ./voltha down
+           DEPLOY_K8S=no ./voltha up
            popd
            """
       }
     }
+
     stage('Kubernetes Functional Tests') {
       steps {
         sh '''
-           rm -rf $WORKSPACE/RobotLogs; mkdir -p $WORKSPACE/RobotLogs
-           make -C $WORKSPACE/voltha-system-tests system-scale-test || true
+           make ROBOT_DEBUG_LOG_OPT="-l functional_log.html -r functional_result.html -o functional_output.xml" -C $WORKSPACE/voltha-system-tests system-scale-test
            '''
       }
     }
+
+    stage('Collect Kubernetes logs after functional tests') {
+      steps {
+        logKubernetes('functional')
+      }
+    }
+
+    // Remove this stage once https://jira.opencord.org/browse/VOL-1977 is resolved
+    stage('Deploy Voltha Again for Failure Scenario Tests') {
+      steps {
+        sh """
+           pushd kind-voltha/
+           WAIT_ON_DOWN=yes DEPLOY_K8S=no ./voltha down
+           DEPLOY_K8S=no ./voltha up
+           popd
+           """
+      }
+    }
+
+    stage('Kubernetes Failure Scenario Tests') {
+      steps {
+        sh '''
+           make ROBOT_DEBUG_LOG_OPT="-l failure_log.html -r failure_result.html -o failure_output.xml" -C $WORKSPACE/voltha-system-tests failure-test
+           '''
+      }
+    }
+
+    stage('Collect Kubernetes logs after failure scenario tests') {
+      steps {
+        logKubernetes('failure')
+      }
+    }
+
   }
 
   post {
+    failure {
+        logKubernetes('last')
+    }
+    aborted {
+        logKubernetes('last')
+    }
     always {
-      sh '''
-         set +e
-         cd kind-voltha/
-         cp install-minimal.log $WORKSPACE/
-         kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\t'}{.imageID}{'\\n'}" | sort | uniq -c
-         kubectl get nodes -o wide
-         kubectl get pods -o wide
-         kubectl get pods -n voltha -o wide
-         ## get default pod logs
-         for pod in \$(kubectl get pods --no-headers | awk '{print \$1}');
-         do
-           if [[ \$pod == *"onos"* && \$pod != *"onos-service"* ]]; then
-             kubectl logs \$pod onos> $WORKSPACE/\$pod.log;
-           else
-             kubectl logs \$pod> $WORKSPACE/\$pod.log;
-           fi
-         done
-         ## get voltha pod logs
-         for pod in \$(kubectl get pods --no-headers -n voltha | awk '{print \$1}');
-         do
-           if [[ \$pod == *"-api-"* ]]; then
-             kubectl logs \$pod arouter -n voltha > $WORKSPACE/\$pod.log;
-           elif [[ \$pod == "bbsim-"* ]]; then
-             kubectl logs \$pod -n voltha -p > $WORKSPACE/\$pod.log;
-           else
-             kubectl logs \$pod -n voltha > $WORKSPACE/\$pod.log;
-           fi
-         done
-         ## clean up node
-         WAIT_ON_DOWN=y ./voltha down
-         cd $WORKSPACE/
-         rm -rf kind-voltha/ voltha-system-tests/ || true
-         '''
+
          step([$class: 'RobotPublisher',
             disableArchiveOutput: false,
             logFileName: 'RobotLogs/*log*.html',
diff --git a/jjb/voltha-e2e.yaml b/jjb/voltha-e2e.yaml
index f7fb5f9..f989db5 100644
--- a/jjb/voltha-e2e.yaml
+++ b/jjb/voltha-e2e.yaml
@@ -45,8 +45,8 @@
           default-image-tag: 'master'
           code-branch: 'master'
           make-target: sanity-multi-kind
-          onus: 2
-          pons: 2
+          onus: 1
+          pons: 1
           test-runs: 5
           time-trigger: "H H/3 * * *"
 
@@ -164,7 +164,7 @@
 - job-template:
     id: 'voltha-patch-test'
     name: 'verify_{project}_sanity-test'
-    extra-helm-flags: '--set defaults.image_tag=$GERRIT_BRANCH'
+    extra-helm-flags: ''
 
     description: |
       <!-- Managed by Jenkins Job Builder -->
@@ -194,7 +194,7 @@
 
       - string:
           name: manifestBranch
-          default: '$GERRIT_BRANCH'
+          default: 'master'
           description: 'Name of the repo branch to use'
 
       - string:
@@ -264,7 +264,7 @@
 
       - string:
           name: manifestBranch
-          default: '$GERRIT_BRANCH'
+          default: master
           description: 'Name of the repo branch to use'
 
       - string:
@@ -406,7 +406,7 @@
 
       - string:
           name: manifestBranch
-          default: '$GERRIT_BRANCH'
+          default: master
           description: 'Name of the repo branch to use'
 
       - string:
@@ -549,7 +549,7 @@
 
       - string:
           name: manifestBranch
-          default: 'master'
+          default: master
           description: 'Name of the repo branch to use (change to $GERRIT_BRANCH if testing a patchset)'
 
       - string: