[VOL-4237] Removing the log-collector.sh script from the physical pipeline (logs are already collected by a Robot Framework keyword)
Enabling voltha-scale-multi-stack jobs for voltha-2.8

Change-Id: I4b103de08e74f47a6d520302573f61afb0f20a95
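
For context, the physical test pipelines now rely on the per-container logs written by the Robot Framework StartLogging keyword instead of kind-voltha's log-collector.sh/log-combine.sh scripts. A minimal sketch of the resulting post-build handling, mirroring the hunk kept in voltha-dt-physical-functional-tests.groovy below (illustrative only, not a new file in this change):

    post {
      always {
        getPodsInfo("$WORKSPACE/pods")
        sh returnStdout: false, script: '''
        set +e
        # compress the *-combined.log files produced by the StartLogging keyword
        cd $WORKSPACE
        gzip *-combined.log || true
        rm *-combined.log || true
        '''
        archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
      }
    }
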
diff --git a/jjb/pipeline/voltha-physical-soak-tests.groovy b/jjb/pipeline/voltha-physical-soak-tests.groovy
deleted file mode 100644
index 4bb8b02..0000000
--- a/jjb/pipeline/voltha-physical-soak-tests.groovy
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 280, unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-
-  stages {
-    stage ('Initialize') {
-      steps {
-        step([$class: 'WsCleanup'])
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/kind-voltha"
-        script {
-          deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        }
-        // This checkout allows us to show changes in Jenkins
-        checkout(changelog: true,
-          poll: false,
-          scm: [$class: 'RepoScm',
-            manifestRepositoryUrl: "${params.manifestUrl}",
-            manifestBranch: "${params.branch}",
-            currentBranch: true,
-            destinationDir: 'voltha',
-            forceSync: true,
-            resetFirst: true,
-            quiet: true,
-            jobs: 4,
-            showAllChanges: true]
-          )
-        sh returnStdout: false, script: """
-        cd voltha
-        git clone -b master ${cordRepoUrl}/cord-tester
-        mkdir -p $WORKSPACE/bin
-        bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
-        cd $WORKSPACE
-        if [ "${params.branch}" == "voltha-2.8" ]; then
-           VC_VERSION=1.6.10
-        else
-           VC_VERSION=\$(curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')
-        fi
-
-        HOSTOS=\$(uname -s | tr "[:upper:]" "[:lower:]")
-        HOSTARCH=\$(uname -m | tr "[:upper:]" "[:lower:]")
-        if [ \$HOSTARCH == "x86_64" ]; then
-            HOSTARCH="amd64"
-        fi
-        curl -o $WORKSPACE/bin/voltctl -sSL https://github.com/opencord/voltctl/releases/download/v\${VC_VERSION}/voltctl-\${VC_VERSION}-\${HOSTOS}-\${HOSTARCH}
-        chmod 755 $WORKSPACE/bin/voltctl
-        voltctl version --clientonly
-
-        if [ "${params.branch}" == "master" ]; then
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-           set +e
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        fi
-        """
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FunctionalTests"
-      }
-      steps {
-        sh """
-        cd $WORKSPACE/voltha/kind-voltha/scripts
-        ./log-collector.sh > /dev/null &
-        ./log-combine.sh > /dev/null &
-
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ "${params.testType}" == "Functional" ]; then
-            if ( ${powerSwitch} ); then
-                export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanity -i functional -e DeleteOLT -e DisableONU_AuthCheck -e DisableDeleteONUandOLT -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-            else
-                export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-            fi
-            make -C $WORKSPACE/voltha/voltha-system-tests voltha-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FailureScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ "${params.testType}" == "Failure" ]; then
-           if ( ${powerSwitch} ); then
-              export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-           else
-              export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-           fi
-           make -C $WORKSPACE/voltha/voltha-system-tests voltha-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Dataplane Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/DataplaneTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ "${params.testType}" == "Dataplane" ]; then
-           export ROBOT_MISC_ARGS="--removekeywords wuks -i dataplane -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-           make -C $WORKSPACE/voltha/voltha-system-tests voltha-test || true
-        fi
-        """
-      }
-    }
-
-  }
-  post {
-    always {
-      sh returnStdout: false, script: '''
-      set +e
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
-      kubectl get nodes -o wide
-      kubectl get pods -n voltha -o wide
-
-      sleep 60 # Wait for log-collector and log-combine to complete
-
-      # Clean up "announcer" pod used by the tests if present
-      kubectl delete pod announcer || true
-
-      ## Pull out errors from log files
-      extract_errors_go() {
-        echo
-        echo "Error summary for $1:"
-        grep '"level":"error"' $WORKSPACE/voltha/kind-voltha/scripts/logger/combined/$1*
-        echo
-      }
-
-      extract_errors_python() {
-        echo
-        echo "Error summary for $1:"
-        grep 'ERROR' $WORKSPACE/voltha/kind-voltha/scripts/logger/combined/$1*
-        echo
-      }
-
-      extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
-      extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
-      extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
-      extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-      extract_errors_python onos >> $WORKSPACE/error-report.log
-
-      cd $WORKSPACE/voltha/kind-voltha/scripts/logger/combined/
-      tar czf $WORKSPACE/container-logs.tgz *
-
-      cd $WORKSPACE
-      gzip *-combined.log || true
-
-      # collect ETCD cluster logs
-      mkdir -p $WORKSPACE/etcd
-      printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-          sh returnStdout: false, script: """
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
-          """
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log'
-    }
-  }
-}
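
The deleted soak pipeline pinned voltctl to 1.6.10 for voltha-2.8 and otherwise downloaded the latest release. A condensed sketch of that version-selection step, shell embedded in Groovy as in the pipelines above (stage name is illustrative):

    stage('Install voltctl') {
      steps {
        sh """
        mkdir -p $WORKSPACE/bin
        if [ "${params.branch}" == "voltha-2.8" ]; then
           VC_VERSION=1.6.10
        else
           VC_VERSION=\$(curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')
        fi
        HOSTOS=\$(uname -s | tr "[:upper:]" "[:lower:]")
        HOSTARCH=\$(uname -m | tr "[:upper:]" "[:lower:]")
        if [ \$HOSTARCH == "x86_64" ]; then
            HOSTARCH="amd64"
        fi
        curl -o $WORKSPACE/bin/voltctl -sSL https://github.com/opencord/voltctl/releases/download/v\${VC_VERSION}/voltctl-\${VC_VERSION}-\${HOSTOS}-\${HOSTARCH}
        chmod 755 $WORKSPACE/bin/voltctl
        voltctl version --clientonly
        """
      }
    }
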
diff --git a/jjb/pipeline/voltha-scale-matrix.groovy b/jjb/pipeline/voltha-scale-matrix.groovy
deleted file mode 100644
index 7cc810b..0000000
--- a/jjb/pipeline/voltha-scale-matrix.groovy
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2019-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-pipeline {
-
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-      timeout(time: 120, unit: 'MINUTES')
-  }
-  environment {
-    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
-    KUBECONFIG="$HOME/.kube/config"
-    VOLTCONFIG="$HOME/.volt/config"
-    SSHPASS="karaf"
-    SCHEDULE_ON_CONTROL_NODES="yes"
-    FANCY=0
-    NAME="minimal"
-
-    WITH_SIM_ADAPTERS="no"
-    WITH_RADIUS="no"
-    WITH_BBSIM="yes"
-    LEGACY_BBSIM_INDEX="no"
-    DEPLOY_K8S="no"
-    CONFIG_SADIS="external"
-    VOLTHA_LOG_LEVEL="WARN"
-
-    // install everything in the default namespace
-    VOLTHA_NS="default"
-    ADAPTER_NS="default"
-    INFRA_NS="default"
-    BBSIM_NS="default"
-
-    // workflow
-    WITH_EAPOL="no"
-    WITH_DHCP="no"
-    WITH_IGMP="no"
-
-    // infrastructure size
-    NUM_OF_OPENONU="${openonuAdapterReplicas}"
-    NUM_OF_ONOS="${onosReplicas}"
-    NUM_OF_ATOMIX="${atomixReplicas}"
-    NUM_OF_KAFKA="${kafkaReplicas}"
-    NUM_OF_ETCD="${etcdReplicas}"
-  }
-
-  stages {
-    stage ('Parse parameters') {
-      steps {
-        script {
-          format = "format is 'olt-pon-onu' separated bya comma, eg: '1-16-16, 1-16-32, 2-16-32'"
-          source = params.topologies
-
-          if (source == null || source == "") {
-            throw new Exception("You need to specify some deployment topologies, " + format)
-          }
-
-          topologies = []
-
-          for(topo in source.split(",")) {
-            t = topo.split("-")
-            topologies.add(['olt': t[0].trim(), 'pon': t[1].trim(), 'onu': t[2].trim()])
-          }
-
-          if (topologies.size() == 0) {
-            throw new Exception("Not enough topologies defined, " + format)
-          }
-          println "Deploying topologies:"
-          println topologies
-        }
-      }
-    }
-    stage ('Cleanup') {
-      steps {
-        timeout(time: 10, unit: 'MINUTES') {
-          sh returnStdout: false, script: """
-            helm repo add incubator https://charts.helm.sh/incubator
-            helm repo add stable https://charts.helm.sh/stable
-            helm repo add onf https://charts.opencord.org
-            helm repo add cord https://charts.opencord.org
-            helm repo add onos https://charts.onosproject.org
-            helm repo add atomix https://charts.atomix.io
-            helm repo add bbsim-sadis https://ciena.github.io/bbsim-sadis-server/charts
-            helm repo update
-
-            for hchart in \$(helm list -q | grep -E -v 'docker-registry|kafkacat');
-            do
-                echo "Purging chart: \${hchart}"
-                helm delete "\${hchart}"
-            done
-            bash /home/cord/voltha-scale/wait_for_pods.sh
-
-            cd $WORKSPACE
-            rm -rf $WORKSPACE/*
-          """
-        }
-      }
-    }
-    stage('Clone kind-voltha') {
-      steps {
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[ url: "https://gerrit.opencord.org/kind-voltha", ]],
-          branches: [[ name: "master", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-          if [ '${kindVolthaChange}' != '' ] ; then
-          cd $WORKSPACE/kind-voltha;
-          git fetch https://gerrit.opencord.org/kind-voltha ${kindVolthaChange} && git checkout FETCH_HEAD
-          fi
-          """)
-        }
-      }
-    }
-    stage('Clone voltha-system-tests') {
-      steps {
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[ url: "https://gerrit.opencord.org/voltha-system-tests", ]],
-          branches: [[ name: "${release}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    stage('Deploy and test') {
-      steps {
-          repeat_deploy_and_test(topologies)
-      }
-    }
-    stage('Aggregate stats') {
-      steps {
-        sh returnStdout: false, script: """
-        export IN_FOLDER=$WORKSPACE/stats/
-        export OUT_FOLDER=$WORKSPACE/plots/
-        mkdir -p \$OUT_FOLDER
-        cd $WORKSPACE/voltha-system-tests
-        make vst_venv
-        source ./vst_venv/bin/activate
-
-        sleep 60 # we have to wait for prometheus to collect all the information
-
-        python tests/scale/stats-aggregation.py -s \$IN_FOLDER -o \$OUT_FOLDER
-        """
-      }
-    }
-  }
-  post {
-    always {
-      archiveArtifacts artifacts: '*-install-minimal.log,*-minimal-env.sh,RobotLogs/**/*,stats/**/*,logs/**/*'
-    }
-  }
-}
-
-def repeat_deploy_and_test(list) {
-  for (int i = 0; i < list.size(); i++) {
-    stage('Cleanup') {
-      sh returnStdout: false, script: """
-      for hchart in \$(helm list -q | grep -E -v 'bbsim-sadis-server|onos|radius');
-      do
-          echo "Purging chart: \${hchart}"
-          helm delete "\${hchart}"
-      done
-      bash /home/cord/voltha-scale/wait_for_pods.sh
-      """
-    }
-    stage('Deploy monitoring infrastructure') {
-      sh returnStdout: false, script: '''
-      helm install nem-monitoring cord/nem-monitoring \
-      -f $HOME/voltha-scale/grafana.yaml \
-      --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
-      --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
-
-      # TODO download this file from https://github.com/opencord/helm-charts/blob/master/scripts/wait_for_pods.sh
-      bash /home/cord/voltha-scale/wait_for_pods.sh
-      '''
-    }
-    stage('Deploy topology: ' + list[i]['olt'] + "-" + list[i]['pon'] + "-" + list[i]['onu']) {
-      timeout(time: 10, unit: 'MINUTES') {
-        script {
-          now = new Date();
-          currentRunStart = now.getTime() / 1000;
-          println("Start: " + currentRunStart)
-        }
-        sh returnStdout: false, script: """
-        cd $WORKSPACE/kind-voltha/
-
-        if [ '${release.trim()}' != 'master' ]; then
-          source $WORKSPACE/kind-voltha/releases/${release}
-        fi
-
-        # if it's newer than voltha-2.4 set the correct BBSIM_CFG
-        if [ '${release.trim()}' != 'voltha-2.4' ]; then
-          export BBSIM_CFG="$WORKSPACE/kind-voltha/configs/bbsim-sadis-dt.yaml"
-        fi
-
-        export NUM_OF_BBSIM=${list[i]['olt']}
-        export EXTRA_HELM_FLAGS+="--set enablePerf=true,pon=${list[i]['pon']},onu=${list[i]['onu']} "
-        export EXTRA_HELM_FLAGS+="--set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default"
-        ./voltha up
-
-        # disable LLDP
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled false
-
-        cp minimal-env.sh ../${list[i]['olt']}-${list[i]['pon']}-${list[i]['onu']}-minimal-env.sh
-        cp install-minimal.log ../${list[i]['olt']}-${list[i]['pon']}-${list[i]['onu']}-install-minimal.log
-        """
-        sleep(120) // TODO can we improve and check once the bbsim-sadis-server is actually done loading subscribers??
-      }
-    }
-    stage('Test topology: ' + list[i]['olt'] + "-" + list[i]['pon'] + "-" + list[i]['onu']) {
-      timeout(time: 15, unit: 'MINUTES') {
-        sh returnStdout: false, script: """
-        mkdir -p $WORKSPACE/RobotLogs/${list[i]['olt']}-${list[i]['pon']}-${list[i]['onu']}
-        cd $WORKSPACE/voltha-system-tests
-        make vst_venv
-
-        export ROBOT_PARAMS=" \
-          -v olt:${list[i]['olt']} \
-          -v pon:${list[i]['pon']} \
-          -v onu:${list[i]['onu']} \
-          -v workflow:dt \
-          -v withEapol:false \
-          -v withDhcp:false \
-          -v withIgmp:false \
-          -e authentication \
-          -e dhcp"
-
-        cd $WORKSPACE/voltha-system-tests
-        source ./vst_venv/bin/activate
-        robot -d $WORKSPACE/RobotLogs/${list[i]['olt']}-${list[i]['pon']}-${list[i]['onu']} \
-        \$ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-        """
-      }
-    }
-    stage('Collect metrics: ' + list[i]['olt'] + "-" + list[i]['pon'] + "-" + list[i]['onu']) {
-      script {
-        now = new Date();
-        currentRunEnd = now.getTime() / 1000;
-        println("End: " + currentRunEnd)
-        delta = currentRunEnd - currentRunStart
-        println("Delta: " + delta)
-        minutesDelta = Math.ceil(delta / 60).toInteger()
-        println("Delta in minutes: " + minutesDelta)
-      }
-      sh returnStdout: false, script: """
-      export LOG_FOLDER=$WORKSPACE/stats/${list[i]['olt']}-${list[i]['pon']}-${list[i]['onu']}
-      mkdir -p \$LOG_FOLDER
-      cd $WORKSPACE/voltha-system-tests
-      make vst_venv
-      source ./vst_venv/bin/activate
-
-      sleep 60 # we have to wait for prometheus to collect all the information
-
-      python tests/scale/sizing.py -o \$LOG_FOLDER -s ${minutesDelta}
-      """
-    }
-  }
-}
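
The voltha-scale-matrix pipeline above parses its topologies parameter as comma-separated 'olt-pon-onu' triples. A standalone Groovy sketch of that parsing step (function name is illustrative):

    def parseTopologies(String source) {
      def format = "format is 'olt-pon-onu' separated by a comma, e.g. '1-16-16, 1-16-32, 2-16-32'"
      if (source == null || source.trim() == "") {
        throw new Exception("You need to specify some deployment topologies, " + format)
      }
      def topologies = []
      for (topo in source.split(",")) {
        def t = topo.split("-")
        topologies.add(['olt': t[0].trim(), 'pon': t[1].trim(), 'onu': t[2].trim()])
      }
      return topologies
    }

    // e.g. parseTopologies("1-16-16, 2-16-32") yields
    // [[olt:1, pon:16, onu:16], [olt:2, pon:16, onu:32]]
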
diff --git a/jjb/pipeline/voltha-scale-multi-stack.groovy b/jjb/pipeline/voltha-scale-multi-stack.groovy
deleted file mode 100644
index 8264387..0000000
--- a/jjb/pipeline/voltha-scale-multi-stack.groovy
+++ /dev/null
@@ -1,724 +0,0 @@
-// Copyright 2019-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA using kind-voltha and performs a scale test
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-      timeout(time: 120, unit: 'MINUTES')
-  }
-  environment {
-    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
-    KUBECONFIG="$HOME/.kube/config"
-    SSHPASS="karaf"
-    PATH="$PATH:$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-    SCHEDULE_ON_CONTROL_NODES="yes"
-    FANCY=0
-    WAIT_ON_DOWN="yes"
-    WITH_SIM_ADAPTERS="no"
-    WITH_RADIUS="${withRadius}"
-    WITH_BBSIM="yes"
-    LEGACY_BBSIM_INDEX="no"
-    DEPLOY_K8S="no"
-    CONFIG_SADIS="external"
-    WITH_KAFKA="yes"
-    WITH_ETCD="yes"
-    VOLTHA_ETCD_PORT=9999
-    INFRA_NS="infra"
-
-    // configurable options
-    WITH_EAPOL="${withEapol}"
-    WITH_DHCP="${withDhcp}"
-    WITH_IGMP="${withIgmp}"
-    VOLTHA_LOG_LEVEL="${logLevel}"
-    NUM_OF_BBSIM="${olts}"
-    NUM_OF_OPENONU="${openonuAdapterReplicas}"
-    NUM_OF_ONOS="${onosReplicas}"
-    NUM_OF_ATOMIX="${atomixReplicas}"
-    NUM_OF_KAFKA="${kafkaReplicas}"
-    NUM_OF_ETCD="${etcdReplicas}"
-    WITH_PPROF="${withProfiling}"
-    EXTRA_HELM_FLAGS="${extraHelmFlags} " // note that the trailing space is required to separate the parameters from appends done later
-    VOLTHA_CHART="${volthaChart}"
-    VOLTHA_BBSIM_CHART="${bbsimChart}"
-    VOLTHA_ADAPTER_OPEN_OLT_CHART="${openoltAdapterChart}"
-    VOLTHA_ADAPTER_OPEN_ONU_CHART="${openonuAdapterChart}"
-    ONOS_CLASSIC_CHART="${onosChart}"
-    RADIUS_CHART="${radiusChart}"
-
-    APPS_TO_LOG="etcd kafka onos-onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server"
-    LOG_FOLDER="$WORKSPACE/logs"
-
-    GERRIT_PROJECT="${GERRIT_PROJECT}"
-  }
-
-  stages {
-    stage ('Cleanup') {
-      steps {
-        timeout(time: 11, unit: 'MINUTES') {
-          sh returnStdout: false, script: """
-            helm repo add stable https://charts.helm.sh/stable
-            helm repo add onf https://charts.opencord.org
-            helm repo add cord https://charts.opencord.org
-            helm repo add onos https://charts.onosproject.org
-            helm repo add atomix https://charts.atomix.io
-            helm repo add bbsim-sadis https://ciena.github.io/bbsim-sadis-server/charts
-            helm repo update
-
-            # removing ETCD port forward
-            P_ID="\$(ps e -ww -A | grep "_TAG=etcd-port-forward" | grep -v grep | awk '{print \$1}')"
-            if [ -n "\$P_ID" ]; then
-              kill -9 \$P_ID
-            fi
-
-            NAMESPACES="voltha1 voltha2 infra default"
-            for NS in \$NAMESPACES
-            do
-                for hchart in \$(helm list -n \$NS -q | grep -E -v 'docker-registry|kafkacat');
-                do
-                    echo "Purging chart: \${hchart}"
-                    helm delete -n \$NS "\${hchart}"
-                done
-            done
-
-            test -e $WORKSPACE/kind-voltha/voltha && cd $WORKSPACE/kind-voltha && ./voltha down
-
-            cd $WORKSPACE
-            rm -rf $WORKSPACE/*
-
-            # remove orphaned port-forward from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
-          """
-        }
-      }
-    }
-    stage('Clone kind-voltha') {
-      steps {
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/kind-voltha",
-            refspec: "${kindVolthaChange}"
-          ]],
-          branches: [[ name: "master", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-          if [ '${kindVolthaChange}' != '' ] ; then
-            cd $WORKSPACE/kind-voltha;
-            git fetch https://gerrit.opencord.org/kind-voltha ${kindVolthaChange} && git checkout FETCH_HEAD
-          fi
-          """)
-        }
-      }
-    }
-    stage('Clone voltha-system-tests') {
-      steps {
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${release}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    stage('Build patch') {
-      when {
-        expression {
-          return params.GERRIT_PROJECT
-        }
-      }
-      steps {
-        sh """
-        git clone https://\$GERRIT_HOST/\$GERRIT_PROJECT
-        cd \$GERRIT_PROJECT
-        git fetch https://\$GERRIT_HOST/\$GERRIT_PROJECT \$GERRIT_REFSPEC && git checkout FETCH_HEAD
-
-        DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-build
-        DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-push
-        """
-      }
-    }
-    stage('Deploy common infrastructure') {
-      // includes monitoring, kafka, etcd
-      steps {
-        sh '''
-        if [ ${withMonitoring} = true ] ; then
-          helm install -n $INFRA_NS nem-monitoring cord/nem-monitoring \
-          -f $HOME/voltha-scale/grafana.yaml \
-          --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
-          --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
-        fi
-        '''
-      }
-    }
-    stage('Deploy VOLTHA infrastructure') {
-      steps {
-        sh returnStdout: false, script: '''
-
-          cd $WORKSPACE/kind-voltha/
-
-          export ETCD_CHART=$HOME/teone/helm-charts/etcd
-          export KAFKA_CHART=$HOME/teone/helm-charts/kafka
-
-          # KAFKA config
-          export NUM_OF_KAFKA=${kafkaReplicas}
-          export EXTRA_HELM_FLAGS+=' --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default '
-
-          # ETCD config
-          export EXTRA_HELM_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
-
-          NAME=infra JUST_INFRA=y ./voltha up
-
-          # Forward the ETCD port onto $VOLTHA_ETCD_PORT
-          _TAG=etcd-port-forward kubectl -n \$INFRA_NS port-forward --address 0.0.0.0 -n default service/etcd $VOLTHA_ETCD_PORT:2379&
-        '''
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        deploy_voltha_stacks(params.volthaStacks)
-      }
-    }
-    stage('Start logging') {
-      steps {
-        sh returnStdout: false, script: '''
-        # start logging with kail
-
-        mkdir -p $LOG_FOLDER
-
-        list=($APPS_TO_LOG)
-        for app in "${list[@]}"
-        do
-          echo "Starting logs for: ${app}"
-          _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
-        done
-        '''
-      }
-    }
-    stage('Configuration') {
-      steps {
-        script {
-          sh returnStdout: false, script: """
-
-          # TODO this needs to be repeated per stack
-          # kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
-
-          #Setting link discovery
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 1000
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
-
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.onosproject
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.opencord
-
-          # Set Flows/Ports/Meters poll frequency
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
-
-          if [ ${withFlows} = false ]; then
-            sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
-          fi
-
-          if [ ${withPcap} = true ] && [ ${volthaStacks} -eq 1 ] ; then
-            # Start the tcp-dump in ofagent
-            export OF_AGENT=\$(kubectl -n \$INFRA_NS get pods -l app=ofagent -o name)
-            kubectl exec \$OF_AGENT -- apk update
-            kubectl exec \$OF_AGENT -- apk add tcpdump
-            kubectl exec \$OF_AGENT -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
-            _TAG=ofagent-tcpdump kubectl -n \$INFRA_NS exec \$OF_AGENT -- tcpdump -nei eth0 -w out.pcap&
-
-            # Start the tcp-dump in radius
-            export RADIUS=\$(kubectl -n \$INFRA_NS get pods -l app=radius -o name)
-            kubectl exec \$RADIUS -- apt-get update
-            kubectl exec \$RADIUS -- apt-get install -y tcpdump
-            _TAG=radius-tcpdump kubectl -n \$INFRA_NS exec \$RADIUS -- tcpdump -w out.pcap&
-
-            # Start the tcp-dump in ONOS
-            for i in \$(seq 0 \$((NUM_OF_ONOS - 1))); do
-              INSTANCE="onos-onos-classic-\$i"
-              kubectl exec \$INSTANCE -- apt-get update
-              kubectl exec \$INSTANCE -- apt-get install -y tcpdump
-              kubectl exec \$INSTANCE -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
-              _TAG=\$INSTANCE kubectl -n \$INFRA_NS exec \$INSTANCE -- /usr/bin/tcpdump -nei eth0 port 1812 -w out.pcap&
-            done
-          else
-            echo "PCAP not supported for multiple VOLTHA stacks"
-          fi
-          """
-        }
-      }
-    }
-    stage('Setup Test') {
-      steps {
-        sh '''
-          mkdir -p $WORKSPACE/RobotLogs
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-        sh '''
-          if [ ${withProfiling} = true ] && [ ${volthaStacks} -eq 1 ]; then
-            mkdir -p $LOG_FOLDER/pprof
-            cat << EOF > $WORKSPACE/pprof.sh
-timestamp() {
-  date +"%T"
-}
-
-i=0
-while [[ true ]]; do
-  ((i++))
-  ts=$(timestamp)
-  go tool pprof -png http://127.0.0.1:6060/debug/pprof/heap > $LOG_FOLDER/pprof/rw-core-heap-\\$i-\\$ts.png
-  go tool pprof -png http://127.0.0.1:6060/debug/pprof/goroutine > $LOG_FOLDER/pprof/rw-core-goroutine-\\$i-\\$ts.png
-  curl -o $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof http://127.0.0.1:6060/debug/pprof/profile?seconds=10
-  go tool pprof -png $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.png
-
-  go tool pprof -png http://127.0.0.1:6061/debug/pprof/heap > $LOG_FOLDER/pprof/openolt-heap-\\$i-\\$ts.png
-  go tool pprof -png http://127.0.0.1:6061/debug/pprof/goroutine > $LOG_FOLDER/pprof/openolt-goroutine-\\$i-\\$ts.png
-  curl -o $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof http://127.0.0.1:6061/debug/pprof/profile?seconds=10
-  go tool pprof -png $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.png
-
-  go tool pprof -png http://127.0.0.1:6062/debug/pprof/heap > $LOG_FOLDER/pprof/ofagent-heap-\\$i-\\$ts.png
-  go tool pprof -png http://127.0.0.1:6062/debug/pprof/goroutine > $LOG_FOLDER/pprof/ofagent-goroutine-\\$i-\\$ts.png
-  curl -o $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof http://127.0.0.1:6062/debug/pprof/profile?seconds=10
-  go tool pprof -png $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.png
-
-  sleep 10
-done
-EOF
-
-            _TAG="pprof"
-            _TAG=$_TAG bash $WORKSPACE/pprof.sh &
-          else
-            echo "Profiling not supported for multiple VOLTHA stacks"
-          fi
-        '''
-      }
-    }
-    stage('Run Test') {
-      steps {
-        test_voltha_stacks(params.volthaStacks)
-      }
-    }
-  }
-  post {
-    always {
-      // collect result, done in the "post" step so it's executed even in the
-      // event of a timeout in the tests
-      sh '''
-
-        # stop the kail processes
-        list=($APPS_TO_LOG)
-        for app in "${list[@]}"
-        do
-          echo "Stopping logs for: ${app}"
-          _TAG="kail-$app"
-          P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
-          if [ -n "$P_IDS" ]; then
-            echo $P_IDS
-            for P_ID in $P_IDS; do
-              kill -9 $P_ID
-            done
-          fi
-        done
-      '''
-      // compressing the logs to save space on Jenkins
-      sh '''
-        cd $LOG_FOLDER
-        tar -czf logs.tar.gz *.log
-        rm *.log
-      '''
-      sh '''
-
-        if [ ${withPcap} = true ] && [ ${volthaStacks} -eq 1 ]; then
-          # stop ofAgent tcpdump
-          P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
-          if [ -n "\$P_ID" ]; then
-            kill -9 \$P_ID
-          fi
-
-          # stop radius tcpdump
-          P_ID="\$(ps e -ww -A | grep "_TAG=radius-tcpdump" | grep -v grep | awk '{print \$1}')"
-          if [ -n "\$P_ID" ]; then
-            kill -9 \$P_ID
-          fi
-
-          # stop onos tcpdump
-          LIMIT=$(($NUM_OF_ONOS - 1))
-          for i in $(seq 0 $LIMIT); do
-            INSTANCE="onos-onos-classic-$i"
-            P_ID="\$(ps e -ww -A | grep "_TAG=$INSTANCE" | grep -v grep | awk '{print \$1}')"
-            if [ -n "\$P_ID" ]; then
-              kill -9 \$P_ID
-            fi
-          done
-
-          # copy the file
-          export OF_AGENT=$(kubectl get pods -l app=ofagent | awk 'NR==2{print $1}') || true
-          kubectl cp $OF_AGENT:out.pcap $LOG_FOLDER/ofagent.pcap || true
-
-          export RADIUS=$(kubectl get pods -l app=radius | awk 'NR==2{print $1}') || true
-          kubectl cp $RADIUS:out.pcap $LOG_FOLDER/radius.pcap || true
-
-          LIMIT=$(($NUM_OF_ONOS - 1))
-          for i in $(seq 0 $LIMIT); do
-            INSTANCE="onos-onos-classic-$i"
-            kubectl cp $INSTANCE:out.pcap $LOG_FOLDER/$INSTANCE.pcap || true
-          done
-        fi
-      '''
-      sh '''
-        if [ ${withProfiling} = true ] && [ ${volthaStacks} -eq 1 ]; then
-          _TAG="pprof"
-          P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
-          if [ -n "$P_IDS" ]; then
-            echo $P_IDS
-            for P_ID in $P_IDS; do
-              kill -9 $P_ID
-            done
-          fi
-        fi
-      '''
-      plot([
-        csvFileName: 'scale-test.csv',
-        csvSeries: [
-          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-        ],
-        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (Stacks: ${params.volthaStacks}, OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
-      ])
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: 'RobotLogs/**/log.html',
-        otherFiles: '',
-        outputFileName: 'RobotLogs/**/output.xml',
-        outputPath: '.',
-        passThreshold: 100,
-        reportFileName: 'RobotLogs/**/report.html',
-        unstableThreshold: 0]);
-      // get all the logs from kubernetes PODs
-      sh returnStdout: false, script: '''
-
-        # store information on running charts
-        helm ls --all-namespaces > $LOG_FOLDER/helm-list.txt || true
-
-        # store information on the running pods
-        kubectl get pods --all-namespaces -o wide > $LOG_FOLDER/pods.txt || true
-        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
-        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
-
-        # copy the ONOS logs directly from the container to avoid the color codes
-        printf '%s\n' $(kubectl -n \$INFRA_NS get pods -l app=onos-onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl -n \$INFRA_NS cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
-
-        # get radius logs out of the container
-        kubectl -n \$INFRA_NS  cp $(kubectl -n \$INFRA_NS get pods -l app=radius --no-headers  | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER//radius.log || true
-      '''
-      // dump all the BBSim(s) ONU information
-      script {
-        for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
-          stack_ns="voltha"+i
-          sh """
-          BBSIM_IDS=\$(kubectl -n ${stack_ns} get pods | grep bbsim | grep -v server | awk '{print \$1}')
-          IDS=(\$BBSIM_IDS)
-
-          for bbsim in "\${IDS[@]}"
-          do
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl onu list > $LOG_FOLDER/${stack_ns}/\$bbsim-device-list.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl service list > $LOG_FOLDER/${stack_ns}/\$bbsim-service-list.txt || true
-          done
-          """
-        }
-      }
-      // get ONOS debug infos
-      sh '''
-
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt || true
-
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt || true
-
-        if [ ${withFlows} = true ] ; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt || true
-        fi
-
-        if [ ${provisionSubscribers} = true ]; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt || true
-        fi
-
-        if [ ${withEapol} = true ] ; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt || true
-        fi
-
-        if [ ${withDhcp} = true ] ; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt || true
-        fi
-      '''
-      // collect etcd metrics
-      sh '''
-        mkdir -p $WORKSPACE/etcd-metrics
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
-      '''
-      // get VOLTHA debug infos
-      script {
-        for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
-          stack_ns="voltha"+i
-          voltcfg="~/.volt/config-voltha"+i
-          try {
-            sh """
-            voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list -o json > $LOG_FOLDER/${stack_ns}/device-list.json || true
-            python -m json.tool $LOG_FOLDER/${stack_ns}/device-list.json > $LOG_FOLDER/${stack_ns}/voltha-devices-list.json || true
-            rm $LOG_FOLDER/${stack_ns}/device-list.json || true
-            voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list > $LOG_FOLDER/${stack_ns}/voltha-devices-list.txt || true
-
-            DEVICE_LIST=
-            printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device flows # > $LOG_FOLDER/${stack_ns}/voltha-device-flows-#.txt" || true
-            printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/${stack_ns}/voltha-device-ports-#.txt" || true
-
-            printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice flows # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-flows-#.txt" || true
-            printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice port list # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-ports-#.txt" || true
-            """
-          } catch(e) {
-            sh '''
-            echo "Can't get device list from voltclt"
-            '''
-          }
-        }
-      }
-      // get cpu usage by container
-      sh '''
-      if [ ${withMonitoring} = true ] ; then
-        cd $WORKSPACE/voltha-system-tests
-        source ./vst_venv/bin/activate
-        sleep 60 # we have to wait for prometheus to collect all the information
-        python tests/scale/sizing.py -o $WORKSPACE/plots || true
-      fi
-      '''
-      archiveArtifacts artifacts: 'kind-voltha/install-*.log,execution-time-*.txt,logs/**/*.txt,logs/**/*.tar.gz,RobotLogs/**/*,plots/*,etcd-metrics/*'
-    }
-  }
-}
-
-def deploy_voltha_stacks(numberOfStacks) {
-  for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
-    stage("Deploy VOLTHA stack " + i) {
-      sh returnStdout: false, script: """
-
-      # unset voltha-api port so that the port is forwarded on a new one
-      unset VOLTHA_API_PORT
-
-      cd $WORKSPACE/kind-voltha/
-
-      export NAME=voltha${i}
-      export VOLTHA_NS=voltha${i}
-      export ADAPTER_NS=voltha${i}
-      export BBSIM_NS=voltha${i}
-      export BBSIM_BASE_INDEX=${i}
-      export WITH_ETCD=etcd.\$INFRA_NS.svc:2379
-      export WITH_KAFKA=kafka.\$INFRA_NS.svc:9092
-      export WITH_ONOS=onos-onos-classic-hs.\$INFRA_NS.svc:6653
-
-      export EXTRA_HELM_FLAGS+=' '
-
-      # Load the release defaults
-      if [ '${release.trim()}' != 'master' ]; then
-        source $WORKSPACE/kind-voltha/releases/${release}
-        EXTRA_HELM_FLAGS+=" ${extraHelmFlags} "
-      fi
-
-      # BBSim custom image handling
-      if [ '${bbsimImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'bbsim' ]; then
-        IFS=: read -r bbsimRepo bbsimTag <<< '${bbsimImg.trim()}'
-        EXTRA_HELM_FLAGS+="--set images.bbsim.repository=\$bbsimRepo,images.bbsim.tag=\$bbsimTag "
-      fi
-
-      # VOLTHA and ofAgent custom image handling
-      # NOTE to override the rw-core image in a released version you must set the ofAgent image too
-      # TODO split ofAgent and voltha-go
-      if [ '${rwCoreImg.trim()}' != '' ] && [ '${ofAgentImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-go' ]; then
-        IFS=: read -r rwCoreRepo rwCoreTag <<< '${rwCoreImg.trim()}'
-        IFS=: read -r ofAgentRepo ofAgentTag <<< '${ofAgentImg.trim()}'
-        EXTRA_HELM_FLAGS+="--set images.rw_core.repository=\$rwCoreRepo,images.rw_core.tag=\$rwCoreTag,images.ofagent.repository=\$ofAgentRepo,images.ofagent.tag=\$ofAgentTag "
-      fi
-
-      # OpenOLT custom image handling
-      if [ '${openoltAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openolt-adapter' ]; then
-        IFS=: read -r openoltAdapterRepo openoltAdapterTag <<< '${openoltAdapterImg.trim()}'
-        EXTRA_HELM_FLAGS+="--set images.adapter_open_olt.repository=\$openoltAdapterRepo,images.adapter_open_olt.tag=\$openoltAdapterTag "
-      fi
-
-      # OpenONU custom image handling
-      if [ '${openonuAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter' ]; then
-        IFS=: read -r openonuAdapterRepo openonuAdapterTag <<< '${openonuAdapterImg.trim()}'
-        EXTRA_HELM_FLAGS+="--set images.adapter_open_onu.repository=\$openonuAdapterRepo,images.adapter_open_onu.tag=\$openonuAdapterTag "
-      fi
-
-      # OpenONU Go custom image handling
-      if [ '${openonuAdapterGoImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter-go' ]; then
-        IFS=: read -r openonuAdapterGoRepo openonuAdapterGoTag <<< '${openonuAdapterGoImg.trim()}'
-        EXTRA_HELM_FLAGS+="--set images.adapter_open_onu_go.repository=\$openonuAdapterGoRepo,images.adapter_open_onu_go.tag=\$openonuAdapterGoTag "
-      fi
-
-      # ONOS custom image handling
-      if [ '${onosImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-onos' ]; then
-        IFS=: read -r onosRepo onosTag <<< '${onosImg.trim()}'
-        EXTRA_HELM_FLAGS+="--set images.onos.repository=\$onosRepo,images.onos.tag=\$onosTag "
-      fi
-
-      # set BBSim parameters
-      EXTRA_HELM_FLAGS+='--set enablePerf=true,pon=${pons},onu=${onus} '
-
-      # disable the securityContext, this is a development cluster
-      EXTRA_HELM_FLAGS+='--set securityContext.enabled=false '
-
-      # No persistent-volume-claims in Atomix
-      EXTRA_HELM_FLAGS+="--set atomix.persistence.enabled=false "
-
-      echo "Installing with the following extra arguments:"
-      echo $EXTRA_HELM_FLAGS
-
-      # if it's newer than voltha-2.4 set the correct BBSIM_CFG
-      if [ '${release.trim()}' != 'voltha-2.4' ]; then
-        export BBSIM_CFG="$WORKSPACE/kind-voltha/configs/bbsim-sadis-${workflow}.yaml"
-      fi
-
-      # Use custom built images
-
-      if [ '\$GERRIT_PROJECT' == 'voltha-go' ]; then
-        EXTRA_HELM_FLAGS+="--set images.rw_core.repository=${dockerRegistry}/voltha/voltha-rw-core,images.rw_core.tag=voltha-scale "
-      fi
-
-      if [ '\$GERRIT_PROJECT' == 'voltha-openolt-adapter' ]; then
-        EXTRA_HELM_FLAGS+="--set images.adapter_open_olt.repository=${dockerRegistry}/voltha/voltha-openolt-adapter,images.adapter_open_olt.tag=voltha-scale "
-      fi
-
-      if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter' ]; then
-        EXTRA_HELM_FLAGS+="--set images.adapter_open_onu.repository=${dockerRegistry}/voltha/voltha-openonu-adapter,images.adapter_open_onu.tag=voltha-scale "
-      fi
-
-      if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter-go' ]; then
-        EXTRA_HELM_FLAGS+="--set images.adapter_open_onu_go.repository=${dockerRegistry}/voltha/voltha-openonu-adapter-go,images.adapter_open_onu_go.tag=voltha-scale "
-      fi
-
-      if [ '\$GERRIT_PROJECT' == 'ofagent-go' ]; then
-        EXTRA_HELM_FLAGS+="--set images.ofagent.repository=${dockerRegistry}/voltha/voltha-ofagent-go,images.ofagent.tag=voltha-scale "
-      fi
-
-      if [ '\$GERRIT_PROJECT' == 'voltha-onos' ]; then
-        EXTRA_HELM_FLAGS+="--set images.onos.repository=${dockerRegistry}/voltha/voltha-onos,images.onos.tag=voltha-scale "
-      fi
-
-      if [ '\$GERRIT_PROJECT' == 'bbsim' ]; then
-        EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
-      fi
-
-      ./voltha up
-      """
-    }
-  }
-}
-
-def test_voltha_stacks(numberOfStacks) {
-  for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
-    stage("Test VOLTHA stack " + i) {
-      timeout(time: 15, unit: 'MINUTES') {
-        sh """
-          export VOLTCONFIG="$HOME/.volt/config-voltha${i}"
-          ROBOT_PARAMS="-v stackId:${i} \
-            -v olt:${olts} \
-            -v pon:${pons} \
-            -v onu:${onus} \
-            -v workflow:${workflow} \
-            -v withEapol:${withEapol} \
-            -v withDhcp:${withDhcp} \
-            -v withIgmp:${withIgmp} \
-            --noncritical non-critical \
-            -e teardown "
-
-          if [ ${withEapol} = false ] ; then
-            ROBOT_PARAMS+="-e authentication "
-          fi
-
-          if [ ${withDhcp} = false ] ; then
-            ROBOT_PARAMS+="-e dhcp "
-          fi
-
-          if [ ${provisionSubscribers} = false ] ; then
-            # if we're not considering subscribers then we don't care about authentication and dhcp
-            ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
-          fi
-
-          if [ ${withFlows} = false ] ; then
-            ROBOT_PARAMS+="-i setup -i activation "
-          fi
-
-          cd $WORKSPACE/voltha-system-tests
-          source ./vst_venv/bin/activate
-          robot -d $WORKSPACE/RobotLogs/voltha${i} \
-          \$ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-
-          # collect results
-          python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/voltha${i}/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time-voltha${i}.txt || true
-          cat $WORKSPACE/execution-time-voltha${i}.txt
-        """
-      }
-    }
-  }
-}
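
The deleted helper above builds EXTRA_HELM_FLAGS with one shell "if" block per Gerrit project to point the chart at a custom-built image. The same lookup can be expressed once on the Groovy side before the flags are handed to the shell. A minimal sketch of that idea follows; extraHelmFlags and dockerRegistry are stand-ins for the pipeline's existing EXTRA_HELM_FLAGS and registry parameter, and the project-to-key table simply mirrors the removed lines:

    // Hypothetical Groovy refactor of the per-project image overrides removed above.
    def overrides = [
      'voltha-go':                 [key: 'rw_core',             repo: 'voltha-rw-core'],
      'voltha-openolt-adapter':    [key: 'adapter_open_olt',    repo: 'voltha-openolt-adapter'],
      'voltha-openonu-adapter':    [key: 'adapter_open_onu',    repo: 'voltha-openonu-adapter'],
      'voltha-openonu-adapter-go': [key: 'adapter_open_onu_go', repo: 'voltha-openonu-adapter-go'],
      'ofagent-go':                [key: 'ofagent',             repo: 'voltha-ofagent-go'],
      'voltha-onos':               [key: 'onos',                repo: 'voltha-onos'],
      'bbsim':                     [key: 'bbsim',               repo: 'bbsim'],
    ]
    def override = overrides[env.GERRIT_PROJECT]
    if (override) {
      // Same repository/tag pair the deleted shell blocks used to append.
      extraHelmFlags += "--set images.${override.key}.repository=${dockerRegistry}/voltha/${override.repo},images.${override.key}.tag=voltha-scale "
    }
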
diff --git a/jjb/pipeline/voltha/master/voltha-dt-physical-functional-tests.groovy b/jjb/pipeline/voltha/master/voltha-dt-physical-functional-tests.groovy
index 7b90b2a..040c065 100644
--- a/jjb/pipeline/voltha/master/voltha-dt-physical-functional-tests.groovy
+++ b/jjb/pipeline/voltha/master/voltha-dt-physical-functional-tests.groovy
@@ -239,21 +239,14 @@
   }
   post {
     always {
+      getPodsInfo("$WORKSPACE/pods")
       sh returnStdout: false, script: '''
       set +e
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
-      kubectl get nodes -o wide
-      kubectl get pods -n voltha -o wide
-      kubectl get pods -o wide
 
-      # store information on running charts
-      helm ls > $WORKSPACE/helm-list.txt || true
-
-      # store information on the running pods
-      kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-imagesId.txt || true
+      # compress the logs collected by the Robot Framework StartLogging keyword
+      cd $WORKSPACE
+      gzip *-combined.log || true
+      rm *-combined.log || true
       '''
       script {
         deployment_config.olts.each { olt ->
@@ -282,7 +275,7 @@
         unstableThreshold: 0,
         onlyCritical: true
         ]);
-      archiveArtifacts artifacts: '**/*.log,**/*.tgz,*.txt'
+      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
     }
   }
 }
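
The post sections in this and the following pipelines now call getPodsInfo("$WORKSPACE/pods") instead of dumping pod, node, and chart state inline. That helper is not part of this change (it comes from the ci-management shared library), so the following is only a sketch of what such a global variable might look like, reconstructed from the inline commands it replaces; the real implementation may differ:

    // vars/getPodsInfo.groovy -- hedged sketch, not the actual shared-library code.
    // Gathers the same cluster state the removed inline kubectl/helm calls printed,
    // but writes it under a destination directory so it can be archived as pods/*.txt.
    def call(String dst) {
      sh """
      set +e
      mkdir -p ${dst}
      kubectl get pods --all-namespaces -o wide > ${dst}/pods.txt || true
      kubectl get nodes -o wide > ${dst}/nodes.txt || true
      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}{end}" | sort | uniq > ${dst}/pod-images.txt || true
      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}{end}" | sort | uniq > ${dst}/pod-imagesId.txt || true
      helm ls > ${dst}/helm-list.txt || true
      """
    }

Writing these files into a dedicated directory is what makes the new pods/*.txt entry in archiveArtifacts meaningful.
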
diff --git a/jjb/pipeline/voltha-physical-functional-tests.groovy b/jjb/pipeline/voltha/master/voltha-physical-functional-tests.groovy
similarity index 79%
rename from jjb/pipeline/voltha-physical-functional-tests.groovy
rename to jjb/pipeline/voltha/master/voltha-physical-functional-tests.groovy
index 241d5d9..05cc053 100644
--- a/jjb/pipeline/voltha-physical-functional-tests.groovy
+++ b/jjb/pipeline/voltha/master/voltha-physical-functional-tests.groovy
@@ -32,25 +32,8 @@
     PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
   }
   stages {
-    stage('Clone kind-voltha') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/kind-voltha",
-            refspec: "${kindVolthaChange}"
-          ]],
-          branches: [[ name: "master", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-      }
-    }
     stage('Clone voltha-system-tests') {
       steps {
+        step([$class: 'WsCleanup'])
         checkout([
           $class: 'GitSCM',
@@ -67,23 +50,6 @@
         ])
       }
     }
-    stage('Clone cord-tester') {
-      steps {
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/cord-tester",
-            refspec: "${cordTesterChange}"
-          ]],
-          branches: [[ name: "master", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-      }
-    }
     stage('Download All the VOLTHA repos') {
       when {
         expression {
@@ -157,10 +123,6 @@
       }
       steps {
         sh """
-        cd $WORKSPACE/kind-voltha/scripts
-        ./log-collector.sh > /dev/null &
-        ./log-combine.sh > /dev/null &
-
         mkdir -p $ROBOT_LOGS_DIR
         if ( ${powerSwitch} ); then
              export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
@@ -237,54 +199,15 @@
   }
   post {
     always {
+      getPodsInfo("$WORKSPACE/pods")
       sh returnStdout: false, script: '''
       set +e
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
-      kubectl get nodes -o wide
-      kubectl get pods -n voltha -o wide
-      kubectl get pods -o wide
 
-      sleep 60 # Wait for log-collector and log-combine to complete
-
-      # Clean up "announcer" pod used by the tests if present
-      kubectl delete pod announcer || true
-
-      ## Pull out errors from log files
-      extract_errors_go() {
-        echo
-        echo "Error summary for $1:"
-        grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
-        echo
-      }
-
-      extract_errors_python() {
-        echo
-        echo "Error summary for $1:"
-        grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
-        echo
-      }
-
-      extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
-      extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
-      extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
-      extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-      extract_errors_python onos >> $WORKSPACE/error-report.log
-
-      gzip error-report.log || true
-      rm error-report.log || true
-
-      cd $WORKSPACE/kind-voltha/scripts/logger/combined/
-      tar czf $WORKSPACE/container-logs.tgz *
-      rm * || true
-
+      # compress the logs collected by the Robot Framework StartLogging keyword
       cd $WORKSPACE
       gzip *-combined.log || true
       rm *-combined.log || true
 
-      # store information on running charts
-      helm ls > $WORKSPACE/helm-list.txt || true
-
       # store information on the running pods
       kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
       kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
@@ -322,7 +245,7 @@
         unstableThreshold: 0,
         onlyCritical: true
         ]);
-      archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log'
+      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
     }
   }
 }
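
Taken together, these hunks leave the post section of the functional pipelines with very little to do: per the commit message, the Robot Framework StartLogging keyword already streams container logs into the directory passed as container_log_dir (here $WORKSPACE), producing the *-combined.log files, so the background log-collector.sh/log-combine.sh scripts, the 60-second wait, and the error-grepping all become redundant. A condensed sketch of the resulting shape, assembled from the hunks above rather than copied verbatim from the file:

    // Condensed sketch of the post section after this change (not verbatim).
    post {
      always {
        // pod/node/chart state now comes from the shared helper
        getPodsInfo("$WORKSPACE/pods")
        sh '''
        set +e
        # compress the logs the Robot StartLogging keyword wrote into $WORKSPACE
        cd $WORKSPACE
        gzip *-combined.log || true
        rm *-combined.log || true
        '''
        archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
      }
    }
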
diff --git a/jjb/pipeline/voltha-physical-soak-dt-tests.groovy b/jjb/pipeline/voltha/master/voltha-physical-soak-dt-tests.groovy
similarity index 78%
rename from jjb/pipeline/voltha-physical-soak-dt-tests.groovy
rename to jjb/pipeline/voltha/master/voltha-physical-soak-dt-tests.groovy
index 2a0902f..117fc4c 100644
--- a/jjb/pipeline/voltha-physical-soak-dt-tests.groovy
+++ b/jjb/pipeline/voltha/master/voltha-physical-soak-dt-tests.groovy
@@ -34,24 +34,6 @@
 
 
   stages {
-    stage('Clone kind-voltha') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/kind-voltha",
-            refspec: "${kindVolthaChange}"
-          ]],
-          branches: [[ name: "master", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-      }
-    }
     stage('Clone voltha-system-tests') {
       steps {
         checkout([
@@ -77,23 +59,6 @@
         }
       }
     }
-    stage('Clone cord-tester') {
-      steps {
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/cord-tester",
-            refspec: "${cordTesterChange}"
-          ]],
-          branches: [[ name: "master", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-      }
-    }
     // This checkout allows us to show changes in Jenkins
     // we only do this on master as we don't branch all the repos for all the releases
     // (we should compute the difference by tracking the container version, not the code)
@@ -170,10 +135,6 @@
       }
       steps {
         sh """
-        cd $WORKSPACE/kind-voltha/scripts
-        ./log-collector.sh > /dev/null &
-        ./log-combine.sh > /dev/null &
-
         mkdir -p $ROBOT_LOGS_DIR
         if [ "${params.testType}" == "Functional" ]; then
             if ( ${powerSwitch} ); then
@@ -224,44 +185,14 @@
   }
   post {
     always {
+      getPodsInfo("$WORKSPACE/pods")
       sh returnStdout: false, script: '''
       set +e
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
-      kubectl get nodes -o wide
-      kubectl get pods -n voltha -o wide
 
-      sleep 60 # Wait for log-collector and log-combine to complete
-
-      # Clean up "announcer" pod used by the tests if present
-      kubectl delete pod announcer || true
-
-      ## Pull out errors from log files
-      extract_errors_go() {
-        echo
-        echo "Error summary for $1:"
-        grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
-        echo
-      }
-
-      extract_errors_python() {
-        echo
-        echo "Error summary for $1:"
-        grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
-        echo
-      }
-
-      extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
-      extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
-      extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
-      extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-      extract_errors_python onos >> $WORKSPACE/error-report.log
-
-      cd $WORKSPACE/kind-voltha/scripts/logger/combined/
-      tar czf $WORKSPACE/container-logs.tgz *
-
+      # compress the logs collected by the Robot Framework StartLogging keyword
       cd $WORKSPACE
       gzip *-combined.log || true
+      rm *-combined.log || true
 
       # collect ETCD cluster logs
       mkdir -p $WORKSPACE/etcd
@@ -292,7 +223,7 @@
         unstableThreshold: 0,
         onlyCritical: true
         ]);
-      archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log'
+      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
     }
   }
 }
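
The soak pipelines additionally keep an etcd log-collection step ("collect ETCD cluster logs", followed by mkdir -p $WORKSPACE/etcd) whose body falls outside these hunks. For context, a hedged sketch of what such a step typically looks like; the label selector and namespace are assumptions and may not match what the soak pod actually uses:

    // Hedged sketch of an etcd log-collection step; selector/namespace are guesses.
    sh '''
    set +e
    mkdir -p $WORKSPACE/etcd
    # one log file per etcd pod, named after the pod
    printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*].metadata.name}") | \
      xargs --no-run-if-empty -I# bash -c "kubectl logs # > $WORKSPACE/etcd/#.log" || true
    '''
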
diff --git a/jjb/pipeline/voltha/master/voltha-scale-test.groovy b/jjb/pipeline/voltha/master/voltha-scale-test.groovy
index bfe8fc0..8248f36 100644
--- a/jjb/pipeline/voltha/master/voltha-scale-test.groovy
+++ b/jjb/pipeline/voltha/master/voltha-scale-test.groovy
@@ -611,17 +611,9 @@
         reportFileName: '**/report*.html',
         onlyCritical: true,
         unstableThreshold: 0]);
+      getPodsInfo("$LOG_FOLDER")
       // get all the logs from kubernetes PODs
       sh returnStdout: false, script: '''
-
-        # store information on running charts
-        helm ls > $LOG_FOLDER/helm-list.txt || true
-
-        # store information on the running pods
-        kubectl get pods -o wide > $LOG_FOLDER/pods.txt || true
-        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
-        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
-
         # copy the ONOS logs directly from the container to avoid the color codes
         printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
 
diff --git a/jjb/pipeline/voltha-tt-physical-functional-tests.groovy b/jjb/pipeline/voltha/master/voltha-tt-physical-functional-tests.groovy
similarity index 74%
rename from jjb/pipeline/voltha-tt-physical-functional-tests.groovy
rename to jjb/pipeline/voltha/master/voltha-tt-physical-functional-tests.groovy
index 35b034a..ac7ddf0 100644
--- a/jjb/pipeline/voltha-tt-physical-functional-tests.groovy
+++ b/jjb/pipeline/voltha/master/voltha-tt-physical-functional-tests.groovy
@@ -33,25 +33,8 @@
   }
 
   stages {
-    stage('Clone kind-voltha') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/kind-voltha",
-            refspec: "${kindVolthaChange}"
-          ]],
-          branches: [[ name: "master", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-      }
-    }
     stage('Clone voltha-system-tests') {
       steps {
+        step([$class: 'WsCleanup'])
         checkout([
           $class: 'GitSCM',
@@ -76,23 +59,6 @@
         }
       }
     }
-   stage('Clone cord-tester') {
-      steps {
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/cord-tester",
-            refspec: "${cordTesterChange}"
-          ]],
-          branches: [[ name: "master", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-      }
-    }
     // This checkout allows us to show changes in Jenkins
     // we only do this on master as we don't branch all the repos for all the releases
     // (we should compute the difference by tracking the container version, not the code)
@@ -171,10 +137,6 @@
       }
       steps {
         sh """
-        cd $WORKSPACE/kind-voltha/scripts
-        ./log-collector.sh > /dev/null &
-        ./log-combine.sh > /dev/null &
-
         mkdir -p $ROBOT_LOGS_DIR
         if ( ${powerSwitch} ); then
              export ROBOT_MISC_ARGS="--removekeywords wuks -i functionalTT -i PowerSwitch -i sanityTT -i sanityTT-MCAST -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
@@ -234,57 +196,11 @@
     always {
+      getPodsInfo("$WORKSPACE/pods")
       sh returnStdout: false, script: '''
       set +e
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
-      kubectl get nodes -o wide
-      kubectl get pods -n voltha -o wide
-      kubectl get pods -o wide
 
-      sleep 60 # Wait for log-collector and log-combine to complete
-
-      # Clean up "announcer" pod used by the tests if present
-      kubectl delete pod announcer || true
-
-      ## Pull out errors from log files
-      extract_errors_go() {
-        echo
-        echo "Error summary for $1:"
-        grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
-        echo
-      }
-
-      extract_errors_python() {
-        echo
-        echo "Error summary for $1:"
-        grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
-        echo
-      }
-
-      extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
-      extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
-      extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
-      extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-      extract_errors_python onos >> $WORKSPACE/error-report.log
-
-      gzip error-report.log || true
-      rm error-report.log || true
-
-      cd $WORKSPACE/kind-voltha/scripts/logger/combined/
-      tar czf $WORKSPACE/container-logs.tgz *
-      rm * || true
-
+      # compress the logs collected by the Robot Framework StartLogging keyword
       cd $WORKSPACE
       gzip *-combined.log || true
       rm *-combined.log || true
-
-      # store information on running charts
-      helm ls > $WORKSPACE/helm-list.txt || true
-
-      # store information on the running pods
-      kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-imagesId.txt || true
-
       '''
       script {
         deployment_config.olts.each { olt ->
@@ -307,7 +223,7 @@
         unstableThreshold: 0,
         onlyCritical: true
         ]);
-      archiveArtifacts artifacts: '*.log,*.gz,*.tgz'
+      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
     }
   }
 }
diff --git a/jjb/pipeline/voltha-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-physical-functional-tests.groovy
similarity index 79%
copy from jjb/pipeline/voltha-physical-functional-tests.groovy
copy to jjb/pipeline/voltha/voltha-2.8/voltha-physical-functional-tests.groovy
index 241d5d9..05cc053 100644
--- a/jjb/pipeline/voltha-physical-functional-tests.groovy
+++ b/jjb/pipeline/voltha/voltha-2.8/voltha-physical-functional-tests.groovy
@@ -32,25 +32,8 @@
     PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
   }
   stages {
-    stage('Clone kind-voltha') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/kind-voltha",
-            refspec: "${kindVolthaChange}"
-          ]],
-          branches: [[ name: "master", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-      }
-    }
     stage('Clone voltha-system-tests') {
       steps {
+        step([$class: 'WsCleanup'])
         checkout([
           $class: 'GitSCM',
@@ -67,23 +50,6 @@
         ])
       }
     }
-    stage('Clone cord-tester') {
-      steps {
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/cord-tester",
-            refspec: "${cordTesterChange}"
-          ]],
-          branches: [[ name: "master", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-      }
-    }
     stage('Download All the VOLTHA repos') {
       when {
         expression {
@@ -157,10 +123,6 @@
       }
       steps {
         sh """
-        cd $WORKSPACE/kind-voltha/scripts
-        ./log-collector.sh > /dev/null &
-        ./log-combine.sh > /dev/null &
-
         mkdir -p $ROBOT_LOGS_DIR
         if ( ${powerSwitch} ); then
              export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
@@ -237,54 +199,15 @@
   }
   post {
     always {
+      getPodsInfo("$WORKSPACE/pods")
       sh returnStdout: false, script: '''
       set +e
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
-      kubectl get nodes -o wide
-      kubectl get pods -n voltha -o wide
-      kubectl get pods -o wide
 
-      sleep 60 # Wait for log-collector and log-combine to complete
-
-      # Clean up "announcer" pod used by the tests if present
-      kubectl delete pod announcer || true
-
-      ## Pull out errors from log files
-      extract_errors_go() {
-        echo
-        echo "Error summary for $1:"
-        grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
-        echo
-      }
-
-      extract_errors_python() {
-        echo
-        echo "Error summary for $1:"
-        grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
-        echo
-      }
-
-      extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
-      extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
-      extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
-      extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-      extract_errors_python onos >> $WORKSPACE/error-report.log
-
-      gzip error-report.log || true
-      rm error-report.log || true
-
-      cd $WORKSPACE/kind-voltha/scripts/logger/combined/
-      tar czf $WORKSPACE/container-logs.tgz *
-      rm * || true
-
+      # compress the logs collected by the Robot Framework StartLogging keyword
       cd $WORKSPACE
       gzip *-combined.log || true
       rm *-combined.log || true
 
-      # store information on running charts
-      helm ls > $WORKSPACE/helm-list.txt || true
-
       # store information on the running pods
       kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
       kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
@@ -322,7 +245,7 @@
         unstableThreshold: 0,
         onlyCritical: true
         ]);
-      archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log'
+      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
     }
   }
 }
diff --git a/jjb/pipeline/voltha-physical-soak-dt-tests.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-physical-soak-dt-tests.groovy
similarity index 78%
copy from jjb/pipeline/voltha-physical-soak-dt-tests.groovy
copy to jjb/pipeline/voltha/voltha-2.8/voltha-physical-soak-dt-tests.groovy
index 2a0902f..117fc4c 100644
--- a/jjb/pipeline/voltha-physical-soak-dt-tests.groovy
+++ b/jjb/pipeline/voltha/voltha-2.8/voltha-physical-soak-dt-tests.groovy
@@ -34,24 +34,6 @@
 
 
   stages {
-    stage('Clone kind-voltha') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/kind-voltha",
-            refspec: "${kindVolthaChange}"
-          ]],
-          branches: [[ name: "master", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-      }
-    }
     stage('Clone voltha-system-tests') {
       steps {
         checkout([
@@ -77,23 +59,6 @@
         }
       }
     }
-    stage('Clone cord-tester') {
-      steps {
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/cord-tester",
-            refspec: "${cordTesterChange}"
-          ]],
-          branches: [[ name: "master", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-      }
-    }
     // This checkout allows us to show changes in Jenkins
     // we only do this on master as we don't branch all the repos for all the releases
     // (we should compute the difference by tracking the container version, not the code)
@@ -170,10 +135,6 @@
       }
       steps {
         sh """
-        cd $WORKSPACE/kind-voltha/scripts
-        ./log-collector.sh > /dev/null &
-        ./log-combine.sh > /dev/null &
-
         mkdir -p $ROBOT_LOGS_DIR
         if [ "${params.testType}" == "Functional" ]; then
             if ( ${powerSwitch} ); then
@@ -224,44 +185,14 @@
   }
   post {
     always {
+      getPodsInfo("$WORKSPACE/pods")
       sh returnStdout: false, script: '''
       set +e
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
-      kubectl get nodes -o wide
-      kubectl get pods -n voltha -o wide
 
-      sleep 60 # Wait for log-collector and log-combine to complete
-
-      # Clean up "announcer" pod used by the tests if present
-      kubectl delete pod announcer || true
-
-      ## Pull out errors from log files
-      extract_errors_go() {
-        echo
-        echo "Error summary for $1:"
-        grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
-        echo
-      }
-
-      extract_errors_python() {
-        echo
-        echo "Error summary for $1:"
-        grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
-        echo
-      }
-
-      extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
-      extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
-      extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
-      extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-      extract_errors_python onos >> $WORKSPACE/error-report.log
-
-      cd $WORKSPACE/kind-voltha/scripts/logger/combined/
-      tar czf $WORKSPACE/container-logs.tgz *
-
+      # compress the logs collected by the Robot Framework StartLogging keyword
       cd $WORKSPACE
       gzip *-combined.log || true
+      rm *-combined.log || true
 
       # collect ETCD cluster logs
       mkdir -p $WORKSPACE/etcd
@@ -292,7 +223,7 @@
         unstableThreshold: 0,
         onlyCritical: true
         ]);
-      archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log'
+      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
     }
   }
 }
diff --git a/jjb/pipeline/voltha/voltha-2.8/voltha-scale-multi-stack.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-scale-multi-stack.groovy
index 6d75846..8c2f72e 100644
--- a/jjb/pipeline/voltha/voltha-2.8/voltha-scale-multi-stack.groovy
+++ b/jjb/pipeline/voltha/voltha-2.8/voltha-scale-multi-stack.groovy
@@ -115,6 +115,7 @@
 
         helm upgrade --install -n infra voltha-infra onf/voltha-infra \
           -f $WORKSPACE/voltha-helm-charts/examples/${workflow}-values.yaml \
+          -f /home/jenkins/voltha-scale/voltha-values.yaml \
           --set onos-classic.replicas=${onosReplicas},onos-classic.atomix.replicas=${atomixReplicas} \
           --set radius.enabled=${withEapol} \
           --set global.log_level=${logLevel} \
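
The only functional change to the voltha-2.8 multi-stack pipeline itself is the extra node-local values file. Helm merges values sources in order of increasing precedence: chart defaults, then -f files left to right, then --set flags, so keys in /home/jenkins/voltha-scale/voltha-values.yaml override the workflow example values while the --set flags shown above still win over both. A minimal illustration of that ordering (the 'att' workflow file and the replica count are placeholders, not values from this change):

    // Illustration of helm values precedence only; not taken from the pipeline.
    sh '''
    helm upgrade --install -n infra voltha-infra onf/voltha-infra \
      -f $WORKSPACE/voltha-helm-charts/examples/att-values.yaml \
      -f /home/jenkins/voltha-scale/voltha-values.yaml \
      --set onos-classic.replicas=1
    '''
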
diff --git a/jjb/pipeline/voltha-tt-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-tt-physical-functional-tests.groovy
similarity index 74%
copy from jjb/pipeline/voltha-tt-physical-functional-tests.groovy
copy to jjb/pipeline/voltha/voltha-2.8/voltha-tt-physical-functional-tests.groovy
index 35b034a..ac7ddf0 100644
--- a/jjb/pipeline/voltha-tt-physical-functional-tests.groovy
+++ b/jjb/pipeline/voltha/voltha-2.8/voltha-tt-physical-functional-tests.groovy
@@ -33,25 +33,8 @@
   }
 
   stages {
-    stage('Clone kind-voltha') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/kind-voltha",
-            refspec: "${kindVolthaChange}"
-          ]],
-          branches: [[ name: "master", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-      }
-    }
     stage('Clone voltha-system-tests') {
       steps {
+        step([$class: 'WsCleanup'])
         checkout([
           $class: 'GitSCM',
@@ -76,23 +59,6 @@
         }
       }
     }
-   stage('Clone cord-tester') {
-      steps {
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/cord-tester",
-            refspec: "${cordTesterChange}"
-          ]],
-          branches: [[ name: "master", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-      }
-    }
     // This checkout allows us to show changes in Jenkins
     // we only do this on master as we don't branch all the repos for all the releases
     // (we should compute the difference by tracking the container version, not the code)
@@ -171,10 +137,6 @@
       }
       steps {
         sh """
-        cd $WORKSPACE/kind-voltha/scripts
-        ./log-collector.sh > /dev/null &
-        ./log-combine.sh > /dev/null &
-
         mkdir -p $ROBOT_LOGS_DIR
         if ( ${powerSwitch} ); then
              export ROBOT_MISC_ARGS="--removekeywords wuks -i functionalTT -i PowerSwitch -i sanityTT -i sanityTT-MCAST -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
@@ -234,57 +196,11 @@
     always {
+      getPodsInfo("$WORKSPACE/pods")
       sh returnStdout: false, script: '''
       set +e
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
-      kubectl get nodes -o wide
-      kubectl get pods -n voltha -o wide
-      kubectl get pods -o wide
 
-      sleep 60 # Wait for log-collector and log-combine to complete
-
-      # Clean up "announcer" pod used by the tests if present
-      kubectl delete pod announcer || true
-
-      ## Pull out errors from log files
-      extract_errors_go() {
-        echo
-        echo "Error summary for $1:"
-        grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
-        echo
-      }
-
-      extract_errors_python() {
-        echo
-        echo "Error summary for $1:"
-        grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
-        echo
-      }
-
-      extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
-      extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
-      extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
-      extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-      extract_errors_python onos >> $WORKSPACE/error-report.log
-
-      gzip error-report.log || true
-      rm error-report.log || true
-
-      cd $WORKSPACE/kind-voltha/scripts/logger/combined/
-      tar czf $WORKSPACE/container-logs.tgz *
-      rm * || true
-
+      # compress the logs collected by the Robot Framework StartLogging keyword
       cd $WORKSPACE
       gzip *-combined.log || true
       rm *-combined.log || true
-
-      # store information on running charts
-      helm ls > $WORKSPACE/helm-list.txt || true
-
-      # store information on the running pods
-      kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-imagesId.txt || true
-
       '''
       script {
         deployment_config.olts.each { olt ->
@@ -307,7 +223,7 @@
         unstableThreshold: 0,
         onlyCritical: true
         ]);
-      archiveArtifacts artifacts: '*.log,*.gz,*.tgz'
+      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
     }
   }
 }
diff --git a/jjb/voltha-scale.yaml b/jjb/voltha-scale.yaml
index 1155584..b3ccb65 100644
--- a/jjb/voltha-scale.yaml
+++ b/jjb/voltha-scale.yaml
@@ -256,8 +256,8 @@
       - 'voltha-scale-measurements':
           name: 'voltha-scale-measurements-2.8-10-stacks-2-16-32-att-subscribers'
           build-node: 'voltha-scale-2'
-          'disable-job': true
-          pipeline-script: 'voltha-scale-multi-stack.groovy'
+          'disable-job': false
+          pipeline-script: 'voltha/voltha-2.8/voltha-scale-multi-stack.groovy'
           time-trigger: "H H/4 * * *"
           withMonitoring: false
           logLevel: WARN
@@ -283,8 +283,8 @@
       - 'voltha-scale-measurements':
           name: 'voltha-scale-measurements-2.8-10-stacks-2-16-32-dt-subscribers'
           build-node: 'voltha-scale-2'
-          pipeline-script: 'voltha-scale-multi-stack.groovy'
-          'disable-job': true
+          pipeline-script: 'voltha/voltha-2.8/voltha-scale-multi-stack.groovy'
+          'disable-job': false
           time-trigger: "H H/4 * * *"
           withMonitoring: false
           logLevel: WARN
@@ -310,8 +310,8 @@
       - 'voltha-scale-measurements':
           name: 'voltha-scale-measurements-2.8-10-stacks-2-16-32-tt-subscribers'
           build-node: 'voltha-scale-2'
-          pipeline-script: 'voltha-scale-multi-stack.groovy'
-          'disable-job': true
+          pipeline-script: 'voltha/voltha-2.8/voltha-scale-multi-stack.groovy'
+          'disable-job': false
           time-trigger: "H H/4 * * *"
           withMonitoring: false
           logLevel: WARN
@@ -385,21 +385,6 @@
           build-node: 'voltha-scale'
           extraHelmFlags: '--set defaults.rw_core.timeout=30s '
 
-      # development matrix
-      - 'voltha-scale-matrix':
-          name: 'voltha-scale-matrix-voltha-master-dev'
-          build-node: 'voltha-scale'
-
-      # development matrix
-      - 'voltha-scale-matrix':
-          name: 'voltha-scale-matrix-voltha-master'
-          build-node: 'voltha-scale-1'
-          onosReplicas: 3
-          atomixReplicas: 3
-          kafkaReplicas: 3
-          etcdReplicas: 3
-          topologies: 1-16-16, 1-16-32, 2-16-32
-          time-trigger: H 0 * * *
 
 # list of parameters for the VOLTHA Jobs,
 # used as anchor so that can be shared across multiple jobs
@@ -815,98 +800,3 @@
               branches:
                 - branch-compare-type: REG_EXP
                   branch-pattern: '{all-branches-regexp}'
-- job-template:
-    id: 'voltha-scale-matrix'
-    name: '{name}'
-    pipeline-script: 'voltha-scale-matrix.groovy'
-
-    description: |
-      <!-- Managed by Jenkins Job Builder -->
-      Created by {id} job-template from ci-management/jjb/voltha-scale.yaml  <br /><br />
-      Using pipeline {pipeline-script} <br/><br/>
-      Scale measurements for VOLTHA 2.x
-
-    properties:
-      - cord-infra-properties:
-          build-days-to-keep: '{big-build-days-to-keep}'
-          artifact-num-to-keep: '{big-artifact-num-to-keep}'
-
-    wrappers:
-      - lf-infra-wrappers:
-          build-timeout: '{build-timeout}'
-          jenkins-ssh-credential: '{jenkins-ssh-credential}'
-
-    # default values
-    time-trigger: 0 0 29 2 *
-    release: master
-    build-node: 'voltha-scale'
-    volthaSystemTestsChange: ''
-    volthaHelmChartsChange: ''
-    kindVolthaChange: ''
-    onosReplicas: 1
-    atomixReplicas: 0
-    kafkaReplicas: 1
-    etcdReplicas: 1
-    openonuAdapterReplicas: 1
-    topologies: 1-1-1, 1-2-2, 2-2-2
-
-    project-type: pipeline
-    concurrent: false
-
-    dsl: !include-raw-escape: pipeline/{pipeline-script}
-
-    parameters:
-      - string:
-          name: release
-          default: '{release}'
-          description: 'Version of the code to test (matches a branch in kind-voltha and voltha-system-tests repos)'
-
-      - string:
-          name: buildNode
-          default: '{build-node}'
-          description: 'Name of the Jenkins node to run the job on'
-
-      - string:
-          name: volthaSystemTestsChange
-          default: '{volthaSystemTestsChange}'
-          description: 'Download a change for gerrit in the voltha-system-tests repo, example value: "refs/changes/79/18779/13"'
-
-      - string:
-          name: volthaHelmChartsChange
-          default: '{volthaHelmChartsChange}'
-          description: 'Download a change for gerrit in the voltha-helm-charts repo, example value: "refs/changes/32/19132/1"'
-
-      - string:
-          name: onosReplicas
-          default: '{onosReplicas}'
-          description: 'How many ONOSes instances to run'
-
-      - string:
-          name: atomixReplicas
-          default: '{atomixReplicas}'
-          description: 'How many Atomix instances to run'
-
-      - string:
-          name: kafkaReplicas
-          default: '{kafkaReplicas}'
-          description: 'How many Kafka instances to run'
-
-      - string:
-          name: etcdReplicas
-          default: '{etcdReplicas}'
-          description: 'How many ETCD instances to run'
-
-      - string:
-          name: openonuAdapterReplicas
-          default: '{openonuAdapterReplicas}'
-          description: 'How many OpenONU adapter instances to run'
-
-      - string:
-          name: topologies
-          default: '{topologies}'
-          description: 'Topologies configuration, comma separate list of "olt-pon-onu" eg: "1-16-16, 1-16-32"'
-
-    triggers:
-      - timed: |
-                 TZ=America/Los_Angeles
-                 {time-trigger}
diff --git a/jjb/voltha-test/voltha-nightly-jobs.yaml b/jjb/voltha-test/voltha-nightly-jobs.yaml
index cf7c91b..5d4a1fd 100644
--- a/jjb/voltha-test/voltha-nightly-jobs.yaml
+++ b/jjb/voltha-test/voltha-nightly-jobs.yaml
@@ -283,7 +283,7 @@
                   Copyright (c) 2017 Open Networking Foundation (ONF)
 
     <<: *voltha-pipe-job-boiler-plate
-    pipeline-script: 'voltha-physical-functional-tests.groovy'
+    pipeline-script: 'voltha/master/voltha-physical-functional-tests.groovy'
     manifest-url: 'https://gerrit.opencord.org/voltha-test-manifest.git'
     manifest-branch: 'master'
 
@@ -410,7 +410,7 @@
                   Copyright (c) 2017 Open Networking Foundation (ONF)
 
     <<: *voltha-pipe-job-boiler-plate
-    pipeline-script: 'voltha-physical-functional-tests.groovy'
+    pipeline-script: 'voltha/master/voltha-physical-functional-tests.groovy'
     manifest-url: 'https://gerrit.opencord.org/voltha-test-manifest.git'
     manifest-branch: 'master'
 
@@ -534,7 +534,7 @@
                   Created by Suchitra Vemuri, suchitra@opennetworking.org <br />
                   Copyright (c) 2017 Open Networking Foundation (ONF)
     <<: *voltha-pipe-job-boiler-plate
-    pipeline-script: 'voltha-physical-functional-tests.groovy'
+    pipeline-script: 'voltha/master/voltha-physical-functional-tests.groovy'
     manifest-url: 'https://gerrit.opencord.org/voltha-test-manifest.git'
     manifest-branch: 'master'
 
diff --git a/jjb/voltha-test/voltha.yaml b/jjb/voltha-test/voltha.yaml
index 6ed18ff..3aae296 100644
--- a/jjb/voltha-test/voltha.yaml
+++ b/jjb/voltha-test/voltha.yaml
@@ -50,7 +50,7 @@
           name-extension: '_TT'
           test-type: ''
           test-repo: 'voltha-system-tests'
-          pipeline-script: 'voltha-tt-physical-functional-tests.groovy'
+          pipeline-script: 'voltha/master/voltha-tt-physical-functional-tests.groovy'
 
       # flex OCP pod with olt/onu - Default tech profile and timer based job
       - 'build_voltha_pod_release_timer':
@@ -118,7 +118,7 @@
           name-extension: '_TT'
           work-flow: 'TT'
           power-switch: True
-          pipeline-script: 'voltha-tt-physical-functional-tests.groovy'
+          pipeline-script: 'voltha/master/voltha-tt-physical-functional-tests.groovy'
           test-repo: 'voltha-system-tests'
           profile: 'TP'
 
@@ -146,7 +146,7 @@
           name-extension: '_TT'
           work-flow: 'TT'
           power-switch: True
-          pipeline-script: 'voltha-tt-physical-functional-tests.groovy'
+          pipeline-script: 'voltha/voltha-2.8/voltha-tt-physical-functional-tests.groovy'
           test-repo: 'voltha-system-tests'
           profile: 'TP'
 
@@ -176,7 +176,7 @@
           name-extension: '_TT'
           work-flow: 'TT'
           power-switch: True
-          pipeline-script: 'voltha-tt-physical-functional-tests.groovy'
+          pipeline-script: 'voltha/master/voltha-tt-physical-functional-tests.groovy'
           test-repo: 'voltha-system-tests'
           profile: 'TP'
           enableMultiUni: true
@@ -284,7 +284,7 @@
           name-extension: '_DT_soak_Func'
           test-type: 'Functional'
           test-repo: 'voltha-system-tests'
-          pipeline-script: 'voltha-physical-soak-dt-tests.groovy'
+          pipeline-script: 'voltha/master/voltha-physical-soak-dt-tests.groovy'
 
       # ONF Menlo Soak POD test job - voltha-master branch
       # Run failure/recovery tests every Wednesday
@@ -299,7 +299,7 @@
           name-extension: '_DT_soak_Fail'
           test-type: 'Failure'
           test-repo: 'voltha-system-tests'
-          pipeline-script: 'voltha-physical-soak-dt-tests.groovy'
+          pipeline-script: 'voltha/master/voltha-physical-soak-dt-tests.groovy'
           time-trigger: "H H * * 3"
 
       # ONF Menlo Soak POD test job - voltha-master branch
@@ -315,7 +315,7 @@
           name-extension: '_DT_soak_DP'
           test-type: 'Dataplane'
           test-repo: 'voltha-system-tests'
-          pipeline-script: 'voltha-physical-soak-dt-tests.groovy'
+          pipeline-script: 'voltha/master/voltha-physical-soak-dt-tests.groovy'
           time-trigger: "H H * * 6"
 
       # ONF Menlo Soak POD build job - voltha-2.8 branch
@@ -348,7 +348,7 @@
           name-extension: '_DT_soak_Func'
           test-type: 'Functional'
           test-repo: 'voltha-system-tests'
-          pipeline-script: 'voltha-physical-soak-dt-tests.groovy'
+          pipeline-script: 'voltha/voltha-2.8/voltha-physical-soak-dt-tests.groovy'
 
       # ONF Menlo Soak POD test job - voltha-2.8 branch
       # Run failure/recovery tests every Wednesday
@@ -363,7 +363,7 @@
           name-extension: '_DT_soak_Fail'
           test-type: 'Failure'
           test-repo: 'voltha-system-tests'
-          pipeline-script: 'voltha-physical-soak-dt-tests.groovy'
+          pipeline-script: 'voltha/voltha-2.8/voltha-physical-soak-dt-tests.groovy'
           time-trigger: "H H * * 3"
 
       # ONF Menlo Soak POD test job - voltha-2.8 branch
@@ -379,7 +379,7 @@
           name-extension: '_DT_soak_DP'
           test-type: 'Dataplane'
           test-repo: 'voltha-system-tests'
-          pipeline-script: 'voltha-physical-soak-dt-tests.groovy'
+          pipeline-script: 'voltha/voltha-2.8/voltha-physical-soak-dt-tests.groovy'
           time-trigger: "H H * * 6"
 
       # Certification (Radisys) pod with olt/onu - master versions timer based job
@@ -462,7 +462,7 @@
             test-repo: 'voltha-system-tests'
             profile: '1T4GEM'
             power-switch: True
-            pipeline-script: 'voltha-physical-functional-tests.groovy'
+            pipeline-script: 'voltha/master/voltha-physical-functional-tests.groovy'
 
         # Certification (Radisys) pod with olt/onu - master versions timer based job , two OLTs
       - 'build_voltha_pod_release_timer':
@@ -487,7 +487,7 @@
             test-repo: 'voltha-system-tests'
             profile: '1T4GEM-unencrypted'
             power-switch: True
-            pipeline-script: 'voltha-physical-functional-tests.groovy'
+            pipeline-script: 'voltha/master/voltha-physical-functional-tests.groovy'
 
       # Berlin pod with olt/onu - master versions timer based job , two OLTs
       - 'build_voltha_pod_release_timer':