Merge "clean up folder used as pseudo branches of voltha pipeline-scripts"
diff --git a/jjb/pipeline/voltha/master/bbsim-tests.groovy b/jjb/pipeline/voltha/bbsim-tests.groovy
similarity index 100%
rename from jjb/pipeline/voltha/master/bbsim-tests.groovy
rename to jjb/pipeline/voltha/bbsim-tests.groovy
diff --git a/jjb/pipeline/voltha/master/device-management-mock-tests.groovy b/jjb/pipeline/voltha/device-management-mock-tests.groovy
similarity index 100%
rename from jjb/pipeline/voltha/master/device-management-mock-tests.groovy
rename to jjb/pipeline/voltha/device-management-mock-tests.groovy
diff --git a/jjb/pipeline/voltha/master/dmi-build-and-test.groovy b/jjb/pipeline/voltha/dmi-build-and-test.groovy
similarity index 100%
rename from jjb/pipeline/voltha/master/dmi-build-and-test.groovy
rename to jjb/pipeline/voltha/dmi-build-and-test.groovy
diff --git a/jjb/pipeline/voltha/makefile b/jjb/pipeline/voltha/makefile
deleted file mode 100644
index 6c948c3..0000000
--- a/jjb/pipeline/voltha/makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-# -*- makefile -*-
-
-version += master
-version += voltha-2.12
-version += voltha-2.11
-version += voltha-2.8
-version += playground
-
-all: $(version)
-
-$(version):
-	mkdir -p $(version)
-	rsync -rv --checksum master/. $@/.
-
-# [EOF]
diff --git a/jjb/pipeline/voltha/master/physical-build.groovy b/jjb/pipeline/voltha/physical-build.groovy
similarity index 100%
rename from jjb/pipeline/voltha/master/physical-build.groovy
rename to jjb/pipeline/voltha/physical-build.groovy
diff --git a/jjb/pipeline/voltha/playground/bbsim-tests.groovy b/jjb/pipeline/voltha/playground/bbsim-tests.groovy
deleted file mode 100644
index 6a2330f..0000000
--- a/jjb/pipeline/voltha/playground/bbsim-tests.groovy
+++ /dev/null
@@ -1,359 +0,0 @@
-// Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests for openonu-go
-// uses bbsim to simulate OLT/ONUs
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def clusterName = "kind-ci"
-
-def execute_test(testTarget, workflow, testLogging, teardown, testSpecificHelmFlags = "") {
-    def infraNamespace = "default"
-    def volthaNamespace = "voltha"
-    def logsDir = "$WORKSPACE/${testTarget}"
-
-    stage('IAM')
-    {
-	script
-	{
-	    String iam = [
-		'ci-management',
-		'jjb',
-		'pipeline',
-		'voltha',
-		'master',
-		'bbsim-tests.groovy'
-	    ].join('/')
-            println("** ${iam}: ENTER")
-
-	    String cmd = "which pkill"
-	    def stream = sh(
-		returnStatus:false,
-		returnStdout: true,
-		script: cmd)
-	    println(" ** ${cmd}:\n${stream}")
-	    
-            println("** ${iam}: LEAVE")
-	}
-    }
-
-    stage('Cleanup') {
-	if (teardown) {
-	    timeout(15) {
-		script {
-		    helmTeardown(["default", infraNamespace, volthaNamespace])
-		}
-	    timeout(1) {
-		    sh returnStdout: false, script: '''
-          # remove orphaned port-forward from different namespaces
-          ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-          '''
-		}
-	    }
-	}
-    }
-
-    stage ('Initialize')
-    {
-	// VOL-4926 - Is voltha-system-tests available ?
-	String cmd = [
-	    'make',
-	    '-C', "$WORKSPACE/voltha-system-tests",
-	    "KAIL_PATH=\"$WORKSPACE/bin\"",
-	    'kail',
-	].join(' ')
-	println(" ** Running: ${cmd}:\n")
-        sh("${cmd}")
-    }
-
-    stage('Deploy common infrastructure') {
-	sh '''
-    helm repo add onf https://charts.opencord.org
-    helm repo update
-    if [ ${withMonitoring} = true ] ; then
-      helm install nem-monitoring onf/nem-monitoring \
-      --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
-      --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
-    fi
-    '''
-    }
-
-    stage('Deploy Voltha') {
-    if (teardown) {
-      timeout(10) {
-        script {
-
-          sh """
-          mkdir -p ${logsDir}
-          _TAG=kail-startup kail -n ${infraNamespace} -n ${volthaNamespace} > ${logsDir}/onos-voltha-startup-combined.log &
-          """
-
-          // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
-          def localCharts = false
-          if (volthaHelmChartsChange != "" || gerritProject == "voltha-helm-charts") {
-            localCharts = true
-          }
-
-          // NOTE: temporary workaround to expose ONOS node ports
-          def localHelmFlags = extraHelmFlags.trim() + " --set global.log_level=${logLevel.toUpperCase()} " +
-          " --set onos-classic.onosSshPort=30115 " +
-          " --set onos-classic.onosApiPort=30120 " +
-          " --set onos-classic.onosOfPort=31653 " +
-          " --set onos-classic.individualOpenFlowNodePorts=true " + testSpecificHelmFlags
-
-          if (gerritProject != "") {
-            localHelmFlags = "${localHelmFlags} " + getVolthaImageFlags("${gerritProject}")
-          }
-
-          volthaDeploy([
-            infraNamespace: infraNamespace,
-            volthaNamespace: volthaNamespace,
-            workflow: workflow.toLowerCase(),
-            withMacLearning: enableMacLearning.toBoolean(),
-            extraHelmFlags: localHelmFlags,
-            localCharts: localCharts,
-            bbsimReplica: olts.toInteger(),
-            dockerRegistry: registry,
-            ])
-        }
-
-        // stop logging
-        sh """
-          P_IDS="\$(ps e -ww -A | grep "_TAG=kail-startup" | grep -v grep | awk '{print \$1}')"
-          if [ -n "\$P_IDS" ]; then
-            echo \$P_IDS
-            for P_ID in \$P_IDS; do
-              kill -9 \$P_ID
-            done
-          fi
-          cd ${logsDir}
-          gzip -k onos-voltha-startup-combined.log
-          rm onos-voltha-startup-combined.log
-        """
-      }
-      sh """
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-      bbsimDmiPortFwd=50075
-      for i in {0..${olts.toInteger() - 1}}; do
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="bbsim\${i}" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/bbsim\${i} \${bbsimDmiPortFwd}:50075; done"&
-        ((bbsimDmiPortFwd++))
-      done
-      if [ ${withMonitoring} = true ] ; then
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="nem-monitoring-prometheus-server" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n default svc/nem-monitoring-prometheus-server 31301:80; done"&
-      fi
-      ps aux | grep port-forward
-      """
-      // setting ONOS log level
-      script {
-        setOnosLogLevels([
-          onosNamespace: infraNamespace,
-          apps: [
-            'org.opencord.dhcpl2relay',
-            'org.opencord.olt',
-            'org.opencord.aaa',
-            'org.opencord.maclearner',
-            'org.onosproject.net.flowobjective.impl.FlowObjectiveManager',
-            'org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager'
-          ],
-          logLevel: logLevel
-        ])
-      }
-    }
-  }
-
-  stage('Run test ' + testTarget + ' on ' + workflow + ' workFlow') {
-    sh """
-    if [ ${withMonitoring} = true ] ; then
-      mkdir -p "$WORKSPACE/voltha-pods-mem-consumption-${workflow}"
-      cd "$WORKSPACE/voltha-system-tests"
-      make vst_venv
-      source ./vst_venv/bin/activate || true
-      # Collect initial memory consumption
-      python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption-${workflow} -a 0.0.0.0:31301 -n ${volthaNamespace} || true
-    fi
-    """
-    sh """
-    mkdir -p ${logsDir}
-    export ROBOT_MISC_ARGS="-d ${logsDir} ${params.extraRobotArgs} "
-    ROBOT_MISC_ARGS+="-v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120 -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace} -v container_log_dir:${logsDir} -v logging:${testLogging}"
-    export KVSTOREPREFIX=voltha/voltha_voltha
-
-    make -C "$WORKSPACE/voltha-system-tests" ${testTarget} || true
-    """
-    getPodsInfo("${logsDir}")
-    sh """
-      set +e
-      # collect logs collected in the Robot Framework StartLogging keyword
-      cd ${logsDir}
-      gzip *-combined.log || true
-      rm *-combined.log || true
-    """
-    sh """
-    if [ ${withMonitoring} = true ] ; then
-      cd "$WORKSPACE/voltha-system-tests"
-      source ./vst_venv/bin/activate || true
-      # Collect memory consumption of voltha pods once all the tests are complete
-      python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption-${workflow} -a 0.0.0.0:31301 -n ${volthaNamespace} || true
-    fi
-    """
-  }
-}
-
-def collectArtifacts(exitStatus) {
-  getPodsInfo("$WORKSPACE/${exitStatus}")
-  sh """
-  kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/${exitStatus}/voltha.log || true
-  """
-  archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.txt,**/*.html,**/voltha-pods-mem-consumption-att/*,**/voltha-pods-mem-consumption-dt/*,**/voltha-pods-mem-consumption-tt/*'
-  sh '''
-    sync
-    pkill kail || true
-    which voltctl
-    md5sum $(which voltctl)
-  '''
-  step([$class: 'RobotPublisher',
-    disableArchiveOutput: false,
-    logFileName: "**/*/log*.html",
-    otherFiles: '',
-    outputFileName: "**/*/output*.xml",
-    outputPath: '.',
-    passThreshold: 100,
-    reportFileName: "**/*/report*.html",
-    unstableThreshold: 0,
-    onlyCritical: true]);
-}
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-  environment {
-    KUBECONFIG="$HOME/.kube/kind-${clusterName}"
-    VOLTCONFIG="$HOME/.volt/config"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-    DIAGS_PROFILE="VOLTHA_PROFILE"
-    SSHPASS="karaf"
-  }
-  stages {
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          gerritProject: "${gerritProject}",
-          gerritRefspec: "${gerritRefspec}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Build patch') {
-      // build the patch only if gerritProject is specified
-      when {
-        expression {
-          return !gerritProject.isEmpty()
-        }
-      }
-      steps {
-        // NOTE that the correct patch has already been checked out
-        // during the getVolthaCode step
-        buildVolthaComponent("${gerritProject}")
-      }
-    }
-    stage('Create K8s Cluster') {
-      steps {
-        script {
-          def clusterExists = sh returnStdout: true, script: """
-          kind get clusters | grep ${clusterName} | wc -l
-          """
-          if (clusterExists.trim() == "0") {
-            createKubernetesCluster([nodes: 3, name: clusterName])
-          }
-        }
-      }
-    }
-    stage('Replace voltctl') {
-      // if the project is voltctl override the downloaded one with the built one
-      when {
-        expression {
-          return gerritProject == "voltctl"
-        }
-      }
-      steps{
-        sh """
-        # [TODO] - why is this platform specific (?)
-        # [TODO] - revisit, command alteration has masked an error (see: voltha-2.11).
-        #          find will fail when no filesystem matches are found.
-        #          mv(ls) succeeded simply by accident / was invoked at a different time.
-        mv `ls $WORKSPACE/voltctl/release/voltctl-*-linux-amd*` $WORKSPACE/bin/voltctl
-        chmod +x $WORKSPACE/bin/voltctl
-        """
-      }
-    }
-    stage('Load image in kind nodes') {
-      when {
-        expression {
-          return !gerritProject.isEmpty()
-        }
-      }
-      steps {
-        loadToKind()
-      }
-    }
-    stage('Parse and execute tests') {
-        steps {
-          script {
-            def tests = readYaml text: testTargets
-
-            for(int i = 0;i<tests.size();i++) {
-              def test = tests[i]
-              def target = test["target"]
-              def workflow = test["workflow"]
-              def flags = test["flags"]
-              def teardown = test["teardown"].toBoolean()
-              def logging = test["logging"].toBoolean()
-              def testLogging = 'False'
-              if (logging) {
-                  testLogging = 'True'
-              }
-              println "Executing test ${target} on workflow ${workflow} with logging ${testLogging} and extra flags ${flags}"
-              execute_test(target, workflow, testLogging, teardown, flags)
-            }
-          }
-        }
-    }
-  }
-  post {
-    aborted {
-      collectArtifacts("aborted")
-    }
-    failure {
-      collectArtifacts("failed")
-    }
-    always {
-      collectArtifacts("always")
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/playground/device-management-mock-tests.groovy b/jjb/pipeline/voltha/playground/device-management-mock-tests.groovy
deleted file mode 100644
index 8362a08..0000000
--- a/jjb/pipeline/voltha/playground/device-management-mock-tests.groovy
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def localCharts = false
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 90, unit: 'MINUTES')
-  }
-  environment {
-    KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-    ROBOT_MISC_ARGS="-d $WORKSPACE/RobotLogs"
-  }
-
-  stages {
-
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          gerritProject: "${gerritProject}",
-          gerritRefspec: "${gerritRefspec}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Build Redfish Importer Image') {
-      steps {
-        sh """
-           make -C $WORKSPACE/device-management/\$1 DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build-importer
-           """
-      }
-    }
-    stage('Build demo_test Image') {
-      steps {
-        sh """
-           make -C $WORKSPACE/device-management/\$1/demo_test DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
-           """
-      }
-    }
-    stage('Build mock-redfish-server  Image') {
-      steps {
-        sh """
-           make -C $WORKSPACE/device-management/\$1/mock-redfish-server DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
-           """
-      }
-    }
-    stage('Create K8s Cluster') {
-      steps {
-        createKubernetesCluster([nodes: 3])
-      }
-    }
-    stage('Load image in kind nodes') {
-      steps {
-        loadToKind()
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        script {
-          if (branch != "master" || volthaHelmChartsChange != "") {
-            // if we're using a release or testing changes in the charts, then use the local clone
-            localCharts = true
-          }
-        }
-        volthaDeploy([
-          workflow: "att",
-          extraHelmFlags: extraHelmFlags,
-          dockerRegistry: "mirror.registry.opennetworking.org",
-          localCharts: localCharts,
-        ])
-        // start logging
-        sh """
-        mkdir -p $WORKSPACE/att
-        _TAG=kail-att kail -n infra -n voltha -n default > $WORKSPACE/att/onos-voltha-combined.log &
-        """
-        // forward ONOS and VOLTHA ports
-        sh """
-        _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101&
-        _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181&
-        _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha svc/voltha-voltha-api 55555:55555&
-        """
-      }
-    }
-
-    stage('Run E2E Tests') {
-      steps {
-        sh '''
-           mkdir -p $WORKSPACE/RobotLogs
-
-           # tell the kubernetes script to use images tagged citest and pullPolicy:Never
-           sed -i 's/master/citest/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
-           sed -i 's/imagePullPolicy: Always/imagePullPolicy: Never/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
-           make -C $WORKSPACE/device-management functional-mock-test || true
-           '''
-      }
-    }
-  }
-
-  post {
-    always {
-      sh '''
-         set +e
-         kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
-         kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
-         kubectl get nodes -o wide
-         kubectl get pods -o wide --all-namespaces
-
-         sync
-         pkill kail || true
-
-         ## Pull out errors from log files
-         extract_errors_go() {
-           echo
-           echo "Error summary for $1:"
-           grep $1 $WORKSPACE/att/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
-           echo
-         }
-
-         extract_errors_python() {
-           echo
-           echo "Error summary for $1:"
-           grep $1 $WORKSPACE/att/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
-           echo
-         }
-
-         extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
-         extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
-         extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
-         extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-
-         gzip $WORKSPACE/att/onos-voltha-combined.log
-         '''
-         step([$class: 'RobotPublisher',
-            disableArchiveOutput: false,
-            logFileName: 'RobotLogs/log*.html',
-            otherFiles: '',
-            outputFileName: 'RobotLogs/output*.xml',
-            outputPath: '.',
-            passThreshold: 80,
-            reportFileName: 'RobotLogs/report*.html',
-            unstableThreshold: 0]);
-         archiveArtifacts artifacts: '**/*.log,**/*.gz'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/playground/dmi-build-and-test.groovy b/jjb/pipeline/voltha/playground/dmi-build-and-test.groovy
deleted file mode 100644
index 6d66a53..0000000
--- a/jjb/pipeline/voltha/playground/dmi-build-and-test.groovy
+++ /dev/null
@@ -1,355 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2022-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-// Intent: used to deploy VOLTHA and configure ONOS physical PODs
-//
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-def deploy_custom_chart(namespace, name, chart, extraHelmFlags) {
-  sh """
-    helm install --create-namespace --set defaults.image_pullPolicy=Always --namespace ${namespace} ${extraHelmFlags} ${name} ${chart}
-   """
-}
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 45, unit: 'MINUTES')
-  }
-  environment {
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    LOG_FOLDER="$WORKSPACE/dmi/"
-    APPS_TO_LOG="${OltDevMgr}"
-  }
-
-  stages{
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage ("Parse deployment configuration file") {
-      steps {
-        sh returnStdout: true, script: "rm -rf ${configBaseDir}"
-        sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-          if ( params.workFlow == "DT" ) {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-          }
-          else if ( params.workFlow == "TT" )
-          {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-          }
-          else
-          {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-          }
-        }
-      }
-    }
-    stage('Clean up') {
-      steps {
-        timeout(15) {
-          script {
-            helmTeardown(["default", infraNamespace, volthaNamespace])
-          }
-          timeout(1) {
-            sh returnStdout: false, script: '''
-            # remove orphaned port-forward from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Install Voltha')  {
-      steps {
-        timeout(20) {
-          installVoltctl("${branch}")
-          script {
-            // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
-            def localCharts = false
-            if (volthaHelmChartsChange != "") {
-              localCharts = true
-            }
-
-            // should the config file be suffixed with the workflow? see "deployment_config"
-            def localHelmFlags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml --set global.log_level=${logLevel} "
-
-            if (workFlow.toLowerCase() == "dt") {
-              localHelmFlags += " --set radius.enabled=false "
-            }
-            if (workFlow.toLowerCase() == "tt") {
-              localHelmFlags += " --set radius.enabled=false --set global.incremental_evto_update=true "
-                if (enableMultiUni.toBoolean()) {
-                    localHelmFlags += " --set voltha-adapter-openonu.adapter_open_onu.uni_port_mask=${uniPortMask} "
-                }
-            }
-
-            // NOTE: temporary workaround to expose ONOS node ports (pod-config needs to be updated to contain these values)
-            // and to connect the ofagent to all instances of ONOS
-            localHelmFlags = localHelmFlags + " --set onos-classic.onosSshPort=30115 " +
-            "--set onos-classic.onosApiPort=30120 " +
-            "--set onos-classic.onosOfPort=31653 " +
-            "--set onos-classic.individualOpenFlowNodePorts=true " +
-            "--set voltha.onos_classic.replicas=${params.NumOfOnos}"
-
-            if (bbsimReplicas.toInteger() != 0) {
-              localHelmFlags = localHelmFlags + " --set onu=${onuNumber},pon=${ponNumber} "
-            }
-
-            // adding user specified helm flags at the end so they'll have priority over everything else
-            localHelmFlags = localHelmFlags + " ${extraHelmFlags}"
-
-            def numberOfAdaptersToWait = 2
-
-            if(openoltAdapterChart != "onf/voltha-adapter-openolt") {
-              localHelmFlags = localHelmFlags + " --set voltha-adapter-openolt.enabled=false"
-              // We skip waiting for adapters in the volthaDeploy step because it's already waiting for
-              // both of them after the deployment of the custom olt adapter. See line 156.
-              numberOfAdaptersToWait = 0
-            }
-
-            volthaDeploy([
-              workflow: workFlow.toLowerCase(),
-              extraHelmFlags: localHelmFlags,
-              localCharts: localCharts,
-              kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
-              onosReplica: params.NumOfOnos,
-              atomixReplica: params.NumOfAtomix,
-              kafkaReplica: params.NumOfKafka,
-              etcdReplica: params.NumOfEtcd,
-              bbsimReplica: bbsimReplicas.toInteger(),
-              adaptersToWait: numberOfAdaptersToWait,
-              withVolthaInfra: installVolthaInfra.toBoolean(),
-              withVolthaStack: installVolthaStack.toBoolean(),
-              ])
-
-            if(installVolthaStack.toBoolean()) {
-              if(openoltAdapterChart != "onf/voltha-adapter-openolt"){
-                extraHelmFlags = extraHelmFlags + " --set global.log_level=${logLevel}"
-                deploy_custom_chart(volthaNamespace, oltAdapterReleaseName, openoltAdapterChart, extraHelmFlags)
-                waitForAdapters([
-                  adaptersToWait: 2
-                ])
-              }
-            }
-          }
-          sh """
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd ${params.VolthaEtcdPort}:2379; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-          ps aux | grep port-forward
-          """
-          getPodsInfo("$WORKSPACE")
-        }
-      }
-    }
-    stage('Deploy Device Manager Interface Chart') {
-      steps {
-        script {
-          deploy_custom_chart('default', 'olt-device-manager', dmiChart, extraHelmFlags)
-        }
-        println "Wait for olt-device-manager to start"
-        sh """
-            set +x
-            devmgr=\$(kubectl get pods -l app.kubernetes.io/name=${params.OltDevMgr} --no-headers | grep "0/" | wc -l)
-            while [[ \$devmgr != 0 ]]; do
-              sleep 5
-              devmgr=\$(kubectl get pods -l app.kubernetes.io/name=${params.OltDevMgr} --no-headers | grep "0/" | wc -l)
-            done
-        """
-        sh """
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="${params.OltDevMgr}" bash -c "while true; do kubectl port-forward --address 0.0.0.0 svc/${params.OltDevMgr} 50051; done"&
-          ps aux | grep port-forward
-        """
-      }
-    }
-	stage('Start logging')
-	{
-	    steps
-	    {
-		// Install kail
-		sh("""make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail""")
-
-		sh returnStdout: false, script: '''
-          # start logging with kail
-          cd $WORKSPACE
-          mkdir -p $LOG_FOLDER
-          list=($APPS_TO_LOG)
-          for app in "${list[@]}"
-          do
-            echo "Starting logs for: ${app}"
-            _TAG=kail-$app kail -l app.kubernetes.io/name=$app --since 1h > $LOG_FOLDER/$app.log&
-          done
-        '''
-	    }
-	}
-
-	stage('Reinstall OLT software') {
-      steps {
-        script {
-          if ( params.reinstallOlt ) {
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              sh returnStdout: true, script: """
-              ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-              if [ "${params.inBandManagement}" == "true" ]; then
-                sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'kill -9 `pgrep -f "[b]ash /opt/openolt/openolt_dev_mgmt_daemon_process_watchdog"` || true'
-              fi
-              sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion}"
-              sleep 10
-              """
-              timeout(5) {
-                waitUntil {
-                  olt_sw_present = sh returnStdout: true, script: """
-                  if [[ "${deployment_config.olts[i].oltDebVersion}" == *"asfvolt16"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asfvolt16 | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"asgvolt64"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asgvolt64 | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600x-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600x-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600g-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600g-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-3200g-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-3200g-w | wc -l'
-                  else
-                    echo Unknown Debian package for openolt
-                  fi
-                  if (${deployment_config.olts[i].fortygig}); then
-                    if [[ "${params.inBandManagement}" == "true" ]]; then
-                      ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'mkdir -p /opt/openolt/'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/watchdog-script/* /opt/openolt/'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/bal_cli_appl/example_user_appl /broadcom'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp in-band-startup-script/* /etc/init.d/'
-                    fi
-                  fi
-                  """
-                  return olt_sw_present.toInteger() > 0
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Restart OLT processes') {
-      steps {
-        script {
-          if ( params.restartOlt ) {
-            //rebooting OLTs
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              timeout(15) {
-                sh returnStdout: true, script: """
-                ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-                sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; rm -f /var/log/openolt_process_watchdog.log; reboot > /dev/null &' || true
-                """
-              }
-            }
-            sh returnStdout: true, script: """
-            sleep ${params.waitTimerForOltUp}
-            """
-            // Checking dev_mgmt_daemon and openolt processes
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              if ( params.oltAdapterReleaseName != "open-olt" ) {
-                timeout(15) {
-                  waitUntil {
-                    devprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep dev_mgmt_daemon | wc -l'"
-                    return devprocess.toInteger() > 0
-                  }
-                }
-                timeout(15) {
-                  waitUntil {
-                    openoltprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep openolt | wc -l'"
-                    return openoltprocess.toInteger() > 0
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Run Device Management Interface Tests') {
-      environment {
-        ROBOT_FILE="dmi-hw-management.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs"
-        ROBOT_CONFIG_FILE="$WORKSPACE/voltha-system-tests/tests/data/dmi-components-adtran.yaml"
-      }
-      steps {
-        sh """
-          mkdir -p $ROBOT_LOGS_DIR
-          export ROBOT_MISC_ARGS="--removekeywords wuks -e notreadyDMI -i functionalDMI -d $ROBOT_LOGS_DIR"
-          make -C $WORKSPACE/voltha-system-tests voltha-dmi-test || true
-        """
-      }
-    }
-  }
-
-  post {
-    always {
-      getPodsInfo("$WORKSPACE")
-      sh '''
-      # stop the kail processes
-      list=($APPS_TO_LOG)
-      for app in "${list[@]}"
-      do
-        echo "Stopping logs for: ${app}"
-        _TAG="kail-$app"
-        P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
-        if [ -n "$P_IDS" ]; then
-          echo $P_IDS
-          for P_ID in $P_IDS; do
-            kill -9 $P_ID
-          done
-        fi
-      done
-      '''
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: 'RobotLogs/log*.html',
-        otherFiles: '',
-        outputFileName: 'RobotLogs/output*.xml',
-        outputPath: '.',
-        passThreshold: 100,
-        reportFileName: 'RobotLogs/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true]);
-      archiveArtifacts artifacts: '**/*.txt,**/*.gz,*.gz,**/*.log'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/playground/physical-build.groovy b/jjb/pipeline/voltha/playground/physical-build.groovy
deleted file mode 100644
index 701be97..0000000
--- a/jjb/pipeline/voltha/playground/physical-build.groovy
+++ /dev/null
@@ -1,468 +0,0 @@
-// -*- groovy -*-
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-// Intent: used to deploy VOLTHA and configure ONOS physical PODs
-// -----------------------------------------------------------------------
-
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// -----------------------------------------------------------------------
-// -----------------------------------------------------------------------
-def getIam(String func)
-{
-    // Cannot rely on a stack trace due to jenkins manipulation
-    String src = 'jjb/pipeline/voltha/playground/physical-build.groovy'
-    String iam = [src, func].join('::')
-    return iam
-}
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-// -----------------------------------------------------------------------
-// -----------------------------------------------------------------------
-def deploy_custom_oltAdapterChart(namespace, name, chart, extraHelmFlags) {
-    String iam = getIam('deploy_custom_oltAdapterChart')
-    println("** ${iam}: ENTER")
-
-    sh """
-    helm install --create-namespace --set defaults.image_pullPolicy=Always --namespace ${namespace} ${extraHelmFlags} ${name} ${chart}
-   """
-
-    println("** ${iam}: LEAVE")
-    return
-}
-
-pipeline
-{
-    /* no label, executor is determined by JJB */
-    agent
-    {
-        label "${params.buildNode}"
-    }
-
-    options
-    {
-        timeout(time: 35, unit: 'MINUTES')
-    }
-
-    environment
-    {
-        PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-        KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    }
-
-    stages
-    {
-        stage('Download Code')
-        {
-            steps
-            {
-                iam
-                {
-                    enter = true
-                    label = getIam()
-                }
-
-                getVolthaCode([
-                    branch: "${branch}",
-                    volthaSystemTestsChange: "${volthaSystemTestsChange}",
-                    volthaHelmChartsChange: "${volthaHelmChartsChange}",
-                ])
-
-                iam
-                {
-                    leave = true
-                    label = getIam()
-                }
-            }
-        }
-
-        stage ("Parse deployment configuration file") {
-            steps {
-                sh returnStdout: true, script: "rm -rf ${configBaseDir}"
-                sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-                script {
-                    String conf = "${configBaseDir}/${configDeploymentDir}/${configFileName}"
-                    String flow = params.workFlow
-
-                    conf += (flow == 'DT') ? '-DT.yaml'
-                        : (flow == 'TT') ? '-TT.yaml'
-                        : '.yaml'
-
-                    deployment_config = readYaml file: conf
-
-                    /*
-                    if ( params.workFlow == "DT" )
-                    {
-                        conf += '-DT.yaml'
-//            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-          }
-          else if ( params.workFlow == "TT" )
-          {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-          }
-          else
-          {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-          }
-                     */
-        }
-      }
-    }
-
-    stage('Clean up') {
-      steps {
-        timeout(15) {
-          script {
-            helmTeardown(["default", infraNamespace, volthaNamespace])
-          }
-          timeout(1) {
-            sh returnStdout: false, script: '''
-            # remove orphaned port-forward from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Install Voltha')  {
-      steps {
-        timeout(20) {
-          installVoltctl("${branch}")
-          script {
-            // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
-            def localCharts = false
-            if (volthaHelmChartsChange != "") {
-              localCharts = true
-            }
-
-            // should the config file be suffixed with the workflow? see "deployment_config"
-            def localHelmFlags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml --set global.log_level=${logLevel} "
-
-            if (workFlow.toLowerCase() == "dt") {
-              localHelmFlags += " --set radius.enabled=false "
-            }
-            if (workFlow.toLowerCase() == "tt") {
-              localHelmFlags += " --set radius.enabled=false --set global.incremental_evto_update=true "
-                if (enableMultiUni.toBoolean()) {
-                    localHelmFlags += " --set voltha-adapter-openonu.adapter_open_onu.uni_port_mask=${uniPortMask} "
-                }
-            }
-
-            // NOTE: temporary workaround to expose ONOS node ports (pod-config needs to be updated to contain these values)
-            // and to connect the ofagent to all instances of ONOS
-            localHelmFlags = localHelmFlags + " --set onos-classic.onosSshPort=30115 " +
-            "--set onos-classic.onosApiPort=30120 " +
-            "--set onos-classic.onosOfPort=31653 " +
-            "--set onos-classic.individualOpenFlowNodePorts=true " +
-            "--set voltha.onos_classic.replicas=${params.NumOfOnos}"
-
-            if (bbsimReplicas.toInteger() != 0) {
-              localHelmFlags = localHelmFlags + " --set onu=${onuNumber},pon=${ponNumber} "
-            }
-
-            // adding user specified helm flags at the end so they'll have priority over everything else
-            localHelmFlags = localHelmFlags + " ${extraHelmFlags}"
-
-            def numberOfAdaptersToWait = 2
-
-            if(openoltAdapterChart != "onf/voltha-adapter-openolt") {
-              localHelmFlags = localHelmFlags + " --set voltha-adapter-openolt.enabled=false"
-              // We skip waiting for adapters in the volthaDeploy step because it's already waiting for
-              // both of them after the deployment of the custom olt adapter. See line 156.
-              numberOfAdaptersToWait = 0
-            }
-
-            volthaDeploy([
-              workflow: workFlow.toLowerCase(),
-              extraHelmFlags: localHelmFlags,
-              localCharts: localCharts,
-              kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
-              onosReplica: params.NumOfOnos,
-              atomixReplica: params.NumOfAtomix,
-              kafkaReplica: params.NumOfKafka,
-              etcdReplica: params.NumOfEtcd,
-              bbsimReplica: bbsimReplicas.toInteger(),
-              withFttb: withFttb.toBoolean(),
-              adaptersToWait: numberOfAdaptersToWait,
-              ])
-
-            if(openoltAdapterChart != "onf/voltha-adapter-openolt"){
-              extraHelmFlags = extraHelmFlags + " --set global.log_level=${logLevel}"
-              deploy_custom_oltAdapterChart(volthaNamespace, oltAdapterReleaseName, openoltAdapterChart, extraHelmFlags)
-              waitForAdapters([
-                adaptersToWait: 2
-              ])
-            }
-          }
-          sh """
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd ${params.VolthaEtcdPort}:2379; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-          ps aux | grep port-forward
-          """
-          getPodsInfo("$WORKSPACE")
-        }
-      }
-    }
-    stage('Push Tech-Profile') {
-      steps {
-        script {
-          if ( params.configurePod && params.profile != "Default" ) {
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              def tech_prof_directory = "XGS-PON"
-              if (deployment_config.olts[i].containsKey("board_technology")){
-                tech_prof_directory = deployment_config.olts[i]["board_technology"]
-              }
-              timeout(1) {
-                sh returnStatus: true, script: """
-                export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
-                etcd_container=\$(kubectl get pods -n ${infraNamespace} -l app.kubernetes.io/name=etcd --no-headers | awk 'NR==1{print \$1}')
-                if [[ "${workFlow}" == "TT" ]]; then
-                   kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
-                   kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
-                   kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
-                   kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65'
-                   if [[ "${params.enableMultiUni}" == "true" ]]; then
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-MCAST-AdditionalBW-None.json \$etcd_container:/tmp/mcast_additionalBW_none.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_none.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast_additionalBW_na.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_na.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/67'
-                   else
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-None.json \$etcd_container:/tmp/mcast_additionalBW_none.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_none.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast_additionalBW_na.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_na.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/67'
-                   fi
-                else
-                   kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json \$etcd_container:/tmp/flexpod.json
-                   kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
-                fi
-                """
-              }
-              timeout(1) {
-                sh returnStatus: true, script: """
-                export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
-                etcd_container=\$(kubectl get pods -n ${infraNamespace} -l app.kubernetes.io/name=etcd --no-headers | awk 'NR==1{print \$1}')
-                kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'ETCDCTL_API=3 etcdctl get --prefix service/voltha/technology_profiles/${tech_prof_directory}/64'
-                """
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Push MIB templates') {
-      steps {
-        sh """
-        export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
-        etcd_container=\$(kubectl get pods -n ${infraNamespace} -l app.kubernetes.io/name=etcd --no-headers | awk 'NR==1{print \$1}')
-        kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/MIB_Alpha.json \$etcd_container:/tmp/MIB_Alpha.json
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/go_templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
-        kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/MIB_Scom.json \$etcd_container:/tmp/MIB_Scom.json
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Scom.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/go_templates/SCOM/Glasfaser-Modem/090140.1.0.304'
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Scom.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/templates/SCOM/Glasfaser-Modem/090140.1.0.304'
-        """
-      }
-    }
-    stage('Push Sadis-config') {
-      steps {
-        timeout(1) {
-          sh returnStatus: true, script: """
-          if [[ "${workFlow}" == "DT" ]]; then
-            curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-DT.json
-          elif [[ "${workFlow}" == "TT" ]]; then
-            curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-TT.json
-          else
-            # this is the ATT case, rename the file in *-sadis-ATT.json so that we can avoid special cases and just load the file
-            curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis.json
-          fi
-          """
-        }
-      }
-    }
-    stage('Switch Configurations in ONOS') {
-      steps {
-        script {
-          if ( deployment_config.fabric_switches.size() > 0 ) {
-            timeout(1) {
-              def netcfg = "$WORKSPACE/${configBaseDir}/${configToscaDir}/voltha/${configFileName}-onos-netcfg-switch.json"
-              if (params.inBandManagement){
-                netcfg = "$WORKSPACE/${configBaseDir}/${configToscaDir}/voltha/${configFileName}-onos-netcfg-switch-inband.json"
-              }
-              sh """
-              curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @${netcfg}
-              curl -sSL --user karaf:karaf -X POST http://${deployment_config.nodes[0].ip}:30120/onos/v1/applications/org.onosproject.segmentrouting/active
-              """
-            }
-            timeout(3) {
-              setOnosLogLevels([
-                  onosNamespace: infraNamespace,
-                  apps: [
-                    'org.opencord.dhcpl2relay',
-                    'org.opencord.olt',
-                    'org.opencord.aaa',
-                    'org.opencord.maclearner',
-                    'org.onosproject.net.flowobjective.impl.FlowObjectiveManager',
-                    'org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager'
-                  ]
-              ])
-              waitUntil {
-                sr_active_out = sh returnStatus: true, script: """
-                curl -sSL --user karaf:karaf -X GET http://${deployment_config.nodes[0].ip}:30120/onos/v1/applications/org.onosproject.segmentrouting | jq '.state' | grep ACTIVE
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled false"
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.net.flow.impl.FlowRuleManager purgeOnDisconnection false"
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.net.meter.impl.MeterManager purgeOnDisconnection false"
-                """
-                return sr_active_out == 0
-              }
-            }
-            timeout(8) {
-              for(int i=0; i < deployment_config.hosts.src.size(); i++) {
-                for(int j=0; j < deployment_config.olts.size(); j++) {
-                  def aggPort = -1
-                  if(deployment_config.olts[j].serial == deployment_config.hosts.src[i].olt){
-                      aggPort = deployment_config.olts[j].aggPort
-                      if(aggPort == -1){
-                        throw new Exception("Upstream port for the olt is not configured, field aggPort is empty")
-                      }
-                      sh """
-                      sleep 10 # NOTE why are we sleeping?
-                      curl -X POST --user karaf:karaf --header 'Content-Type: application/json' --header 'Accept: application/json' -d '{"deviceId": "${deployment_config.fabric_switches[0].device_id}", "vlanId": "${deployment_config.hosts.src[i].s_tag}", "endpoints": [${deployment_config.fabric_switches[0].bngPort},${aggPort}]}' 'http://${deployment_config.nodes[0].ip}:30120/onos/segmentrouting/xconnect'
-                      """
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Reinstall OLT software') {
-      steps {
-        script {
-          if ( params.reinstallOlt ) {
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              // NOTE what is oltDebVersion23? is that for VOLTHA-2.3? do we still need this differentiation?
-              sh returnStdout: true, script: """
-              ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-              if [ "${params.inBandManagement}" == "true" ]; then
-                sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'kill -9 `pgrep -f "[b]ash /opt/openolt/openolt_dev_mgmt_daemon_process_watchdog"` || true'
-              fi
-              sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion}"
-              sleep 10
-              """
-              timeout(5) {
-                waitUntil {
-                  olt_sw_present = sh returnStdout: true, script: """
-                  if [[ "${deployment_config.olts[i].oltDebVersion}" == *"asfvolt16"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asfvolt16 | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"asgvolt64"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asgvolt64 | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600x-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600x-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600g-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600g-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-3200g-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-3200g-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"sda3016ss"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep sda3016ss | wc -l'
-                  else
-                    echo Unknown Debian package for openolt
-                  fi
-                  if (${deployment_config.olts[i].fortygig}); then
-                    if [[ "${params.inBandManagement}" == "true" ]]; then
-                      ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'mkdir -p /opt/openolt/'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/watchdog-script/* /opt/openolt/'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/bal_cli_appl/example_user_appl /broadcom'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp in-band-startup-script/* /etc/init.d/'
-                    fi
-                  fi
-                  """
-                  return olt_sw_present.toInteger() > 0
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Restart OLT processes') {
-      steps {
-        script {
-          //rebooting OLTs
-          for(int i=0; i < deployment_config.olts.size(); i++) {
-            timeout(15) {
-              sh returnStdout: true, script: """
-              ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-              sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; rm -f /var/log/openolt_process_watchdog.log; reboot > /dev/null &' || true
-              """
-            }
-          }
-          sh returnStdout: true, script: """
-          sleep ${params.waitTimerForOltUp}
-          """
-          // Checking dev_mgmt_daemon and openolt processes
-          for(int i=0; i < deployment_config.olts.size(); i++) {
-            if ( params.oltAdapterReleaseName != "open-olt" ) {
-              timeout(15) {
-                waitUntil {
-                  devprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep dev_mgmt_daemon | wc -l'"
-                  return devprocess.toInteger() > 0
-                }
-              }
-              timeout(15) {
-                waitUntil {
-                  openoltprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep openolt | wc -l'"
-                  return openoltprocess.toInteger() > 0
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  post {
-    aborted {
-      getPodsInfo("$WORKSPACE/failed")
-      sh """
-      kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.log || true
-      """
-      archiveArtifacts artifacts: '**/*.log,**/*.txt'
-    }
-    failure {
-      getPodsInfo("$WORKSPACE/failed")
-      sh """
-      kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.log || true
-      """
-      archiveArtifacts artifacts: '**/*.log,**/*.txt'
-    }
-    always {
-      archiveArtifacts artifacts: '*.txt'
-    }
-  }
-}
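The OLT reinstall and restart stages above repeat one polling idiom: run a shell probe with sh(returnStdout: true), parse the count, and let waitUntil retry inside a timeout. A minimal sketch of that idiom, assuming a hypothetical helper name and probe command (neither comes from the deleted file):

// Hedged sketch: retry a shell probe until it reports at least minCount matches.
// probeCmd and minCount are illustrative parameters, not names from the original pipeline.
def waitForCount(String probeCmd, int minCount, int minutes = 5) {
    timeout(minutes) {
        waitUntil {
            // returnStdout captures the probe output; trim before converting to an integer
            def out = sh(returnStdout: true, script: probeCmd).trim()
            return out.toInteger() >= minCount
        }
    }
}
// e.g. waitForCount("ssh user@olt 'dpkg --list | grep asfvolt16 | wc -l'", 1)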
diff --git a/jjb/pipeline/voltha/playground/software-upgrades.groovy b/jjb/pipeline/voltha/playground/software-upgrades.groovy
deleted file mode 100644
index 1238a53..0000000
--- a/jjb/pipeline/voltha/playground/software-upgrades.groovy
+++ /dev/null
@@ -1,308 +0,0 @@
-// Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// voltha-2.x e2e tests
-// uses bbsim to simulate OLT/ONUs
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// Fetches the versions/tags of the given VOLTHA component.
-// Returns the deployment version, i.e. one tag below the latest available tag of the repo; the first VOLTHA stack is deployed with it.
-// Returns the test version, i.e. the latest tag of the repo; the component upgrade is tested against it.
-// Note: if there is a major version change between the deployment and test tags, the deployment tag is set equal to the test tag, i.e. both use the latest.
-def get_voltha_comp_versions(component, base_deploy_tag) {
-    def comp_test_tag = sh (
-      script: "git ls-remote --refs --tags https://github.com/opencord/${component} | cut --delimiter='/' --fields=3 | tr '-' '~' | sort --version-sort | tail --lines=1 | sed 's/v//'",
-      returnStdout: true
-    ).trim()
-    def comp_deploy_tag = sh (
-      script: "git ls-remote --refs --tags https://github.com/opencord/${component} | cut --delimiter='/' --fields=3 | tr '-' '~' | sort --version-sort | tail --lines=2 | head -n 1 | sed 's/v//'",
-      returnStdout: true
-    ).trim()
-    def comp_deploy_major = comp_deploy_tag.substring(0, comp_deploy_tag.indexOf('.'))
-    def comp_test_major = comp_test_tag.substring(0, comp_test_tag.indexOf('.'))
-    if ( "${comp_deploy_major.trim()}" != "${comp_test_major.trim()}") {
-      comp_deploy_tag = comp_test_tag
-    }
-    if ( "${comp_test_tag.trim()}" == "${base_deploy_tag.trim()}") {
-      comp_deploy_tag = comp_test_tag
-      comp_test_tag = "master"
-    }
-    println "${component}: deploy_tag: ${comp_deploy_tag}, test_tag: ${comp_test_tag}"
-    return [comp_deploy_tag, comp_test_tag]
-}
-
-def test_software_upgrade(name) {
-  def infraNamespace = "infra"
-  def volthaNamespace = "voltha"
-  def openolt_adapter_deploy_tag = ""
-  def openolt_adapter_test_tag = ""
-  def openonu_adapter_deploy_tag = ""
-  def openonu_adapter_test_tag = ""
-  def rw_core_deploy_tag = ""
-  def rw_core_test_tag = ""
-  def ofagent_deploy_tag = ""
-  def ofagent_test_tag = ""
-  def logsDir = "$WORKSPACE/${name}"
-  stage('Deploy Voltha - '+ name) {
-    timeout(10) {
-      // start logging
-      sh """
-      rm -rf ${logsDir} || true
-      mkdir -p ${logsDir}
-      _TAG=kail-${name} kail -n ${infraNamespace} -n ${volthaNamespace} > ${logsDir}/onos-voltha-startup-combined.log &
-      """
-      def extraHelmFlags = extraHelmFlags.trim()
-      if ("${name}" == "onos-app-upgrade" || "${name}" == "onu-software-upgrade" || "${name}" == "onu-software-upgrade-omci-extended-msg" || "${name}" == "voltha-component-upgrade" || "${name}" == "voltha-component-rolling-upgrade") {
-          extraHelmFlags = " --set global.log_level=${logLevel.toUpperCase()},onu=1,pon=1 --set onos-classic.replicas=3,onos-classic.atomix.replicas=3 " + extraHelmFlags
-      }
-      if ("${name}" == "onu-software-upgrade" || "${name}" == "onu-software-upgrade-omci-extended-msg") {
-          extraHelmFlags = " --set global.extended_omci_support.enabled=true " + extraHelmFlags
-      }
-      if ("${name}" == "onu-software-upgrade-omci-extended-msg") {
-          extraHelmFlags = " --set omccVersion=180 " + extraHelmFlags
-      }
-      if ("${name}" == "onu-image-dwl-simultaneously") {
-          extraHelmFlags = " --set global.log_level=${logLevel.toUpperCase()},onu=2,pon=2 --set onos-classic.replicas=3,onos-classic.atomix.replicas=3 " + extraHelmFlags
-      }
-      if ("${name}" == "onos-app-upgrade" || "${name}" == "onu-software-upgrade" || "${name}" == "onu-software-upgrade-omci-extended-msg" || "${name}" == "onu-image-dwl-simultaneously") {
-          extraHelmFlags = " --set global.image_tag=master --set onos-classic.image.tag=master " + extraHelmFlags
-      }
-      if ("${name}" == "voltha-component-upgrade" || "${name}" == "voltha-component-rolling-upgrade") {
-          extraHelmFlags = " --set images.onos_config_loader.tag=master-onos-config-loader --set onos-classic.image.tag=master " + extraHelmFlags
-      }
-      extraHelmFlags = extraHelmFlags + " --set onos-classic.onosSshPort=30115 --set onos-classic.onosApiPort=30120 "
-      extraHelmFlags = extraHelmFlags + " --set voltha.onos_classic.replicas=3"
-      //ONOS custom image handling
-      if ( onosImg.trim() != '' ) {
-         String[] split;
-         onosImg = onosImg.trim()
-         split = onosImg.split(':')
-        extraHelmFlags = extraHelmFlags + " --set onos-classic.image.repository=" + split[0] +",onos-classic.image.tag=" + split[1] + " "
-      }
-      Integer olts = 1
-      if ("${name}" == "onu-image-dwl-simultaneously") {
-          olts = 2
-      }
-      if ("${name}" == "voltha-component-upgrade" || "${name}" == "voltha-component-rolling-upgrade") {
-        // fetch voltha components versions/tags
-        (openolt_adapter_deploy_tag, openolt_adapter_test_tag) = get_voltha_comp_versions("voltha-openolt-adapter", openoltAdapterDeployBaseTag.trim())
-        extraHelmFlags = extraHelmFlags + " --set voltha-adapter-openolt.images.adapter_open_olt.tag=${openolt_adapter_deploy_tag} "
-        (openonu_adapter_deploy_tag, openonu_adapter_test_tag) = get_voltha_comp_versions("voltha-openonu-adapter-go", openonuAdapterDeployBaseTag.trim())
-        extraHelmFlags = extraHelmFlags + " --set voltha-adapter-openonu.images.adapter_open_onu_go.tag=${openonu_adapter_deploy_tag} "
-        (rw_core_deploy_tag, rw_core_test_tag) = get_voltha_comp_versions("voltha-go", rwCoreDeployBaseTag.trim())
-        extraHelmFlags = extraHelmFlags + " --set voltha.images.rw_core.tag=${rw_core_deploy_tag} "
-        (ofagent_deploy_tag, ofagent_test_tag) = get_voltha_comp_versions("ofagent-go", ofagentDeployBaseTag.trim())
-        extraHelmFlags = extraHelmFlags + " --set voltha.images.ofagent.tag=${ofagent_deploy_tag} "
-      }
-      def localCharts = false
-      // Currently only testing with ATT workflow
-      // TODO: Support for other workflows
-      volthaDeploy([bbsimReplica: olts.toInteger(), workflow: "att", extraHelmFlags: extraHelmFlags, localCharts: localCharts])
-      // stop logging
-      sh """
-        P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${name}" | grep -v grep | awk '{print \$1}')"
-        if [ -n "\$P_IDS" ]; then
-          echo \$P_IDS
-          for P_ID in \$P_IDS; do
-            kill -9 \$P_ID
-          done
-        fi
-        cd ${logsDir}
-        gzip -k onos-voltha-startup-combined.log
-        rm onos-voltha-startup-combined.log
-      """
-      // forward ONOS and VOLTHA ports
-      sh """
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8101:8101; done 2>&1 " &
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8181:8181; done 2>&1 " &
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG=port-forward-voltha-api /bin/bash -c "while true; do kubectl -n voltha port-forward --address 0.0.0.0 service/voltha-voltha-api 55555:55555; done 2>&1 " &
-      """
-      sh """
-      sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set DEBUG org.opencord
-      """
-    }
-  }
-  stage('Test - '+ name) {
-    timeout(75) {
-      sh """
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/${name}"
-        mkdir -p \$ROBOT_LOGS_DIR
-        if [[ ${name} == 'onos-app-upgrade' ]]; then
-          export ONOS_APPS_UNDER_TEST+=''
-          if [ ${aaaVer.trim()} != '' ] && [ ${aaaOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.aaa,${aaaVer.trim()},${aaaOarUrl.trim()}*"
-          fi
-          if [ ${oltVer.trim()} != '' ] && [ ${oltOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.olt,${oltVer.trim()},${oltOarUrl.trim()}*"
-          fi
-          if [ ${dhcpl2relayVer.trim()} != '' ] && [ ${dhcpl2relayOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.dhcpl2relay,${dhcpl2relayVer.trim()},${dhcpl2relayOarUrl.trim()}*"
-          fi
-          if [ ${igmpproxyVer.trim()} != '' ] && [ ${igmpproxyOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.igmpproxy,${igmpproxyVer.trim()},${igmpproxyOarUrl.trim()}*"
-          fi
-          if [ ${sadisVer.trim()} != '' ] && [ ${sadisOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.sadis,${sadisVer.trim()},${sadisOarUrl.trim()}*"
-          fi
-          if [ ${mcastVer.trim()} != '' ] && [ ${mcastOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.mcast,${mcastVer.trim()},${mcastOarUrl.trim()}*"
-          fi
-          if [ ${kafkaVer.trim()} != '' ] && [ ${kafkaOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.kafka,${kafkaVer.trim()},${kafkaOarUrl.trim()}*"
-          fi
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v onos_apps_under_test:\$ONOS_APPS_UNDER_TEST -e PowerSwitch"
-          export TARGET=onos-app-upgrade-test
-        fi
-        if [ ${name} == 'voltha-component-upgrade' ] || [ ${name} == 'voltha-component-rolling-upgrade' ]; then
-          export VOLTHA_COMPS_UNDER_TEST+=''
-          VOLTHA_COMPS_UNDER_TEST+="adapter-open-olt,adapter-open-olt,voltha/voltha-openolt-adapter:${openolt_adapter_test_tag}*"
-          VOLTHA_COMPS_UNDER_TEST+="adapter-open-onu,adapter-open-onu,voltha/voltha-openonu-adapter-go:${openonu_adapter_test_tag}*"
-          VOLTHA_COMPS_UNDER_TEST+="rw-core,voltha,voltha/voltha-rw-core:${rw_core_test_tag}*"
-          VOLTHA_COMPS_UNDER_TEST+="ofagent,ofagent,voltha/voltha-ofagent-go:${ofagent_test_tag}*"
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v voltha_comps_under_test:\$VOLTHA_COMPS_UNDER_TEST -e PowerSwitch"
-        fi
-        if [[ ${name} == 'voltha-component-upgrade' ]]; then
-          export TARGET=voltha-comp-upgrade-test
-        fi
-        if [[ ${name} == 'voltha-component-rolling-upgrade' ]]; then
-          export TARGET=voltha-comp-rolling-upgrade-test
-        fi
-        if [ ${name} == 'onu-software-upgrade' ] || [ ${name} == 'onu-software-upgrade-omci-extended-msg' ]; then
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v image_version:${onuImageVersion.trim()} -v image_url:${onuImageUrl.trim()} -v image_vendor:${onuImageVendor.trim()} -v image_activate_on_success:${onuImageActivateOnSuccess.trim()} -v image_commit_on_success:${onuImageCommitOnSuccess.trim()} -v image_crc:${onuImageCrc.trim()} -e PowerSwitch"
-          export TARGET=onu-upgrade-test
-        fi
-        if [[ ${name} == 'onu-image-dwl-simultaneously' ]]; then
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v image_version:${onuImageVersion.trim()} -v image_url:${onuImageUrl.trim()} -v image_vendor:${onuImageVendor.trim()} -v image_activate_on_success:${onuImageActivateOnSuccess.trim()} -v image_commit_on_success:${onuImageCommitOnSuccess.trim()} -v image_crc:${onuImageCrc.trim()} -e PowerSwitch"
-          export TARGET=onu-upgrade-test-multiolt-kind-att
-        fi
-        testLogging='False'
-        if [ ${logging} = true ]; then
-          testLogging='True'
-        fi
-        export VOLTCONFIG=$HOME/.volt/config-minimal
-        export KUBECONFIG=$HOME/.kube/kind-config-voltha-minimal
-        ROBOT_MISC_ARGS+=" -v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120 -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace} -v container_log_dir:${logsDir} -v logging:\$testLogging"
-        # Run the specified tests
-        make -C $WORKSPACE/voltha-system-tests \$TARGET || true
-      """
-      // remove port-forwarding
-      sh """
-        # remove orphaned port-forward from different namespaces
-        ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
-      """
-      // collect pod details
-      get_pods_info("$WORKSPACE/${name}")
-      sh """
-        set +e
-        # collect logs collected in the Robot Framework StartLogging keyword
-        cd ${logsDir}
-        gzip *-combined.log || true
-        rm *-combined.log || true
-      """
-      helmTeardown(['infra', 'voltha'])
-    }
-  }
-}
-def get_pods_info(dest) {
-  // collect pod details, this is here in case of failure
-  sh """
-  mkdir -p ${dest} || true
-  kubectl get pods --all-namespaces -o wide > ${dest}/pods.txt || true
-  kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee ${dest}/pod-images.txt || true
-  kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee ${dest}/pod-imagesId.txt || true
-  kubectl describe pods --all-namespaces -l app.kubernetes.io/part-of=voltha > ${dest}/voltha-pods-describe.txt
-  kubectl describe pods -n infra -l app=onos-classic > ${dest}/onos-pods-describe.txt
-  helm ls --all-namespaces > ${dest}/helm-charts.txt
-  """
-  sh '''
-  # copy the ONOS logs directly from the container to avoid the color codes
-  printf '%s\\n' $(kubectl get pods -n infra -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c 'kubectl -n infra cp #:apache-karaf-4.2.14/data/log/karaf.log ''' + dest + '''/#.log' || true
-  '''
-}
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 220, unit: 'MINUTES')
-  }
-  environment {
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-    KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
-    SSHPASS="karaf"
-  }
-  stages{
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Cleanup') {
-      steps {
-        // remove port-forwarding
-        sh """
-          # remove orphaned port-forward from different namespaces
-          ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
-        """
-        helmTeardown(['infra', 'voltha'])
-      }
-    }
-    stage('Create K8s Cluster') {
-      steps {
-        createKubernetesCluster([nodes: 3])
-      }
-    }
-    stage('Run Test') {
-      steps {
-        test_software_upgrade("onos-app-upgrade")
-        test_software_upgrade("voltha-component-upgrade")
-        test_software_upgrade("voltha-component-rolling-upgrade")
-        test_software_upgrade("onu-software-upgrade")
-        test_software_upgrade("onu-software-upgrade-omci-extended-msg")
-        test_software_upgrade("onu-image-dwl-simultaneously")
-      }
-    }
-  }
-  post {
-    aborted {
-      get_pods_info("$WORKSPACE/failed")
-    }
-    failure {
-      get_pods_info("$WORKSPACE/failed")
-    }
-    always {
-      step([$class: 'RobotPublisher',
-         disableArchiveOutput: false,
-         logFileName: 'RobotLogs/*/log*.html',
-         otherFiles: '',
-         outputFileName: 'RobotLogs/*/output*.xml',
-         outputPath: '.',
-         passThreshold: 100,
-         reportFileName: 'RobotLogs/*/report*.html',
-         unstableThreshold: 0,
-         onlyCritical: true]);
-      archiveArtifacts artifacts: '*.log,**/*.log,**/*.gz,*.gz,*.txt,**/*.txt'
-    }
-  }
-}
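The tag-selection rule documented in get_voltha_comp_versions above boils down to: deploy the second-newest tag and test the newest, unless a major-version jump or an already-deployed base tag forces both onto the latest (or the test onto master). A small Groovy sketch over a hypothetical, already version-sorted tag list:

// Hedged sketch of the deploy/test tag selection described above.
// 'tags' is a hypothetical, already version-sorted list of component tags.
def pickTags(List<String> tags, String baseDeployTag) {
    def testTag = tags[-1]      // latest tag: the upgrade is tested on this
    def deployTag = tags[-2]    // previous tag: the first stack is deployed with this
    // major version jump: deploy and test on the same (latest) tag
    if (deployTag.tokenize('.')[0] != testTag.tokenize('.')[0]) {
        deployTag = testTag
    }
    // latest tag is already the deployed base: test against master instead
    if (testTag == baseDeployTag) {
        deployTag = testTag
        testTag = 'master'
    }
    return [deployTag, testTag]
}
assert pickTags(['2.11.0', '2.11.1', '2.12.0'], '2.10.0') == ['2.11.1', '2.12.0']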
diff --git a/jjb/pipeline/voltha/playground/tucson-build-and-test.groovy b/jjb/pipeline/voltha/playground/tucson-build-and-test.groovy
deleted file mode 100644
index 81b26ab..0000000
--- a/jjb/pipeline/voltha/playground/tucson-build-and-test.groovy
+++ /dev/null
@@ -1,367 +0,0 @@
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// used to deploy VOLTHA and configure ONOS physical PODs
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-def clusterName = "kind-ci"
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 120, unit: 'MINUTES')
-  }
-  environment {
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-    KUBECONFIG="$HOME/.kube/kind-${clusterName}"
-    VOLTCONFIG="$HOME/.volt/config"
-    LOG_FOLDER="$WORKSPACE/${workflow}/"
-    APPS_TO_LOG="etcd kafka onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server onos-config-loader"
-
-  }
-  stages{
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          gerritProject: "${gerritProject}",
-          gerritRefspec: "${gerritRefspec}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage ("Parse deployment configuration file") {
-      steps {
-        sh returnStdout: true, script: "rm -rf ${configBaseDir}"
-        sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-
-          if (params.workflow.toUpperCase() == "TT") {
-            error("The Tucson POD does not support TT workflow at the moment")
-          }
-
-          if ( params.workflow.toUpperCase() == "DT" ) {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-          }
-          else if ( params.workflow.toUpperCase() == "TT" ) {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-          }
-          else {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-          }
-        }
-      }
-    }
-    stage('Clean up') {
-      steps {
-        timeout(15) {
-          script {
-            helmTeardown(["default", infraNamespace, volthaNamespace])
-          }
-          timeout(1) {
-            sh returnStdout: false, script: '''
-            # remove orphaned port-forward from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Build patch') {
-      steps {
-        // NOTE that the correct patch has already been checked out
-        // during the getVolthaCode step
-        buildVolthaComponent("${gerritProject}")
-      }
-    }
-    stage('Create K8s Cluster') {
-      steps {
-        script {
-          def clusterExists = sh returnStdout: true, script: """
-          kind get clusters | grep ${clusterName} | wc -l
-          """
-          if (clusterExists.trim() == "0") {
-            createKubernetesCluster([nodes: 3, name: clusterName])
-          }
-        }
-      }
-    }
-    stage('Load image in kind nodes') {
-      steps {
-        loadToKind()
-      }
-    }
-    stage('Install Voltha')  {
-      steps {
-        timeout(20) {
-          script {
-            imageFlags = getVolthaImageFlags(gerritProject)
-            // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
-            def localCharts = false
-            if (volthaHelmChartsChange != "" || gerritProject == "voltha-helm-charts") {
-              localCharts = true
-            }
-            def flags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml ${imageFlags} "
-            // NOTE: temporary workaround to expose ONOS node ports (pod-config needs to be updated to contain these values)
-            flags = flags + "--set onos-classic.onosSshPort=30115 " +
-            "--set onos-classic.onosApiPort=30120 " +
-            "--set onos-classic.onosOfPort=31653 " +
-            "--set onos-classic.individualOpenFlowNodePorts=true " + extraHelmFlags
-            volthaDeploy([
-              workflow: workFlow.toLowerCase(),
-              extraHelmFlags: flags,
-              localCharts: localCharts,
-              kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
-              onosReplica: 3,
-              atomixReplica: 3,
-              kafkaReplica: 3,
-              etcdReplica: 3,
-              ])
-          }
-          // start logging
-          sh """
-          rm -rf $WORKSPACE/${workFlow}/
-          mkdir -p $WORKSPACE/${workFlow}
-          _TAG=kail-${workFlow} kail -n infra -n voltha > $WORKSPACE/${workFlow}/onos-voltha-combined.log &
-          """
-          sh returnStdout: false, script: '''
-          # start logging with kail
-
-          mkdir -p $LOG_FOLDER
-
-          list=($APPS_TO_LOG)
-          for app in "${list[@]}"
-          do
-            echo "Starting logs for: ${app}"
-            _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
-          done
-          '''
-          sh """
-          JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-          ps aux | grep port-forward
-          """
-          getPodsInfo("$WORKSPACE")
-        }
-      }
-    }
-    stage('Deploy Kafka Dump Chart') {
-      steps {
-        script {
-          sh returnStdout: false, script: """
-              helm repo add cord https://charts.opencord.org
-              helm repo update
-              if helm version -c --short|grep v2 -q; then
-                helm install -n voltha-kafka-dump cord/voltha-kafka-dump
-              else
-                helm install voltha-kafka-dump cord/voltha-kafka-dump
-              fi
-          """
-        }
-      }
-    }
-    stage('Push Tech-Profile') {
-      when {
-        expression { params.profile != "Default" }
-      }
-      steps {
-        sh returnStdout: false, script: """
-        etcd_container=\$(kubectl get pods -n voltha | grep voltha-etcd-cluster | awk 'NR==1{print \$1}')
-        kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json voltha/\$etcd_container:/tmp/flexpod.json
-        kubectl exec -it \$etcd_container -n voltha -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/XGS-PON/64'
-        """
-      }
-    }
-
-    stage('Push Sadis-config') {
-      steps {
-        sh returnStdout: false, script: """
-        ssh-keygen -R [${deployment_config.nodes[0].ip}]:30115
-        ssh-keyscan -p 30115 -H ${deployment_config.nodes[0].ip} >> ~/.ssh/known_hosts
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.dhcpl2relay"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.aaa"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.olt"
-        #TRACE in the pipeliner is too chatty, moving to DEBUG
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.opencord.olt.driver"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager"
-
-        if [[ "${workFlow.toUpperCase()}" == "DT" ]]; then
-          curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-DT.json
-        elif [[ "${workFlow.toUpperCase()}" == "TT" ]]; then
-          curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-TT.json
-        else
-          # this is the ATT case; rename the file to *-sadis-ATT.json so that we can avoid special cases and just load the file
-          curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis.json
-        fi
-        """
-      }
-    }
-    stage('Reinstall OLT software') {
-      when {
-        expression { params.reinstallOlt }
-      }
-      steps {
-        script {
-          deployment_config.olts.each { olt ->
-            sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --remove asfvolt16 && dpkg --purge asfvolt16'"
-            waitUntil {
-              olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
-              return olt_sw_present.toInteger() == 0
-            }
-            if ( params.branch == 'voltha-2.3' ) {
-              oltDebVersion = oltDebVersionVoltha23
-            } else {
-              oltDebVersion = oltDebVersionMaster
-            }
-            sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --install ${oltDebVersion}'"
-            waitUntil {
-              olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
-              return olt_sw_present.toInteger() == 1
-            }
-            if ( olt.fortygig ) {
-              // If the OLT is connected to a 40G switch interface, set the NNI port to be downgraded
-              sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'echo port ce128 sp=40000 >> /broadcom/qax.soc ; /opt/bcm68620/svk_init.sh'"
-            }
-          }
-        }
-      }
-    }
-
-    stage('Restart OLT processes') {
-      steps {
-        script {
-          deployment_config.olts.each { olt ->
-            sh returnStdout: false, script: """
-            ssh-keyscan -H ${olt.ip} >> ~/.ssh/known_hosts
-            sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; reboot'
-            sleep 120
-            """
-            waitUntil {
-              onu_discovered = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'grep \"onu discover indication\" /var/log/openolt.log | wc -l'"
-              return onu_discovered.toInteger() > 0
-            }
-          }
-        }
-      }
-    }
-    stage('Run E2E Tests') {
-      steps {
-        script {
-          // different workflows need different make targets and different robot files
-          if ( params.workflow.toUpperCase() == "DT" ) {
-            robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-            robotFile = "Voltha_DT_PODTests.robot"
-            makeTarget = "voltha-dt-test"
-            robotFunctionalKeyword = "-i functionalDt"
-            robotDataplaneKeyword = "-i dataplaneDt"
-          }
-          else if ( params.workflow.toUpperCase() == "TT" ) {
-            // TODO the TT tests have different tags, address once/if TT is supported on the Tucson POD
-            robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-            robotFile = "Voltha_TT_PODTests.robot"
-            makeTarget = "voltha-tt-test"
-            robotFunctionalKeyword = "-i functionalTt"
-            robotDataplaneKeyword = "-i dataplaneTt"
-          }
-          else {
-            robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-            robotFile = "Voltha_PODTests.robot"
-            makeTarget = "voltha-test"
-            robotFunctionalKeyword = "-i functional"
-            robotDataplaneKeyword = "-i dataplane"
-          }
-        }
-        sh returnStdout: false, script: """
-        mkdir -p $WORKSPACE/RobotLogs
-
-        export ROBOT_CONFIG_FILE="$WORKSPACE/${robotConfigFile}"
-        export ROBOT_MISC_ARGS="${params.extraRobotArgs} --removekeywords wuks -d $WORKSPACE/RobotLogs -v container_log_dir:$WORKSPACE "
-        export ROBOT_FILE="${robotFile}"
-
-        # If the Gerrit comment contains a line with "functional tests" then run the full
-        # functional test suite.  This covers tests tagged either 'sanity' or 'functional'.
-        # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
-        REGEX="functional tests"
-        if [[ "${gerritComment}" =~ \$REGEX ]]; then
-          ROBOT_MISC_ARGS+="${robotFunctionalKeyword} "
-        fi
-        # Likewise for dataplane tests
-        REGEX="dataplane tests"
-        if [[ "${gerritComment}" =~ \$REGEX ]]; then
-          ROBOT_MISC_ARGS+="${robotDataplaneKeyword}"
-        fi
-
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests ${makeTarget} || true
-        """
-      }
-    }
-  }
-  post {
-    always {
-      // stop logging
-      sh """
-        P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${workFlow}" | grep -v grep | awk '{print \$1}')"
-        if [ -n "\$P_IDS" ]; then
-          echo \$P_IDS
-          for P_ID in \$P_IDS; do
-            kill -9 \$P_ID
-          done
-        fi
-        gzip $WORKSPACE/${workFlow}/onos-voltha-combined.log || true
-      """
-      sh '''
-      # stop the kail processes
-      list=($APPS_TO_LOG)
-      for app in "${list[@]}"
-      do
-        echo "Stopping logs for: ${app}"
-        _TAG="kail-$app"
-        P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
-        if [ -n "$P_IDS" ]; then
-          echo $P_IDS
-          for P_ID in $P_IDS; do
-            kill -9 $P_ID
-          done
-        fi
-      done
-      '''
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: 'RobotLogs/log*.html',
-        otherFiles: '',
-        outputFileName: 'RobotLogs/output*.xml',
-        outputPath: '.',
-        passThreshold: 100,
-        reportFileName: 'RobotLogs/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true]);
-      archiveArtifacts artifacts: '**/*.txt,**/*.gz,*.gz,**/*.log'
-    }
-  }
-}
-
-// refs/changes/06/24206/5
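In the E2E stage above, extra Robot tag filters are appended only when the triggering Gerrit comment contains the matching phrase. A minimal Groovy sketch of that gating; the helper name and tag strings are illustrative:

// Hedged sketch: map trigger phrases in the Gerrit comment to extra Robot tag filters.
def extraRobotTags(String gerritComment, String functionalTag, String dataplaneTag) {
    def args = ''
    if (gerritComment =~ /functional tests/) { args += "${functionalTag} " }
    if (gerritComment =~ /dataplane tests/)  { args += dataplaneTag }
    return args.trim()
}
// e.g. extraRobotTags(comment, '-i functionalDt', '-i dataplaneDt')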
diff --git a/jjb/pipeline/voltha/playground/voltha-dt-physical-functional-tests.groovy b/jjb/pipeline/voltha/playground/voltha-dt-physical-functional-tests.groovy
deleted file mode 100644
index 5a14eab..0000000
--- a/jjb/pipeline/voltha/playground/voltha-dt-physical-functional-tests.groovy
+++ /dev/null
@@ -1,332 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-
-  stages {
-    stage('Clone voltha-system-tests') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${branch}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    // This checkout allows us to show changes in Jenkins
-    // we only do this on master as we don't branch all the repos for all the releases
-    // (we should compute the difference by tracking the container version, not the code)
-    stage('Download All the VOLTHA repos') {
-      when {
-        expression {
-          return "${branch}" == 'master';
-        }
-      }
-      steps {
-       checkout(changelog: true,
-         poll: false,
-         scm: [$class: 'RepoScm',
-           manifestRepositoryUrl: "${params.manifestUrl}",
-           manifestBranch: "${params.branch}",
-           currentBranch: true,
-           destinationDir: 'voltha',
-           forceSync: true,
-           resetFirst: true,
-           quiet: true,
-           jobs: 4,
-           showAllChanges: true]
-         )
-      }
-    }
-    stage ('Initialize') {
-      steps {
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-           deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        }
-        installVoltctl("${branch}")
-        sh """
-        ps -ef | grep port-forward
-        """
-
-        sh returnStdout: false, script: '''
-        # remove orphaned port-forward from different namespaces
-        ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-        '''
-        sh """
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd ${params.VolthaEtcdPort}:2379; done"&
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-        ps aux | grep port-forward
-        """
-
-        sh("""ps -ef | grep port-forward""")
-
-        sh(returnStdout: false, script: """
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-           set +e
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        """)
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FunctionalTests"
-      }
-      steps {
-        sh """
-        ps -ef | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          if ( ${powerSwitch} ); then
-               export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanityDt -i functionalDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-               if ( ${powerCycleOlt} ); then
-                    ROBOT_MISC_ARGS+=" -v power_cycle_olt:True"
-               fi
-          else
-               export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanityDt -i functionalDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('FTTB Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_FTTB_Tests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FunctionalTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = true ]; then
-          export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i sanityDtFttb -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace} -v has_dataplane:False"
-          make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FailureScenarios"
-      }
-      steps {
-        sh """
-        ps -ef | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          if ( ${powerSwitch} ); then
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          else
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Dataplane Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/DataplaneTests"
-      }
-      steps {
-        sh """
-        ps -ef | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          export ROBOT_MISC_ARGS="--removekeywords wuks -i dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-    stage('HA Tests') {
-       environment {
-       ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-       ROBOT_FILE="Voltha_ONOSHATests.robot"
-       ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ONOSHAScenarios"
-      }
-      steps {
-        sh """
-        ps -ef | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Multiple OLT Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_MultiOLT_Tests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/MultipleOLTScenarios"
-      }
-      steps {
-        sh """
-        ps -ef | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          if ( ${powerSwitch} ); then
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          else
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-
-    stage('Error Scenario Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_ErrorScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/ErrorScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        fi
-        """
-      }
-    }
-  }
-  post {
-    always {
-      getPodsInfo("$WORKSPACE/pods")
-      sh returnStdout: false, script: '''
-      set +e
-
-      # collect logs collected in the Robot Framework StartLogging keyword
-      cd $WORKSPACE
-      gzip *-combined.log || true
-      rm *-combined.log || true
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-            if (olt.type == null || olt.type == "" || olt.type == "openolt") {
-              sh returnStdout: false, script: """
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
-              """
-            }
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
-    }
-  }
-}
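Several of these pipelines share the same port-forward hygiene: kill any orphaned kubectl port-forward first, then restart it in a keep-alive loop marked with JENKINS_NODE_COOKIE so Jenkins does not reap it. A minimal sketch of that idiom as a helper step; the function name is illustrative, while the namespace/service/port values mirror the stages above:

// Hedged sketch of the port-forward cleanup/keep-alive idiom used in these pipelines.
def restartPortForward(String ns, String svc, String ports) {
    sh """
    # drop any orphaned port-forward left over from earlier runs
    ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
    # keep the forward alive across restarts; dontKillMe stops Jenkins from reaping it
    JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl -n ${ns} port-forward --address 0.0.0.0 svc/${svc} ${ports}; done" &
    """
}
// e.g. restartPortForward('voltha', 'voltha-voltha-api', '55555:55555')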
diff --git a/jjb/pipeline/voltha/playground/voltha-physical-functional-tests.groovy b/jjb/pipeline/voltha/playground/voltha-physical-functional-tests.groovy
deleted file mode 100644
index 8565148..0000000
--- a/jjb/pipeline/voltha/playground/voltha-physical-functional-tests.groovy
+++ /dev/null
@@ -1,269 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-  stages {
-    stage('Clone voltha-system-tests') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${branch}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    stage('Download All the VOLTHA repos') {
-      when {
-        expression {
-          return "${branch}" == 'master';
-        }
-      }
-      steps {
-       checkout(changelog: true,
-         poll: false,
-         scm: [$class: 'RepoScm',
-           manifestRepositoryUrl: "${params.manifestUrl}",
-           manifestBranch: "${params.branch}",
-           currentBranch: true,
-           destinationDir: 'voltha',
-           forceSync: true,
-           resetFirst: true,
-           quiet: true,
-           jobs: 4,
-           showAllChanges: true]
-         )
-      }
-    }
-    stage ('Initialize') {
-      steps {
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-          deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        }
-        installVoltctl("${branch}")
-
-        sh(returnStdout: false, script: """
-
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        if [ "${params.branch}" == "master" ]; then
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-           set +e
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        fi
-        """)
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FunctionalTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-             if ( ${powerCycleOlt} ); then
-                  ROBOT_MISC_ARGS+=" -v power_cycle_olt:True"
-             fi
-        else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        fi
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
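-    // NOTE: in these test stages the voltha-test make target in voltha-system-tests is
-    // expected to pass ROBOT_MISC_ARGS through to the robot CLI; the trailing "|| true"
-    // keeps the stage green so the RobotPublisher step in the post section can still
-    // publish the results.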
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FailureScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        fi
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-
-    stage('Dataplane Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/DataplaneTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        export ROBOT_MISC_ARGS="--removekeywords wuks -i dataplane -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-    stage('HA Tests') {
-       environment {
-       ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-       ROBOT_FILE="Voltha_ONOSHATests.robot"
-       ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ONOSHAScenarios"
-      }
-      steps {
-       sh """
-       mkdir -p $ROBOT_LOGS_DIR
-       export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-       ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-       make -C $WORKSPACE/voltha-system-tests voltha-test || true
-       """
-      }
-    }
-
-    stage('Error Scenario Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_ErrorScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ErrorScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-  }
-  post {
-    always {
-      getPodsInfo("$WORKSPACE/pods")
-      sh returnStdout: false, script: '''
-      set +e
-
-      # collect logs collected in the Robot Framework StartLogging keyword
-      cd $WORKSPACE
-      gzip *-combined.log || true
-      rm *-combined.log || true
-
-      # store information on the running pods
-      kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-imagesId.txt || true
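-      # the image / imageID lists above record exactly which container versions were running during the test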
-
-
-      # collect ETCD cluster logs
-      mkdir -p $WORKSPACE/etcd
-      printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-          if (olt.type == null || olt.type == "" || olt.type == "openolt") {
-            sh returnStdout: false, script: """
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
-            """
-          }
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/playground/voltha-physical-soak-dt-tests.groovy b/jjb/pipeline/voltha/playground/voltha-physical-soak-dt-tests.groovy
deleted file mode 100644
index 6320cfb..0000000
--- a/jjb/pipeline/voltha/playground/voltha-physical-soak-dt-tests.groovy
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-def volthaNamespace = "voltha"
-def infraNamespace = "infra"
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-
-
-  stages {
-    stage('Clone voltha-system-tests') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${branch}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    // This checkout allows us to show changes in Jenkins
-    // we only do this on master as we don't branch all the repos for all the releases
-    // (we should compute the difference by tracking the container version, not the code)
-    stage('Download All the VOLTHA repos') {
-      when {
-        expression {
-          return "${branch}" == 'master';
-        }
-      }
-      steps {
-       checkout(changelog: true,
-         poll: false,
-         scm: [$class: 'RepoScm',
-           manifestRepositoryUrl: "${params.manifestUrl}",
-           manifestBranch: "${params.branch}",
-           currentBranch: true,
-           destinationDir: 'voltha',
-           forceSync: true,
-           resetFirst: true,
-           quiet: true,
-           jobs: 4,
-           showAllChanges: true]
-         )
-      }
-    }
-    stage ('Initialize') {
-      steps {
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-           deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        }
-        installVoltctl("${branch}")
-
-        sh(returnStdout: false, script: """
-
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        if [ "${params.branch}" == "master" ]; then
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-           set +e
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        fi
-        """)
-
-        sh("""
-        mkdir -p $WORKSPACE/voltha-pods-mem-consumption
-        cd $WORKSPACE/voltha-system-tests
-        make vst_venv
-        source ./vst_venv/bin/activate || true
-        # Collect initial memory consumption
-        python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption -a 0.0.0.0:31301 -n ${volthaNamespace} || true
-        """)
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FunctionalTests"
-      }
-      steps {
-        sh """
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="prometheus" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n cattle-prometheus svc/access-prometheus 31301:80; done"&
-        ps aux | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ "${params.testType}" == "Functional" ]; then
-            if ( ${powerSwitch} ); then
-                 export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-            else
-                 export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-            fi
-            ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-            make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FailureScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ "${params.testType}" == "Failure" ]; then
-           export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i soak -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-           ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-           make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Dataplane Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/DataplaneTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ "${params.testType}" == "Dataplane" ]; then
-           export ROBOT_MISC_ARGS="--removekeywords wuks -i soakDataplane -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-           ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-           make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-  }
-  post {
-    always {
-      getPodsInfo("$WORKSPACE/pods")
-      sh returnStdout: false, script: '''
-      set +e
-
-      # collect logs collected in the Robot Framework StartLogging keyword
-      cd $WORKSPACE
-      gzip *-combined.log || true
-      rm *-combined.log || true
-
-      # collect ETCD cluster logs
-      mkdir -p $WORKSPACE/etcd
-      printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-          sh returnStdout: false, script: """
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
-          """
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      // get cpu usage by container
-      sh """
-      mkdir -p $WORKSPACE/plots || true
-      cd $WORKSPACE/voltha-system-tests
-      source ./vst_venv/bin/activate || true
-      sleep 60 # we have to wait for prometheus to collect all the information
-      python scripts/sizing.py -o $WORKSPACE/plots -a 0.0.0.0:31301 -n ${volthaNamespace} -s 3600 || true
-      # Collect memory consumption of voltha pods once all the tests are complete
-      python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption -a 0.0.0.0:31301 -n ${volthaNamespace} || true
-      """
-      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt,plots/*,voltha-pods-mem-consumption/*'
-    }
-  }
-}
-
-// [EOF]
diff --git a/jjb/pipeline/voltha/playground/voltha-scale-lwc-test.groovy b/jjb/pipeline/voltha/playground/voltha-scale-lwc-test.groovy
deleted file mode 100644
index 84308ac..0000000
--- a/jjb/pipeline/voltha/playground/voltha-scale-lwc-test.groovy
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploys VOLTHA and performs a scale test with the LWC controller
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// [TODO] fix hardcoded path, an Achilles heel for testing.
-def lwc_helm_chart_path="/home/jenkins/Radisys_LWC_helm_charts"
-def value_file="/home/jenkins/lwc-values.yaml"
-def workflow="dt"
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-      timeout(time: 60, unit: 'MINUTES')
-  }
-  environment {
-    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
-    KUBECONFIG="$HOME/.kube/config"
-    VOLTCONFIG="$HOME/.volt/config"
-    SSHPASS="karaf"
-    VOLTHA_LOG_LEVEL="${logLevel}"
-    NUM_OF_BBSIM="${olts}"
-    NUM_OF_OPENONU="${openonuAdapterReplicas}"
-    NUM_OF_ONOS="${onosReplicas}"
-    NUM_OF_ATOMIX="${atomixReplicas}"
-    EXTRA_HELM_FLAGS=" "
-    LOG_FOLDER="$WORKSPACE/logs"
-    GERRIT_PROJECT="${GERRIT_PROJECT}"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-  }
-
-  stages {
-    stage ('Cleanup') {
-      steps {
-        script {
-          try {
-            timeout(time: 5, unit: 'MINUTES') {
-              sh returnStdout: false, script: '''
-              cd $WORKSPACE
-              rm -rf $WORKSPACE/*
-              '''
-              // removing the voltha-infra chart first
-              // if we don't ONOS might get stuck because of all the events when BBSim goes down
-              sh returnStdout: false, script: '''
-              set +x
-              helm del -n infra voltha-infra || true
-              helm del voltha-infra || true
-              echo -ne "\nWaiting for ONOS to be removed..."
-              onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
-              while [[ $onos != 0 ]]; do
-                onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
-                sleep 5
-                echo -ne "."
-              done
-              '''
-            }
-          } catch(org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-            // if we have a timeout in the Cleanup phase, ONOS most likely got stuck somewhere, thus force-remove the pods
-            sh '''
-              kubectl get pods | grep Terminating | awk '{print $1}' | xargs kubectl delete pod --force --grace-period=0
-            '''
-          }
-          timeout(time: 10, unit: 'MINUTES') {
-            script {
-              helmTeardown(["default", "voltha1", "voltha-infra"])
-            }
-            sh returnStdout: false, script: '''
-              helm repo add onf https://charts.opencord.org
-              helm repo update
-
-              # remove all persistent volume claims
-              kubectl delete pvc --all-namespaces --all
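-              # NOTE: pvc deletion is asynchronous, so poll until every claim is gone before redeploying charts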
-              PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-              while [[ \$PVCS != 0 ]]; do
-                sleep 5
-                PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-              done
-
-              # remove orphaned port-forward from different namespaces
-              ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${release}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        timeout(time: 5, unit: 'MINUTES') {
-          installVoltctl("${release}")
-          script {
-            startComponentsLogs([
-              appsToLog: [
-                'app.kubernetes.io/name=etcd',
-                'app.kubernetes.io/name=kafka',
-                'app=lwc',
-                'app=adapter-open-onu',
-                'app=adapter-open-olt',
-                'app=rw-core',
-                'app=bbsim',
-              ]
-            ])
-          }
-        }
-        timeout(time: 10, unit: 'MINUTES') {
-          sh """
-          cd /home/jenkins/Radisys_LWC_helm_charts
-
-          helm dep update ${lwc_helm_chart_path}/voltha-infra
-          helm upgrade --install --create-namespace -n infra voltha-infra ${lwc_helm_chart_path}/voltha-infra -f examples/${workflow}-values.yaml \
-            -f ${value_file} --wait
-
-          # helm dep update ${lwc_helm_chart_path}/voltha-stack
-          helm upgrade --install --create-namespace -n voltha1 voltha1 onf/voltha-stack \
-          --set voltha.ingress.enabled=true --set voltha.ingress.enableVirtualHosts=true --set voltha.fullHostnameOverride=voltha.scale1.dev \
-          -f ${value_file} --wait
-
-          helm upgrade --install -n voltha1 bbsim0 onf/bbsim --set olt_id=10 -f examples/${workflow}-values.yaml --set pon=${pons},onu=${onus} --version 4.6.0 --set oltRebootDelay=5 --wait
-          """
-        }
-      }
-    }
-    stage('Load MIB Template') {
-      when {
-        expression {
-          return params.withMibTemplate
-        }
-      }
-      steps {
-        sh """
-        # load MIB template
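-        # the template is written into etcd under the openonu-go omci_mibs path so the adapter
-        # can reuse a known MIB layout for BBSim ONUs instead of performing a full MIB upload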
-        wget ${mibTemplateUrl} -O mibTemplate.json
-        cat mibTemplate.json | kubectl exec -it -n infra \$(kubectl get pods -n infra |grep etcd-0 | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/12345123451234512345/BBSM_IMG_00001
-        """
-      }
-    }
-    stage('Run Test') {
-      steps {
-        sh """
-          mkdir -p $WORKSPACE/RobotLogs
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-
-          daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward -n infra svc/lwc 8182:8181 --address 0.0.0.0
-          daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward -n voltha1 svc/voltha1-voltha-api 55555 --address 0.0.0.0
-
-          source ./vst_venv/bin/activate
-          robot -d $WORKSPACE/RobotLogs \
-          --exitonfailure \
-          -v pon:${pons} -v onu:${onus} \
-          tests/scale/Voltha_Scale_Tests_lwc.robot
-
-          python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
-          cat $WORKSPACE/execution-time.txt
-        """
-      }
-    }
-  }
-  post {
-    always {
-      stopComponentsLogs()
-      script {
-        try {
-          step([$class: 'RobotPublisher',
-            disableArchiveOutput: false,
-            logFileName: '**/log*.html',
-            otherFiles: '',
-            outputFileName: '**/output*.xml',
-            outputPath: 'RobotLogs',
-            passThreshold: 100,
-            reportFileName: '**/report*.html',
-            onlyCritical: true,
-            unstableThreshold: 0]);
-        } catch (Exception e) {
-            println "Cannot archive Robot Logs: ${e.toString()}"
-        }
-      }
-      plot([
-        csvFileName: 'scale-test.csv',
-        csvSeries: [
-          [file: 'plots/plot-lwc-olts.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-lwc-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-lwc-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-lwc-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-        ],
-        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus}, UNIs: ${unis})", yaxis: 'Time (s)', useDescr: true
-      ])
-      getPodsInfo("$LOG_FOLDER")
-      archiveArtifacts artifacts: 'execution-time.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/playground/voltha-scale-multi-stack.groovy b/jjb/pipeline/voltha/playground/voltha-scale-multi-stack.groovy
deleted file mode 100644
index 8420da0..0000000
--- a/jjb/pipeline/voltha/playground/voltha-scale-multi-stack.groovy
+++ /dev/null
@@ -1,468 +0,0 @@
-// Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploys VOLTHA using kind-voltha and performs a scale test
-
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-      timeout(time: 120, unit: 'MINUTES')
-  }
-  environment {
-    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
-    KUBECONFIG="$HOME/.kube/config"
-    SSHPASS="karaf"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-
-    LOG_FOLDER="$WORKSPACE/logs"
-  }
-
-  stages {
-    stage ('Cleanup') {
-      steps {
-        timeout(time: 11, unit: 'MINUTES') {
-          script {
-            def namespaces = ["infra"]
-            // FIXME we may have leftovers from more VOLTHA stacks (e.g. run1 had 10 stacks, run2 had 2 stacks)
-            volthaStacks.toInteger().times {
-              namespaces += "voltha${it + 1}"
-            }
-            helmTeardown(namespaces)
-          }
-          sh returnStdout: false, script: '''
-            helm repo add onf https://charts.opencord.org
-            helm repo update
-
-            # remove all persistent volume claims
-            kubectl delete pvc --all-namespaces --all
-            PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-            while [[ \$PVCS != 0 ]]; do
-              sleep 5
-              PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-            done
-
-            # remove orphaned port-forward from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-
-            cd $WORKSPACE
-            rm -rf $WORKSPACE/*
-          '''
-        }
-      }
-    }
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${release}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Deploy common infrastructure') {
-      // includes monitoring
-      steps {
-        sh '''
-        if [ ${withMonitoring} = true ] ; then
-          helm install -n infra nem-monitoring cord/nem-monitoring \
-          -f $HOME/voltha-scale/grafana.yaml \
-          --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
-          --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
-        fi
-        '''
-      }
-    }
-    stage('Start logging') {
-      steps {
-        script {
-          startComponentsLogs([
-            appsToLog: [
-              'app.kubernetes.io/name=etcd',
-              'app.kubernetes.io/name=kafka',
-              'app=onos-classic',
-              'app=adapter-open-onu',
-              'app=adapter-open-olt',
-              'app=rw-core',
-              'app=ofagent',
-              'app=bbsim',
-              'app=radius',
-              'app=bbsim-sadis-server',
-              'app=onos-config-loader',
-            ]
-          ])
-        }
-      }
-    }
-    stage('Deploy VOLTHA infrastructure') {
-      steps {
-        timeout(time: 5, unit: 'MINUTES') {
-          script {
-            def localCharts = false
-            if (volthaHelmChartsChange != "" || release != "master") {
-              localCharts = true
-            }
-
-            def infraHelmFlags =
-                "--set global.log_level=${logLevel} " +
-                "--set radius.enabled=${withEapol} " +
-                "--set onos-classic.onosSshPort=30115 " +
-                "--set onos-classic.onosApiPort=30120 " +
-                params.extraHelmFlags
-
-            volthaInfraDeploy([
-              workflow: workflow,
-              infraNamespace: "infra",
-              extraHelmFlags: infraHelmFlags,
-              localCharts: localCharts,
-              onosReplica: onosReplicas,
-              atomixReplica: atomixReplicas,
-              kafkaReplica: kafkaReplicas,
-              etcdReplica: etcdReplicas,
-            ])
-          }
-        }
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        installVoltctl("${release}")
-        deploy_voltha_stacks(params.volthaStacks)
-      }
-    }
-    stage('Configuration') {
-      steps {
-        script {
-          sh returnStdout: false, script: """
-
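-          # NOTE: the "while true" wrappers restart kubectl port-forward if the tunnel drops,
-          # JENKINS_NODE_COOKIE=dontKillMe keeps the background processes alive after this step
-          # returns, and the later cleanup kills anything matching "port-forw"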
-          # forward ETCD port
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG=etcd-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/etcd 9999:2379; done 2>&1 " &
-
-          # forward ONOS ports
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8101:8101; done 2>&1 " &
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8181:8181; done 2>&1 " &
-
-          # make sure the port-forward has started before moving forward
-          sleep 5
-          """
-          sh returnStdout: false, script: """
-          # TODO this needs to be repeated per stack
-          # kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
-
-          #Setting link discovery
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000
-
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.onosproject
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.opencord
-
-          # Set Flows/Ports/Meters poll frequency
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
-
-          # SR is not needed in scale tests and is not currently used by operators in production, so it can be disabled.
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.onosproject.segmentrouting
-
-
-          if [ ${withFlows} = false ]; then
-            sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
-          fi
-          """
-        }
-      }
-    }
-    stage('Setup Test') {
-      steps {
-        sh '''
-          mkdir -p $WORKSPACE/RobotLogs
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-      }
-    }
-    stage('Run Test') {
-      steps {
-        test_voltha_stacks(params.volthaStacks)
-      }
-    }
-  }
-  post {
-    always {
-      stopComponentsLogs([compress: true])
-      // collect result, done in the "post" step so it's executed even in the
-      // event of a timeout in the tests
-      plot([
-        csvFileName: 'scale-test.csv',
-        csvSeries: [
-          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-        ],
-        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (Stacks: ${params.volthaStacks}, OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
-      ])
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: 'RobotLogs/**/log.html',
-        otherFiles: '',
-        outputFileName: 'RobotLogs/**/output.xml',
-        outputPath: '.',
-        passThreshold: 100,
-        reportFileName: 'RobotLogs/**/report.html',
-        onlyCritical: true,
-        unstableThreshold: 0]);
-      // get all the logs from kubernetes PODs
-      sh returnStdout: false, script: '''
-
-        # store information on running charts
-        helm ls --all-namespaces > $LOG_FOLDER/helm-list.txt || true
-
-        # store information on the running pods
-        kubectl get pods --all-namespaces -o wide > $LOG_FOLDER/pods.txt || true
-        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
-        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
-
-        # copy the ONOS logs directly from the container to avoid the color codes
-        printf '%s\n' $(kubectl get pods -n infra -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp -n infra #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
-
-      '''
-      // dump all the BBSim(s) ONU information
-      script {
-        for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
-          stack_ns="voltha"+i
-          sh """
-          mkdir -p \$LOG_FOLDER/${stack_ns}
-          BBSIM_IDS=\$(kubectl -n ${stack_ns} get pods | grep bbsim | grep -v server | awk '{print \$1}')
-          IDS=(\$BBSIM_IDS)
-
-          for bbsim in "\${IDS[@]}"
-          do
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl onu list > \$LOG_FOLDER/${stack_ns}/\$bbsim-device-list.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl service list > \$LOG_FOLDER/${stack_ns}/\$bbsim-service-list.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt resources GEM_PORT > \$LOG_FOLDER/${stack_ns}/\$bbsim-flows-gem-ports.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt resources ALLOC_ID > \$LOG_FOLDER/${stack_ns}/\$bbsim-flows-alloc-ids.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt pons > \$LOG_FOLDER/${stack_ns}/\$bbsim-pon-resources.txt || true
-          done
-          """
-        }
-      }
-      // get ONOS debug infos
-      sh '''
-
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt || true
-
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 netcfg > $LOG_FOLDER/onos-netcfg.txt || true
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg get > $LOG_FOLDER/onos-cfg.txt || true
-
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt || true
-
-        if [ ${withFlows} = true ] ; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt || true
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-port-status > $LOG_FOLDER/onos-volt-port-status.txt || true
-        fi
-
-        if [ ${provisionSubscribers} = true ]; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt || true
-        fi
-
-        if [ ${withEapol} = true ] ; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt || true
-        fi
-
-        if [ ${withDhcp} = true ] ; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt || true
-        fi
-      '''
-      // collect etcd metrics
-      sh '''
-        mkdir -p $WORKSPACE/etcd-metrics
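-        # query the Prometheus HTTP API (assumed to be the monitoring stack exposed on port 31301)
-        # for a snapshot of etcd keyspace size, handled RPCs, DB size and backend commit latency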
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
-      '''
-      // get VOLTHA debug infos
-      script {
-        for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
-          stack_ns="voltha"+i
-          voltcfg="~/.volt/config-voltha"+i
-          try {
-            sh """
-
-            # _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555& > /dev/null 2>&1
-            _TAG="voltha-port-forward" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1; done"&
-
-            voltctl -m 32MB device list -o json > $LOG_FOLDER/${stack_ns}/device-list.json || true
-            python -m json.tool $LOG_FOLDER/${stack_ns}/device-list.json > $LOG_FOLDER/${stack_ns}/voltha-devices-list.json || true
-            rm $LOG_FOLDER/${stack_ns}/device-list.json || true
-            voltctl -m 32MB device list > $LOG_FOLDER/${stack_ns}/voltha-devices-list.txt || true
-
-            DEVICE_LIST=
-            printf '%s\n' \$(voltctl -m 32MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB device flows # > $LOG_FOLDER/${stack_ns}/voltha-device-flows-#.txt" || true
-            printf '%s\n' \$(voltctl -m 32MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/${stack_ns}/voltha-device-ports-#.txt" || true
-
-            printf '%s\n' \$(voltctl -m 32MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB logicaldevice flows # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-flows-#.txt" || true
-            printf '%s\n' \$(voltctl -m 32MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB logicaldevice port list # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-ports-#.txt" || true
-
-            # remove VOLTHA port-forward
-            ps aux | grep port-forw | grep voltha-api | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
-            """
-          } catch(e) {
-            println e
-            sh '''
-            echo "Can't get device list from voltctl"
-            '''
-          }
-        }
-      }
-      // get cpu usage by container
-      sh '''
-      if [ ${withMonitoring} = true ] ; then
-        cd $WORKSPACE/voltha-system-tests
-        source ./vst_venv/bin/activate
-        sleep 60 # we have to wait for prometheus to collect all the information
-        python scripts/sizing.py -o $WORKSPACE/plots || true
-      fi
-      '''
-      archiveArtifacts artifacts: 'kind-voltha/install-*.log,execution-time-*.txt,logs/**/*.txt,logs/**/*.tar.gz,logs/**/*.tgz,RobotLogs/**/*,plots/*,etcd-metrics/*'
-    }
-  }
-}
-
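-// Deploys "numberOfStacks" VOLTHA stacks, one per voltha{i} namespace, all sharing the single
-// "infra" namespace deployed above; localCharts is enabled when a helm-charts change is under
-// test or when a non-master release is being built.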
-def deploy_voltha_stacks(numberOfStacks) {
-  for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
-    timeout(time: 5, unit: 'MINUTES') {
-      stage("Deploy VOLTHA stack " + i) {
-
-        def localCharts = false
-        if (volthaHelmChartsChange != "" || release != "master") {
-          localCharts = true
-        }
-
-        def volthaHelmFlags =
-                "--set global.log_level=${logLevel} " +
-                "--set enablePerf=true,onu=${onus},pon=${pons} " +
-                "--set securityContext.enabled=false " +
-                params.extraHelmFlags
-
-        volthaStackDeploy([
-          bbsimReplica: olts.toInteger(),
-          infraNamespace: "infra",
-          volthaNamespace: "voltha${i}",
-          stackName: "voltha${i}",
-          stackId: i,
-          workflow: workflow,
-          extraHelmFlags: volthaHelmFlags,
-          localCharts: localCharts,
-          onosReplica: onosReplicas,
-        ])
-      }
-    }
-  }
-}
-
-def test_voltha_stacks(numberOfStacks) {
-  for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
-    stage("Test VOLTHA stack " + i) {
-      timeout(time: 15, unit: 'MINUTES') {
-        sh """
-
-        # we are restarting the voltha-api port-forward for each stack, no need to have a different voltconfig file
-        voltctl -s 127.0.0.1:55555 config > $HOME/.volt/config
-        export VOLTCONFIG=$HOME/.volt/config
-
-        # _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555& > /dev/null 2>&1
-        _TAG="voltha-port-forward" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1; done"&
-
-        # wait a bit to make sure the port-forwarding has started
-        sleep 5
-
-
-          ROBOT_PARAMS="-v stackId:${i} \
-            -v olt:${olts} \
-            -v pon:${pons} \
-            -v onu:${onus} \
-            -v workflow:${workflow} \
-            -v withEapol:${withEapol} \
-            -v withDhcp:${withDhcp} \
-            -v withIgmp:${withIgmp} \
-            --noncritical non-critical \
-            -e igmp \
-            -e onu-upgrade \
-            -e teardown "
-
-          if [ ${withEapol} = false ] ; then
-            ROBOT_PARAMS+="-e authentication "
-          fi
-
-          if [ ${withDhcp} = false ] ; then
-            ROBOT_PARAMS+="-e dhcp "
-          fi
-
-          if [ ${provisionSubscribers} = false ] ; then
-            # if we're not considering subscribers then we don't care about authentication and dhcp
-            ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
-          fi
-
-          if [ ${withFlows} = false ] ; then
-            ROBOT_PARAMS+="-i setup -i activation "
-          fi
-
-          cd $WORKSPACE/voltha-system-tests
-          source ./vst_venv/bin/activate
-          robot -d $WORKSPACE/RobotLogs/voltha${i} \
-          \$ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-
-          # collect results
-          python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/voltha${i}/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time-voltha${i}.txt || true
-          cat $WORKSPACE/execution-time-voltha${i}.txt
-        """
-        sh """
-          # remove VOLTHA port-forward
-          ps aux | grep port-forw | grep voltha-api | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 2>&1 > /dev/null || true
-        """
-      }
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/playground/voltha-scale-test.groovy b/jjb/pipeline/voltha/playground/voltha-scale-test.groovy
deleted file mode 100644
index 88d6070..0000000
--- a/jjb/pipeline/voltha/playground/voltha-scale-test.groovy
+++ /dev/null
@@ -1,933 +0,0 @@
-// Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA and performs a scale test
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// this function generates the correct parameters for ofAgent
-// to connect to multiple ONOS instances
-def ofAgentConnections(numOfOnos, releaseName, namespace) {
-    def params = " "
-    numOfOnos.times {
-        params += "--set voltha.services.controller[${it}].address=${releaseName}-onos-classic-${it}.${releaseName}-onos-classic-hs.${namespace}.svc:6653 "
-    }
-    return params
-}
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-      timeout(time: 60, unit: 'MINUTES')
-  }
-  environment {
-    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
-    KUBECONFIG="$HOME/.kube/config"
-    VOLTCONFIG="$HOME/.volt/config"
-    SSHPASS="karaf"
-    VOLTHA_LOG_LEVEL="${logLevel}"
-    NUM_OF_BBSIM="${olts}"
-    NUM_OF_OPENONU="${openonuAdapterReplicas}"
-    NUM_OF_ONOS="${onosReplicas}"
-    NUM_OF_ATOMIX="${atomixReplicas}"
-    EXTRA_HELM_FLAGS=" "
-    LOG_FOLDER="$WORKSPACE/logs"
-    GERRIT_PROJECT="${GERRIT_PROJECT}"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-  }
-
-  stages {
-    stage ('Cleanup') {
-      steps {
-        script {
-          try {
-            timeout(time: 5, unit: 'MINUTES') {
-              sh returnStdout: false, script: '''
-              cd $WORKSPACE
-              rm -rf $WORKSPACE/*
-              '''
-              // remove the voltha-infra chart first;
-              // if we don't, ONOS might get stuck because of all the events generated when BBSim goes down
-              sh returnStdout: false, script: '''
-              set +x
-              helm del voltha-infra || true
-              echo -ne "\nWaiting for ONOS to be removed..."
-              onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
-              while [[ $onos != 0 ]]; do
-                onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
-                sleep 5
-                echo -ne "."
-              done
-              '''
-            }
-          } catch(org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-            // if we hit a timeout in the Cleanup phase, most likely ONOS got stuck somewhere, thus force-remove the pods
-            sh '''
-              kubectl get pods | grep Terminating | awk '{print $1}' | xargs kubectl delete pod --force --grace-period=0
-            '''
-          }
-          timeout(time: 10, unit: 'MINUTES') {
-            script {
-              helmTeardown(["default", "voltha1", "infra"])
-            }
-            sh returnStdout: false, script: '''
-              helm repo add onf https://charts.opencord.org
-              helm repo update
-
-              # remove all persistent volume claims
-              kubectl delete pvc --all-namespaces --all
-              PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-              while [[ \$PVCS != 0 ]]; do
-                sleep 5
-                PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-              done
-
-              # remove orphaned port-forward from different namespaces
-              ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${release}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Build patch') {
-      when {
-        expression {
-          return params.GERRIT_PROJECT
-        }
-      }
-      steps {
-        sh """
-        git clone https://\$GERRIT_HOST/\$GERRIT_PROJECT
-        cd \$GERRIT_PROJECT
-        git fetch https://\$GERRIT_HOST/\$GERRIT_PROJECT \$GERRIT_REFSPEC && git checkout FETCH_HEAD
-
-        DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-build
-        DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-push
-        """
-      }
-    }
-    stage('Deploy common infrastructure') {
-      steps {
-        sh '''
-        if [ ${withMonitoring} = true ] ; then
-          helm install nem-monitoring onf/nem-monitoring \
-          -f $HOME/voltha-scale/grafana.yaml \
-          --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
-          --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
-        fi
-        '''
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        timeout(time: 10, unit: 'MINUTES') {
-          installVoltctl("${release}")
-          script {
-            startComponentsLogs([
-              appsToLog: [
-                'app.kubernetes.io/name=etcd',
-                'app.kubernetes.io/name=kafka',
-                'app=voltha-infra-atomix',
-                'app=onos-classic',
-                'app=adapter-open-onu',
-                'app=adapter-open-olt',
-                'app=rw-core',
-                'app=ofagent',
-                'app=bbsim',
-                'app=radius',
-                'app=bbsim-sadis-server',
-                'app=onos-config-loader',
-              ]
-            ])
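-            // build EXTRA_HELM_FLAGS in shell and echo the result so it can be captured via returnStdout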
-            def returned_flags = sh (returnStdout: true, script: """
-
-              export EXTRA_HELM_FLAGS+=' '
-
-              # BBSim custom image handling
-              if [ '${bbsimImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'bbsim' ]; then
-                IFS=: read -r bbsimRepo bbsimTag <<< '${bbsimImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set images.bbsim.repository=\$bbsimRepo,images.bbsim.tag=\$bbsimTag "
-              fi
-
-              # VOLTHA custom image handling
-              if [ '${rwCoreImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-go' ]; then
-                IFS=: read -r rwCoreRepo rwCoreTag <<< '${rwCoreImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=\$rwCoreRepo,voltha.images.rw_core.tag=\$rwCoreTag "
-              fi
-
-              # ofAgent custom image handling
-              if [ '${ofAgentImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'of-agent' ]; then
-                IFS=: read -r ofAgentRepo ofAgentTag <<< '${ofAgentImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=\$ofAgentRepo,voltha.images.ofagent.tag=\$ofAgentTag "
-              fi
-
-              # OpenOLT custom image handling
-              if [ '${openoltAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openolt-adapter' ]; then
-                IFS=: read -r openoltAdapterRepo openoltAdapterTag <<< '${openoltAdapterImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha-adapter-openolt.images.adapter_open_olt.repository=\$openoltAdapterRepo,voltha-adapter-openolt.images.adapter_open_olt.tag=\$openoltAdapterTag "
-              fi
-
-              # OpenONU custom image handling
-              if [ '${openonuAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter' ]; then
-                IFS=: read -r openonuAdapterRepo openonuAdapterTag <<< '${openonuAdapterImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu.repository=\$openonuAdapterRepo,voltha-adapter-openonu.images.adapter_open_onu.tag=\$openonuAdapterTag "
-              fi
-
-              # OpenONU GO custom image handling
-              if [ '${openonuAdapterGoImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter-go' ]; then
-                IFS=: read -r openonuAdapterGoRepo openonuAdapterGoTag <<< '${openonuAdapterGoImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu_go.repository=\$openonuAdapterGoRepo,voltha-adapter-openonu.images.adapter_open_onu_go.tag=\$openonuAdapterGoTag "
-              fi
-
-              # ONOS custom image handling
-              if [ '${onosImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-onos' ]; then
-                IFS=: read -r onosRepo onosTag <<< '${onosImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=\$onosRepo,onos-classic.image.tag=\$onosTag "
-              fi
-
-              # set BBSim parameters
-              EXTRA_HELM_FLAGS+='--set enablePerf=true,pon=${pons},onu=${onus},uni=${unis} '
-
-              # disable the securityContext, this is a development cluster
-              EXTRA_HELM_FLAGS+='--set securityContext.enabled=false '
-
-              # No persistent-volume-claims in Atomix
-              EXTRA_HELM_FLAGS+="--set onos-classic.atomix.persistence.enabled=false "
-
-              # Use custom built images
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-go' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=${dockerRegistry}/voltha/voltha-rw-core,voltha.images.rw_core.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-openolt-adapter' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha-openolt-adapter.images.adapter_open_olt.repository=${dockerRegistry}/voltha/voltha-openolt-adapter,voltha-openolt-adapter.images.adapter_open_olt.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter.images.adapter_open_onu.repository=${dockerRegistry}/voltha/voltha-openonu-adapter,voltha-openonu-adapter.images.adapter_open_onu.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter-go' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter-go.images.adapter_open_onu_go.repository=${dockerRegistry}/voltha/voltha-openonu-adapter-go,voltha-openonu-adapter-go.images.adapter_open_onu_go.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'ofagent-go' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=${dockerRegistry}/voltha/voltha-ofagent-go,voltha.images.ofagent.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-onos' ]; then
-                EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=${dockerRegistry}/voltha/voltha-onos,onos-classic.image.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'bbsim' ]; then
-                EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
-              fi
-              echo \$EXTRA_HELM_FLAGS
-
-            """).trim()
-
-            def extraHelmFlags = returned_flags
-            // The added space before params.extraHelmFlags is required due to the .trim() above
-            def infraHelmFlags =
-              "--set global.log_level=${logLevel} " +
-              "--set radius.enabled=${withEapol} " +
-              "--set onos-classic.onosSshPort=30115 " +
-              "--set onos-classic.onosApiPort=30120 " +
-              extraHelmFlags + " " + params.extraHelmFlags
-
-            println "Passing the following parameters to the VOLTHA infra deploy: ${infraHelmFlags}."
-
-            def localCharts = false
-            if (volthaHelmChartsChange != "") {
-              localCharts = true
-            }
-
-            volthaInfraDeploy([
-              workflow: workflow,
-              infraNamespace: "default",
-              extraHelmFlags: infraHelmFlags,
-              localCharts: localCharts,
-              onosReplica: onosReplicas,
-              atomixReplica: atomixReplicas,
-              kafkaReplica: kafkaReplicas,
-              etcdReplica: etcdReplicas,
-            ])
-
-            stackHelmFlags = " --set onu=${onus},pon=${pons},uni=${unis} --set global.log_level=${logLevel.toLowerCase()} "
-            stackHelmFlags += " --set voltha.ingress.enabled=true --set voltha.ingress.enableVirtualHosts=true --set voltha.fullHostnameOverride=voltha.scale1.dev "
-            stackHelmFlags += extraHelmFlags + " " + params.extraHelmFlags
-
-            volthaStackDeploy([
-              bbsimReplica: olts.toInteger(),
-              infraNamespace: "default",
-              volthaNamespace: "default",
-              stackName: "voltha1", // TODO support custom charts
-              workflow: workflow,
-              extraHelmFlags: stackHelmFlags,
-              localCharts: localCharts,
-              onosReplica: onosReplicas,
-            ])
-            sh """
-              set +x
-
-              echo -ne "\nWaiting for VOLTHA and ONOS to start..."
-              voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
-              onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
-              while [[ \$voltha != 0 || \$onos != 0 ]]; do
-                sleep 5
-                echo -ne "."
-                voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
-                onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
-              done
-              echo -ne "\nVOLTHA and ONOS pods ready\n"
-              kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l
-              kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l
-            """
-            start_port_forward(olts)
-          }
-        }
-      }
-    }
-    stage('Configuration') {
-      steps {
-        script {
-          setOnosLogLevels([
-              onosNamespace: "default",
-              apps: [
-                'org.opencord.dhcpl2relay',
-                'org.opencord.olt',
-                'org.opencord.aaa',
-                'org.opencord.maclearner',
-                'org.onosproject.net.flowobjective.impl.FlowObjectiveManager',
-                'org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager'
-              ],
-              logLevel: logLevel
-          ])
-          def tech_prof_directory = "XGS-PON"
-          sh returnStdout: false, script: """
-          # Setting link discovery
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
-
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000
-
-          # BBSim logs at debug level don't slow down the system much and are very helpful while troubleshooting
-          BBSIM_IDS=\$(kubectl get pods | grep bbsim | grep -v server | awk '{print \$1}')
-          IDS=(\$BBSIM_IDS)
-
-          for bbsim in "\${IDS[@]}"
-          do
-            kubectl exec -t \$bbsim -- bbsimctl log debug false
-          done
-
-          # Set Flows/Ports/Meters/Groups poll frequency
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.group.impl.OpenFlowGroupProvider groupPollInterval ${onosGroupInterval}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.FlowObjectiveManager numThreads ${flowObjWorkerThreads}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager objectiveTimeoutMs 300000
-
-          if [ ${withFlows} = false ]; then
-            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 app deactivate org.opencord.olt
-          fi
-
-          if [ '${workflow}' = 'tt' ]; then
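-            # load the TT technology profiles (HSIA, VoIP, MCAST) into etcd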
-            etcd_container=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$2}')
-            kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
-            put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64')
-            kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
-            put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65')
-            kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast.json
-            put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66')
-          fi
-
-          if [ ${withPcap} = true ] ; then
-            # Start the tcp-dump in ofagent
-            export OF_AGENT=\$(kubectl get pods -l app=ofagent -o name)
-            kubectl exec \$OF_AGENT -- apk update
-            kubectl exec \$OF_AGENT -- apk add tcpdump
-            _TAG=ofagent-tcpdump kubectl exec \$OF_AGENT -- tcpdump -nei eth0 -w out.pcap&
-
-            # Start the tcp-dump in radius
-            export RADIUS=\$(kubectl get pods -l app=radius -o name)
-            kubectl exec \$RADIUS -- apt-get update
-            kubectl exec \$RADIUS -- apt-get install -y tcpdump
-            _TAG=radius-tcpdump kubectl exec \$RADIUS -- tcpdump -w out.pcap&
-
-            # Start the tcp-dump in ONOS
-            for i in \$(seq 0 \$((NUM_OF_ONOS - 1))); do
-              INSTANCE="onos-onos-classic-\$i"
-              kubectl exec \$INSTANCE -- apt-get update
-              kubectl exec \$INSTANCE -- apt-get install -y tcpdump
-              kubectl exec \$INSTANCE -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
-              _TAG=\$INSTANCE kubectl exec \$INSTANCE -- /usr/bin/tcpdump -nei eth0 port 1812 -w out.pcap&
-            done
-          fi
-          """
-        }
-      }
-    }
-    stage('Load MIB Template') {
-      when {
-        expression {
-          return params.withMibTemplate
-        }
-      }
-      steps {
-        sh """
-        # load MIB template
-        wget ${mibTemplateUrl} -O mibTemplate.json
-        cat mibTemplate.json | kubectl exec -it \$(kubectl get pods |grep etcd-0 | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/v0.0.1/BBSM_IMG_00001
-        """
-      }
-    }
-    stage('Run Test') {
-      steps {
-        sh '''
-          mkdir -p $WORKSPACE/RobotLogs
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-        sh '''
-          if [ ${withProfiling} = true ] ; then
-            mkdir -p $LOG_FOLDER/pprof
-            echo $PATH
-            # Create a script to periodically collect pprof profiles from rw-core, openolt and ofagent
-            cat << EOF > $WORKSPACE/pprof.sh
-timestamp() {
-  date +"%T"
-}
-
-i=0
-while [[ true ]]; do
-  ((i++))
-  ts=\\$(timestamp)
-  go tool pprof -png http://127.0.0.1:6060/debug/pprof/heap > $LOG_FOLDER/pprof/rw-core-heap-\\$i-\\$ts.png
-  go tool pprof -png http://127.0.0.1:6060/debug/pprof/goroutine > $LOG_FOLDER/pprof/rw-core-goroutine-\\$i-\\$ts.png
-  curl -o $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof http://127.0.0.1:6060/debug/pprof/profile?seconds=10
-  go tool pprof -png $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.png
-
-  go tool pprof -png http://127.0.0.1:6061/debug/pprof/heap > $LOG_FOLDER/pprof/openolt-heap-\\$i-\\$ts.png
-  go tool pprof -png http://127.0.0.1:6061/debug/pprof/goroutine > $LOG_FOLDER/pprof/openolt-goroutine-\\$i-\\$ts.png
-  curl -o $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof http://127.0.0.1:6061/debug/pprof/profile?seconds=10
-  go tool pprof -png $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.png
-
-  go tool pprof -png http://127.0.0.1:6062/debug/pprof/heap > $LOG_FOLDER/pprof/ofagent-heap-\\$i-\\$ts.png
-  go tool pprof -png http://127.0.0.1:6062/debug/pprof/goroutine > $LOG_FOLDER/pprof/ofagent-goroutine-\\$i-\\$ts.png
-  curl -o $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof http://127.0.0.1:6062/debug/pprof/profile?seconds=10
-  go tool pprof -png $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.png
-
-  sleep 10
-done
-EOF
-
-            _TAG="pprof"
-            _TAG=$_TAG bash $WORKSPACE/pprof.sh &
-          fi
-        '''
-        timeout(time: "${testTimeout.toInteger() + 5}", unit: 'MINUTES') {
-          sh '''
-            ROBOT_PARAMS="--exitonfailure \
-              -v olt:${olts} \
-              -v pon:${pons} \
-              -v onu:${onus} \
-              -v ONOS_SSH_PORT:30115 \
-              -v ONOS_REST_PORT:30120 \
-              -v workflow:${workflow} \
-              -v withEapol:${withEapol} \
-              -v withDhcp:${withDhcp} \
-              -v withIgmp:${withIgmp} \
-              -v timeout:${testTimeout}m \
-              -v withMaclearning:${withMaclearning} \
-              --noncritical non-critical \
-              -e onu-upgrade -e igmp -e teardown "
-
-            if [ ${withEapol} = false ] ; then
-              ROBOT_PARAMS+="-e authentication "
-            fi
-
-            if [ ${withDhcp} = false ] ; then
-              ROBOT_PARAMS+="-e dhcp "
-            fi
-
-            if [ ${provisionSubscribers} = false ] ; then
-              # if we're not considering subscribers then we don't care about authentication and dhcp
-              ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
-            fi
-
-            if [ ${withFlows} = false ] ; then
-              ROBOT_PARAMS+="-i setup -i activation "
-            fi
-
-            if [ ${withOnuUpgrade} = true ] ; then
-              ROBOT_PARAMS+="-e flow-before "
-            fi
-
-            cd $WORKSPACE/voltha-system-tests
-            source ./vst_venv/bin/activate
-            robot -d $WORKSPACE/RobotLogs \
-            $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-
-            python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
-            cat $WORKSPACE/execution-time.txt
-          '''
-        }
-      }
-    }
-    stage('Run ONU Upgrade Tests') {
-      environment {
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/OnuUpgradeTests"
-      }
-      when {
-        expression {
-          return params.withOnuUpgrade
-        }
-      }
-      options {
-          timeout(time: "${testTimeout.toInteger() + 1}", unit: 'MINUTES')
-      }
-      steps {
-        sh '''
-          set +e
-          mkdir -p $ROBOT_LOGS_DIR
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-        script {
-          Exception caughtException = null
-
-          catchError(buildResult: 'SUCCESS', stageResult: 'ABORTED') {
-            try {
-              sh '''
-                ROBOT_PARAMS="--exitonfailure \
-                  -v olt:${olts} \
-                  -v pon:${pons} \
-                  -v onu:${onus} \
-                  -v workflow:${workflow} \
-                  -v withEapol:${withEapol} \
-                  -v withDhcp:${withDhcp} \
-                  -v withIgmp:${withIgmp} \
-                  -v timeout:${testTimeout}m \
-                  -v withMaclearning:${withMaclearning} \
-                  -v image_version:BBSM_IMG_00002 \
-                  -v image_url:http://bbsim0:50074/images/software-image.img \
-                  -v image_vendor:BBSM \
-                  -v image_activate_on_success:false \
-                  -v image_commit_on_success:false \
-                  -v image_crc:0 \
-                  -v ONOS_SSH_PORT:30115 \
-                  -v ONOS_REST_PORT:30120 \
-                  --noncritical non-critical \
-                  -i onu-upgrade \
-                  -e setup -e activation -e flow-before \
-                  -e authentication -e provision -e flow-after \
-                  -e dhcp -e igmp -e teardown "
-                cd $WORKSPACE/voltha-system-tests
-                source ./vst_venv/bin/activate
-                robot -d $ROBOT_LOGS_DIR \
-                $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-              '''
-            } catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-              // if the error is a timeout don't mark the build as failed
-              println "ONU Upgrade test timed out"
-            } catch (Throwable e) {
-              caughtException = e
-            }
-          }
-
-          if (caughtException) {
-            error caughtException.message
-          }
-        }
-      }
-    }
-    stage('Run Igmp Tests') {
-      environment {
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/IgmpTests"
-      }
-      when {
-        expression {
-          return params.withIgmp
-        }
-      }
-      options {
-          timeout(time: "${testTimeout.toInteger() + 1}", unit: 'MINUTES')
-      }
-      steps {
-        sh returnStdout: false, script: """
-          # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.onosproject.store.group.impl
-        """
-        sh '''
-          set +e
-          mkdir -p $ROBOT_LOGS_DIR
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-        script {
-          Exception caughtException = null
-
-          catchError(buildResult: 'SUCCESS', stageResult: 'ABORTED') {
-            try {
-              sh '''
-                ROBOT_PARAMS="--exitonfailure \
-                  -v olt:${olts} \
-                  -v pon:${pons} \
-                  -v onu:${onus} \
-                  -v workflow:${workflow} \
-                  -v withEapol:${withEapol} \
-                  -v withDhcp:${withDhcp} \
-                  -v withIgmp:${withIgmp} \
-                  -v timeout:${testTimeout}m \
-                  -v withMaclearning:${withMaclearning} \
-                  -v ONOS_SSH_PORT:30115 \
-                  -v ONOS_REST_PORT:30120 \
-                  --noncritical non-critical \
-                  -i igmp \
-                  -e setup -e activation -e flow-before \
-                  -e authentication -e provision -e flow-after \
-                  -e dhcp -e onu-upgrade -e teardown "
-                cd $WORKSPACE/voltha-system-tests
-                source ./vst_venv/bin/activate
-                robot -d $ROBOT_LOGS_DIR \
-                $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-              '''
-            } catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-              // if the error is a timeout don't mark the build as failed
-              println "IGMP test timed out"
-            } catch (Throwable e) {
-              caughtException = e
-            }
-          }
-
-          if (caughtException) {
-            error caughtException.message
-          }
-        }
-      }
-    }
-    stage("Device removal") {
-      options {
-          timeout(time: "${testTimeout.toInteger() + 5}", unit: 'MINUTES')
-      }
-      steps {
-        sh '''
-          set +e
-          mkdir -p $ROBOT_LOGS_DIR
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-        script {
-          Exception caughtException = null
-
-          catchError(buildResult: 'SUCCESS', stageResult: 'ABORTED') {
-            try {
-              sh '''
-                ROBOT_PARAMS="--exitonfailure \
-                  -v olt:${olts} \
-                  -v pon:${pons} \
-                  -v onu:${onus} \
-                  -v ONOS_SSH_PORT:30115 \
-                  -v ONOS_REST_PORT:30120 \
-                  -v workflow:${workflow} \
-                  -v withEapol:${withEapol} \
-                  -v withDhcp:${withDhcp} \
-                  -v withIgmp:${withIgmp} \
-                  -v timeout:${testTimeout}m \
-                  -v withMaclearning:${withMaclearning} \
-                  --noncritical non-critical \
-                  -i teardown"
-
-                  cd $WORKSPACE/voltha-system-tests
-                  source ./vst_venv/bin/activate
-                  robot -d $WORKSPACE/RobotLogs \
-                  $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-                '''
-            } catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-              // if the error is a timeout don't mark the build as failed
-              println "Cleanup test timed out"
-            } catch (Throwable e) {
-              caughtException = e
-            }
-          }
-        }
-      }
-    }
-  }
-  post {
-    always {
-      stopComponentsLogs()
-      // collect result, done in the "post" step so it's executed even in the
-      // event of a timeout in the tests
-      sh '''
-        if [ ${withPcap} = true ] ; then
-          # stop ofAgent tcpdump
-          P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
-          if [ -n "\$P_ID" ]; then
-            kill -9 \$P_ID
-          fi
-
-          # stop radius tcpdump
-          P_ID="\$(ps e -ww -A | grep "_TAG=radius-tcpdump" | grep -v grep | awk '{print \$1}')"
-          if [ -n "\$P_ID" ]; then
-            kill -9 \$P_ID
-          fi
-
-          # stop onos tcpdump
-          LIMIT=$(($NUM_OF_ONOS - 1))
-          for i in $(seq 0 $LIMIT); do
-            INSTANCE="onos-onos-classic-$i"
-            P_ID="\$(ps e -ww -A | grep "_TAG=$INSTANCE" | grep -v grep | awk '{print \$1}')"
-            if [ -n "\$P_ID" ]; then
-              kill -9 \$P_ID
-            fi
-          done
-
-          # copy the file
-          export OF_AGENT=$(kubectl get pods -l app=ofagent | awk 'NR==2{print $1}') || true
-          kubectl cp $OF_AGENT:out.pcap $LOG_FOLDER/ofagent.pcap || true
-
-          export RADIUS=$(kubectl get pods -l app=radius | awk 'NR==2{print $1}') || true
-          kubectl cp $RADIUS:out.pcap $LOG_FOLDER/radius.pcap || true
-
-          LIMIT=$(($NUM_OF_ONOS - 1))
-          for i in $(seq 0 $LIMIT); do
-            INSTANCE="onos-onos-classic-$i"
-            kubectl cp $INSTANCE:out.pcap $LOG_FOLDER/$INSTANCE.pcap || true
-          done
-        fi
-      '''
-      sh '''
-        if [ ${withProfiling} = true ] ; then
-          _TAG="pprof"
-          P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
-          if [ -n "$P_IDS" ]; then
-            echo $P_IDS
-            for P_ID in $P_IDS; do
-              kill -9 $P_ID
-            done
-          fi
-        fi
-      '''
-      plot([
-        csvFileName: 'scale-test.csv',
-        csvSeries: [
-          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-        ],
-        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus}, UNIs: ${unis})", yaxis: 'Time (s)', useDescr: true
-      ])
-      script {
-        try {
-          step([$class: 'RobotPublisher',
-            disableArchiveOutput: false,
-            logFileName: '**/log*.html',
-            otherFiles: '',
-            outputFileName: '**/output*.xml',
-            outputPath: 'RobotLogs',
-            passThreshold: 100,
-            reportFileName: '**/report*.html',
-            onlyCritical: true,
-            unstableThreshold: 0]);
-        } catch (Exception e) {
-            println "Cannot archive Robot Logs: ${e.toString()}"
-        }
-      }
-
-      getPodsInfo("$LOG_FOLDER")
-      // get all the logs from kubernetes PODs
-      sh returnStdout: false, script: '''
-        # copy the ONOS logs directly from the container to avoid the color codes
-        printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
-
-        # get ONOS cfg from the 3 nodes
-        # kubectl exec -t voltha-infra-onos-classic-0 -- sh /root/onos/${karafHome}/bin/client cfg get > ~/voltha-infra-onos-classic-0-cfg.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-1 -- sh /root/onos/${karafHome}/bin/client cfg get > ~/voltha-infra-onos-classic-1-cfg.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-2 -- sh /root/onos/${karafHome}/bin/client cfg get > ~/voltha-infra-onos-classic-2-cfg.txt || true
-
-        # kubectl exec -t voltha-infra-onos-classic-0 -- sh /root/onos/${karafHome}/bin/client obj-next-ids > ~/voltha-infra-onos-classic-0-next-objs.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-1 -- sh /root/onos/${karafHome}/bin/client obj-next-ids > ~/voltha-infra-onos-classic-1-next-objs.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-2 -- sh /root/onos/${karafHome}/bin/client obj-next-ids > ~/voltha-infra-onos-classic-2-next-objs.txt || true
-
-        # get radius logs out of the container
-        kubectl cp $(kubectl get pods -l app=radius --no-headers  | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER/radius.log || true
-      '''
-      // dump all the BBSim(s) ONU information
-      sh '''
-      BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
-      IDS=($BBSIM_IDS)
-
-      for bbsim in "${IDS[@]}"
-      do
-        kubectl exec -t $bbsim -- bbsimctl onu list > $LOG_FOLDER/$bbsim-device-list.txt || true
-        kubectl exec -t $bbsim -- bbsimctl uni list > $LOG_FOLDER/$bbsim-uni-list.txt || true
-        kubectl exec -t $bbsim -- bbsimctl service list > $LOG_FOLDER/$bbsim-service-list.txt || true
-        kubectl exec -t $bbsim -- bbsimctl olt resources GEM_PORT > $LOG_FOLDER/$bbsim-flows-gem-ports.txt || true
-        kubectl exec -t $bbsim -- bbsimctl olt resources ALLOC_ID > $LOG_FOLDER/$bbsim-flows-alloc-ids.txt || true
-        kubectl exec -t $bbsim -- bbsimctl olt pons > $LOG_FOLDER/$bbsim-pon-resources.txt || true
-      done
-      '''
-      script {
-        // first make sure the port-forward is still running,
-        // sometimes Jenkins kills it regardless of the JENKINS_NODE_COOKIE=dontKillMe
-        def running = sh (
-            script: 'ps aux | grep port-forw | grep -E "onos|voltha" | grep -v grep | wc -l',
-            returnStdout: true
-        ).trim()
-        // if any of the voltha-api, onos-rest, onos-ssh port-forwards are not there
-        // kill all and restart
-        if (running != "3") {
-          start_port_forward(olts)
-        }
-      }
-      // get ONOS debug infos
-      sh '''
-
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg get > $LOG_FOLDER/onos-cfg.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 netcfg > $LOG_FOLDER/onos-netcfg.txt
-
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt
-
-        if [ ${withFlows} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-port-status > $LOG_FOLDER/onos-volt-port-status.txt
-        fi
-
-        if [ ${provisionSubscribers} = true ]; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt
-        fi
-
-        if [ ${withEapol} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt
-        fi
-
-        if [ ${withDhcp} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt
-        fi
-
-        if [ ${withIgmp} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-routes > $LOG_FOLDER/onos-mcast-host-routes.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-show > $LOG_FOLDER/onos-mcast-host-show.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 groups > $LOG_FOLDER/onos-groups.txt
-        fi
-
-        if [ ${withMaclearning} = true ] ; then
-           sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mac-learner-get-mapping > $LOG_FOLDER/onos-maclearning-host-mappings.txt
-        fi
-      '''
-      // collect etcd metrics
-      sh '''
-        mkdir -p $WORKSPACE/etcd-metrics
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time-sum.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_bucket' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time-bucket.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_wal_fsync_duration_seconds_bucket' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-wal-fsync-time-bucket.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_network_peer_round_trip_time_seconds_bucket' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-network-peer-round-trip-time-seconds.json || true
-        etcd_namespace=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$1}')
-        etcd_container=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$2}')
-        kubectl exec -it -n  \$etcd_namespace \$etcd_container -- etcdctl defrag --cluster || true
-        kubectl exec -it -n  \$etcd_namespace \$etcd_container -- etcdctl endpoint status -w table > $WORKSPACE/etcd-metrics/etcd-status-table.txt || true
-
-      '''
-      // get VOLTHA debug infos
-      script {
-        try {
-          sh '''
-          voltctl -m 32MB device list -o json > $LOG_FOLDER/device-list.json || true
-          python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
-          rm $LOG_FOLDER/device-list.json || true
-          voltctl -m 32MB device list > $LOG_FOLDER/voltha-devices-list.txt || true
-
-          printf '%s\n' $(voltctl -m 32MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
-              printf '%s\n' $(voltctl -m 32MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true
-
-          printf '%s\n' $(voltctl -m 32MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
-          printf '%s\n' $(voltctl -m 32MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
-          '''
-        } catch(e) {
-          sh '''
-          echo "Can't get device list from voltctl"
-          '''
-        }
-      }
-      // get cpu usage by container
-      sh '''
-      if [ ${withMonitoring} = true ] ; then
-        cd $WORKSPACE/voltha-system-tests
-        make vst_venv
-        source ./vst_venv/bin/activate || true
-        sleep 60 # we have to wait for prometheus to collect all the information
-        python scripts/sizing.py -o $WORKSPACE/plots || true
-      fi
-      '''
-      archiveArtifacts artifacts: 'execution-time.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
-    }
-  }
-}
-
-def start_port_forward(olts) {
-  sh """
-  bbsimRestPortFwd=50071
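-  # expose each BBSim instance's REST API on its own local port, starting at 50071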
-  for i in {0..${olts.toInteger() - 1}}; do
-    daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/bbsim\${i} \${bbsimRestPortFwd}:50071
-    ((bbsimRestPortFwd++))
-  done
-  """
-}
diff --git a/jjb/pipeline/voltha/playground/voltha-tt-physical-functional-tests.groovy b/jjb/pipeline/voltha/playground/voltha-tt-physical-functional-tests.groovy
deleted file mode 100644
index 9a3c84e..0000000
--- a/jjb/pipeline/voltha/playground/voltha-tt-physical-functional-tests.groovy
+++ /dev/null
@@ -1,354 +0,0 @@
-// -*- groovy -*-
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-// -----------------------------------------------------------------------
-// -----------------------------------------------------------------------
-def getIam(String func)
-{
-    // Cannot rely on a stack trace due to jenkins manipulation
-    String src = [
-        'jjb',
-        'pipeline',
-        'voltha',
-        'playground',
-        'voltha-tt-physical-functional-tests.groovy'
-    ].join('/')
-
-    String iam = [src, func].join('::')
-    iam += sprintf('[ver:1.0]')
-    return iam
-}
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-pipeline
-{
-    /* no label, executor is determined by JJB */
-    agent
-    {
-        label "${params.buildNode}"
-    }
-
-    options
-    {
-        timeout(time: "${timeout}", unit: 'MINUTES')
-    }
-
-    environment
-    {
-        KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-        VOLTCONFIG="$HOME/.volt/config-minimal"
-        PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-    }
-
-    stages {
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        stage('Clone voltha-system-tests')
-        {
-            steps
-            {
-                iam
-                {
-                    enter = true
-                    label = getIam()
-                }
-
-                step([$class: 'WsCleanup'])
-                checkout([
-                    $class: 'GitSCM',
-                    userRemoteConfigs: [[
-                        url: "https://gerrit.opencord.org/voltha-system-tests",
-                        refspec: "${volthaSystemTestsChange}"
-                    ]],
-                    branches: [[ name: "${branch}" ]],
-                    extensions: [
-                        [$class: 'WipeWorkspace'],
-                        [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-                        [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-                    ],
-                ]) // checkout
-
-                script
-                {
-                    sh(
-                        returnStatus: true,
-                        // returnStdout: true,
-                        script: """
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd "$WORKSPACE/voltha-system-tests"
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange}
-              git checkout FETCH_HEAD
-              exit 1  # verify fail
-            fi
-            """)
-                } // step
-
-                iam
-                {
-                    leave = true
-                    label = getIam()
-                }
-            } // steps
-        } // stage
-
-        // -----------------------------------------------------------------------
-        // This checkout allows us to show changes in Jenkins
-        // we only do this on master as we don't branch all the repos for all the releases
-        // (we should compute the difference by tracking the container version, not the code)
-        // -----------------------------------------------------------------------
-        stage('Download All the VOLTHA repos')
-        {
-            when {
-                expression { return "${branch}" == 'master'; }
-            }
-
-            steps {
-                checkout(changelog: true,
-                         poll: false,
-                         scm: [$class: 'RepoScm',
-                               manifestRepositoryUrl: "${params.manifestUrl}",
-                               manifestBranch: "${params.branch}",
-                               currentBranch: true,
-                               destinationDir: 'voltha',
-                               forceSync: true,
-                               resetFirst: true,
-                               quiet: true,
-                               jobs: 4,
-                               showAllChanges: true]
-                )
-            }
-        }
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        stage ('Initialize')
-        {
-            steps
-            {
-                sh(
-                    returnStatus: true,
-                    returnStdout: false,
-                    script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-                )
-                script
-                {
-                    deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-                }
-
-                installVoltctl("${branch}")
-
-                sh(
-                    returnStatus: true,
-                    returnStdout: false,
-                    script: """
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-        if [ "${params.branch}" == "master" ]; then
-           set +e
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        fi
-        """)
-            } // step
-        } // stage
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        stage('Functional Tests')
-        {
-            environment
-            {
-                ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-                ROBOT_FILE="Voltha_TT_PODTests.robot"
-                ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/FunctionalTests"
-            }
-
-            steps {
-                sh("""
-        mkdir -p $ROBOT_LOGS_DIR
-        if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i functionalTT -i PowerSwitch -i sanityTT -i sanityTT-MCAST -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-             if ( ${powerCycleOlt} ); then
-                  ROBOT_MISC_ARGS+=" -v power_cycle_olt:True"
-             fi
-        else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i functionalTT -e PowerSwitch -i sanityTT -i sanityTT-MCAST -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        fi
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-tt-test
-        """)
-            }
-        } // stage
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        stage('Failure/Recovery Tests')
-        {
-            environment
-            {
-                ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-                ROBOT_FILE="Voltha_TT_FailureScenarios.robot"
-                ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/FailureScenarios"
-            }
-
-            steps
-            {
-                sh("""
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.enableMultiUni} = false ]; then
-          if ( ${powerSwitch} ); then
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          else
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
-        fi
-        """)
-            }
-        } // stage
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        stage('Multi-Tcont Tests')
-        {
-            environment
-            {
-                ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-                ROBOT_FILE="Voltha_TT_MultiTcontTests.robot"
-                ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/MultiTcontScenarios"
-                ROBOT_TEST_INPUT_FILE="$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-TT-multi-tcont-tests-input.yaml"
-            }
-
-            steps
-            {
-                sh("""
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.enableMultiUni} = false ]; then
-          if ( ${powerSwitch} ); then
-            export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
-          else
-            export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
-        fi
-        """)
-            }
-        }
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        stage('Multicast Tests')
-        {
-            environment
-            {
-                ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-                ROBOT_FILE="Voltha_TT_MulticastTests.robot"
-                ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/MulticastTests"
-                ROBOT_TEST_INPUT_FILE="$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-TT-multicast-tests-input.yaml"
-            }
-
-            steps
-            {
-                sh("""
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.enableMultiUni} = true ]; then
-          if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i multicastTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
-          else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i multicastTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
-        fi
-        """)
-            }
-        }
-    }
-
-    post
-    {
-        always
-        {
-            getPodsInfo("$WORKSPACE/pods")
-            sh(returnStdout: false, script: '''
-      set +e
-
-      # collect logs collected in the Robot Framework StartLogging keyword
-      cd $WORKSPACE
-      gzip *-combined.log || true
-      rm *-combined.log || true
-      ''')
-
-            script {
-        deployment_config.olts.each { olt ->
-        if (olt.type == null || olt.type == "" || olt.type == "openolt")
-        {
-                sh(returnStdout: false, script: """
-             sshpass -p ${olt.pass} scp ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-             sshpass -p ${olt.pass} scp ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-             sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-             sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-             """)
-                    }
-                }
-            }
-
-            step([$class: 'RobotPublisher',
-                  disableArchiveOutput: false,
-                  logFileName: '**/log*.html',
-                  otherFiles: '',
-                  outputFileName: '**/output*.xml',
-                  outputPath: 'RobotLogs',
-                  passThreshold: 100,
-                  reportFileName: '**/report*.html',
-                  unstableThreshold: 0,
-                  onlyCritical: true
-            ]);
-            archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
-        } // always
-    } // post
-} // pipeline
-
-// [EOF]
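
For readers skimming the removed pipeline above: its post/always block gathers per-OLT logs by looping over the parsed deployment_config. A condensed, illustrative Groovy sketch of that loop follows (not part of this change; it assumes the same olt fields -- user, pass, sship, type -- read via readYaml earlier in the job):

    // Illustrative sketch only -- mirrors the log collection in the post/always block above.
    deployment_config.olts.each { olt ->
        // default/openolt boards expose their logs under /var/log on the OLT itself
        if (olt.type == null || olt.type == '' || olt.type == 'openolt') {
            sh(returnStdout: false, script: """
                sshpass -p ${olt.pass} scp ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
            """)
        }
    }
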
diff --git a/jjb/pipeline/voltha/master/software-upgrades.groovy b/jjb/pipeline/voltha/software-upgrades.groovy
similarity index 100%
rename from jjb/pipeline/voltha/master/software-upgrades.groovy
rename to jjb/pipeline/voltha/software-upgrades.groovy
diff --git a/jjb/pipeline/voltha/master/tucson-build-and-test.groovy b/jjb/pipeline/voltha/tucson-build-and-test.groovy
similarity index 100%
rename from jjb/pipeline/voltha/master/tucson-build-and-test.groovy
rename to jjb/pipeline/voltha/tucson-build-and-test.groovy
diff --git a/jjb/pipeline/voltha/voltha-2.11/bbsim-tests.groovy b/jjb/pipeline/voltha/voltha-2.11/bbsim-tests.groovy
deleted file mode 100644
index 79f648a..0000000
--- a/jjb/pipeline/voltha/voltha-2.11/bbsim-tests.groovy
+++ /dev/null
@@ -1,421 +0,0 @@
-// Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests for openonu-go
-// uses bbsim to simulate OLT/ONUs
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def clusterName = "kind-ci"
-
-// -----------------------------------------------------------------------
-// Intent:
-// -----------------------------------------------------------------------
-String branchName() {
-    String name = 'voltha-2.11'
-
-    // [TODO] Sanity check the target branch
-    // if (name != jenkins.branch) { fatal }
-    return(name)
-}
-
-// -----------------------------------------------------------------------
-// Intent: It can be difficult to determine when pipeline jobs have
-//   regenerated.  Hardcode a version string that can be assigned
-//   per-script to be sure the latest repository changes are being used.
-// -----------------------------------------------------------------------
-String pipelineVer() {
-    String version = '5addce3fac89095d103ac5c6eedff2bb02e9ec63'
-    return(version)
-}
-
-// -----------------------------------------------------------------------
-// Intent: Due to lack of a reliable stack trace, construct a literal.
-//         Jenkins will re-write the call stack for serialization.
-// -----------------------------------------------------------------------
-// Note: Hardcoded version string used to visualize changes in jenkins UI
-// -----------------------------------------------------------------------
-String getIam(String func) {
-    String branchName = branchName()
-    String version    = pipelineVer()
-    String src = [
-        'ci-management',
-        'jjb',
-        'pipeline',
-        'voltha',
-        branchName,
-        'bbsim-tests.groovy'
-    ].join('/')
-
-    String name = [src, version, func].join('::')
-    return(name)
-}
-
-def execute_test(testTarget, workflow, testLogging, teardown, testSpecificHelmFlags = "")
-{
-    def infraNamespace = "default"
-    def volthaNamespace = "voltha"
-    def logsDir = "$WORKSPACE/${testTarget}"
-
-    stage('IAM')
-    {
-	script
-	{
-	    String iam = [
-		'ci-management',
-		'jjb',
-		'pipeline',
-		'voltha',
-		'voltha-2.11',              // release-delta
-		'bbsim-tests.groovy'
-	    ].join('/')
-            println("** ${iam}: ENTER")
-
-	    String cmd = "which pkill"
-	    def stream = sh(
-		returnStatus:false,
-		returnStdout: true,
-		script: cmd)
-	    println(" ** ${cmd}:\n${stream}")
-
-            println("** ${iam}: LEAVE")
-	}
-    }
-
-    stage('Cleanup') {
-	if (teardown) {
-	    timeout(15) {
-		script {
-		    helmTeardown(["default", infraNamespace, volthaNamespace])
-		}
-	    timeout(1) {
-		    sh returnStdout: false, script: '''
-          # remove orphaned port-forward from different namespaces
-          ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-          '''
-		}
-	    }
-	}
-    }
-
-    stage ('Initialize')
-    {
-        steps
-        {
-            script
-            {
-                String iam = getIam('Initialize')
-                println("${iam}: ENTER")
-
-	            // VOL-4926 - Is voltha-system-tests available ?
-	            String cmd = [
-	                'make',
-	                '-C', "$WORKSPACE/voltha-system-tests",
-	                "KAIL_PATH=\"$WORKSPACE/bin\"",
-	                'kail',
-	            ].join(' ')
-	            println(" ** Running: ${cmd}:\n")
-                sh("${cmd}")
-
-                println("${iam}: LEAVE")
-            } // script
-        } // steps
-    } // stage
-
-    stage('Deploy common infrastructure') {
-	sh '''
-    helm repo add onf https://charts.opencord.org
-    helm repo update
-    if [ ${withMonitoring} = true ] ; then
-      helm install nem-monitoring onf/nem-monitoring \
-      --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
-      --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
-    fi
-    '''
-    }
-
-    stage('Deploy Voltha') {
-    if (teardown) {
-      timeout(10) {
-        script {
-
-          sh """
-          mkdir -p ${logsDir}
-          _TAG=kail-startup kail -n ${infraNamespace} -n ${volthaNamespace} > ${logsDir}/onos-voltha-startup-combined.log &
-          """
-
-          // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
-          def localCharts = false
-	  if (volthaHelmChartsChange != ""
-	      || gerritProject == "voltha-helm-charts"
-	      || branch != 'master'
-	  ) {
-            localCharts = true
-          }
-
-          // NOTE: temporary workaround to expose ONOS node ports
-          def localHelmFlags = extraHelmFlags.trim() + " --set global.log_level=${logLevel.toUpperCase()} " +
-          " --set onos-classic.onosSshPort=30115 " +
-          " --set onos-classic.onosApiPort=30120 " +
-          " --set onos-classic.onosOfPort=31653 " +
-          " --set onos-classic.individualOpenFlowNodePorts=true " + testSpecificHelmFlags
-
-          if (gerritProject != "") {
-            localHelmFlags = "${localHelmFlags} " + getVolthaImageFlags("${gerritProject}")
-          }
-
-          volthaDeploy([
-            infraNamespace: infraNamespace,
-            volthaNamespace: volthaNamespace,
-            workflow: workflow.toLowerCase(),
-            withMacLearning: enableMacLearning.toBoolean(),
-            extraHelmFlags: localHelmFlags,
-            localCharts: localCharts,
-            bbsimReplica: olts.toInteger(),
-            dockerRegistry: registry,
-            ])
-        }
-
-        // stop logging
-        sh """
-          P_IDS="\$(ps e -ww -A | grep "_TAG=kail-startup" | grep -v grep | awk '{print \$1}')"
-          if [ -n "\$P_IDS" ]; then
-            echo \$P_IDS
-            for P_ID in \$P_IDS; do
-              kill -9 \$P_ID
-            done
-          fi
-          cd ${logsDir}
-          gzip -k onos-voltha-startup-combined.log
-          rm onos-voltha-startup-combined.log
-        """
-      }
-      sh """
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-      bbsimDmiPortFwd=50075
-      for i in {0..${olts.toInteger() - 1}}; do
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="bbsim\${i}" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/bbsim\${i} \${bbsimDmiPortFwd}:50075; done"&
-        ((bbsimDmiPortFwd++))
-      done
-      if [ ${withMonitoring} = true ] ; then
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="nem-monitoring-prometheus-server" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n default svc/nem-monitoring-prometheus-server 31301:80; done"&
-      fi
-      ps aux | grep port-forward
-      """
-      // setting ONOS log level
-      script {
-        setOnosLogLevels([
-          onosNamespace: infraNamespace,
-          apps: [
-            'org.opencord.dhcpl2relay',
-            'org.opencord.olt',
-            'org.opencord.aaa',
-            'org.opencord.maclearner',
-            'org.onosproject.net.flowobjective.impl.FlowObjectiveManager',
-            'org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager'
-          ],
-          logLevel: logLevel
-        ])
-      }
-    }
-  }
-
-  stage('Run test ' + testTarget + ' on ' + workflow + ' workFlow') {
-    sh """
-    if [ ${withMonitoring} = true ] ; then
-      mkdir -p "$WORKSPACE/voltha-pods-mem-consumption-${workflow}"
-      cd "$WORKSPACE/voltha-system-tests"
-      make vst_venv
-      source ./vst_venv/bin/activate || true
-      # Collect initial memory consumption
-      python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption-${workflow} -a 0.0.0.0:31301 -n ${volthaNamespace} || true
-    fi
-    """
-    sh """
-    mkdir -p ${logsDir}
-    export ROBOT_MISC_ARGS="-d ${logsDir} ${params.extraRobotArgs} "
-    ROBOT_MISC_ARGS+="-v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120 -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace} -v container_log_dir:${logsDir} -v logging:${testLogging}"
-    export KVSTOREPREFIX=voltha/voltha_voltha
-
-    make -C "$WORKSPACE/voltha-system-tests" ${testTarget} || true
-    """
-    getPodsInfo("${logsDir}")
-    sh """
-      set +e
-      # collect logs collected in the Robot Framework StartLogging keyword
-      cd ${logsDir}
-      gzip *-combined.log || true
-      rm *-combined.log || true
-    """
-    sh """
-    if [ ${withMonitoring} = true ] ; then
-      cd "$WORKSPACE/voltha-system-tests"
-      source ./vst_venv/bin/activate || true
-      # Collect memory consumption of voltha pods once all the tests are complete
-      python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption-${workflow} -a 0.0.0.0:31301 -n ${volthaNamespace} || true
-    fi
-    """
-  }
-}
-
-def collectArtifacts(exitStatus) {
-  getPodsInfo("$WORKSPACE/${exitStatus}")
-  sh """
-  kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/${exitStatus}/voltha.log || true
-  """
-  archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.txt,**/*.html,**/voltha-pods-mem-consumption-att/*,**/voltha-pods-mem-consumption-dt/*,**/voltha-pods-mem-consumption-tt/*'
-  sh '''
-    sync
-    pkill kail || true
-    which voltctl
-    md5sum $(which voltctl)
-  '''
-  step([$class: 'RobotPublisher',
-    disableArchiveOutput: false,
-    logFileName: "**/*/log*.html",
-    otherFiles: '',
-    outputFileName: "**/*/output*.xml",
-    outputPath: '.',
-    passThreshold: 100,
-    reportFileName: "**/*/report*.html",
-    unstableThreshold: 0,
-    onlyCritical: true]);
-}
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-  environment {
-    KUBECONFIG="$HOME/.kube/kind-${clusterName}"
-    VOLTCONFIG="$HOME/.volt/config"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-    DIAGS_PROFILE="VOLTHA_PROFILE"
-    SSHPASS="karaf"
-  }
-
-  stages {
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          gerritProject: "${gerritProject}",
-          gerritRefspec: "${gerritRefspec}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-
-    stage('Build patch') {
-      // build the patch only if gerritProject is specified
-      when {
-        expression {
-          return !gerritProject.isEmpty()
-        }
-      }
-      steps {
-        // NOTE that the correct patch has already been checked out
-        // during the getVolthaCode step
-        buildVolthaComponent("${gerritProject}")
-      }
-    }
-    stage('Create K8s Cluster') {
-      steps {
-        script {
-          def clusterExists = sh returnStdout: true, script: """
-          kind get clusters | grep ${clusterName} | wc -l
-          """
-          if (clusterExists.trim() == "0") {
-            createKubernetesCluster([nodes: 3, name: clusterName])
-          }
-        }
-      }
-    }
-    stage('Replace voltctl') {
-      // if the project is voltctl override the downloaded one with the built one
-      when {
-        expression {
-          return gerritProject == "voltctl"
-        }
-      }
-      steps{
-        sh """
-        # [TODO] - why is this platform specific (?)
-        # [TODO] - revisit, command alteration has masked an error (see: voltha-2.11).
-        #          find will fail when no filesystem matches are found.
-        #          mv(ls) succeeded simply by accident / was invoked at a different time.
-        # find "$WORKSPACE/voltctl/release" -name 'voltctl-*-linux-amd*' \
-        #     -exec mv {} $WORKSPACE/bin/voltctl ;
-        mv `ls $WORKSPACE/voltctl/release/voltctl-*-linux-amd*` $WORKSPACE/bin/voltctl
-        chmod +x $WORKSPACE/bin/voltctl
-        """
-      }
-    }
-    stage('Load image in kind nodes') {
-      when {
-        expression {
-          return !gerritProject.isEmpty()
-        }
-      }
-      steps {
-        loadToKind()
-      }
-    }
-    stage('Parse and execute tests') {
-        steps {
-          script {
-            def tests = readYaml text: testTargets
-
-            for(int i = 0;i<tests.size();i++) {
-              def test = tests[i]
-              def target = test["target"]
-              def workflow = test["workflow"]
-              def flags = test["flags"]
-              def teardown = test["teardown"].toBoolean()
-              def logging = test["logging"].toBoolean()
-              def testLogging = 'False'
-              if (logging) {
-                  testLogging = 'True'
-              }
-              println "Executing test ${target} on workflow ${workflow} with logging ${testLogging} and extra flags ${flags}"
-              execute_test(target, workflow, testLogging, teardown, flags)
-            }
-          }
-        }
-    }
-  }
-  post {
-    aborted {
-      collectArtifacts("aborted")
-    }
-    failure {
-      collectArtifacts("failed")
-    }
-    always {
-      collectArtifacts("always")
-    }
-  }
-}
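
The per-release copy deleted above differs from its master counterpart mainly in the literals returned by branchName() and pipelineVer(), which getIam() joins into a traceable label for the Jenkins console. A minimal, illustrative sketch of that pattern (placeholder values, not part of this change):

    // Illustrative sketch only.
    String branchName()  { return 'voltha-2.11' }       // each pseudo-branch copy hardcodes its name
    String pipelineVer() { return '<commit-sha>' }       // bumped to confirm the regenerated job picked up changes
    String getIam(String func) {
        String src = ['ci-management', 'jjb', 'pipeline', 'voltha', branchName(), 'bbsim-tests.groovy'].join('/')
        return([src, pipelineVer(), func].join('::'))
    }
    // getIam('Initialize') =>
    //   ci-management/jjb/pipeline/voltha/voltha-2.11/bbsim-tests.groovy::<commit-sha>::Initialize
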
diff --git a/jjb/pipeline/voltha/voltha-2.11/device-management-mock-tests.groovy b/jjb/pipeline/voltha/voltha-2.11/device-management-mock-tests.groovy
deleted file mode 100644
index 8362a08..0000000
--- a/jjb/pipeline/voltha/voltha-2.11/device-management-mock-tests.groovy
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def localCharts = false
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 90, unit: 'MINUTES')
-  }
-  environment {
-    KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-    ROBOT_MISC_ARGS="-d $WORKSPACE/RobotLogs"
-  }
-
-  stages {
-
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          gerritProject: "${gerritProject}",
-          gerritRefspec: "${gerritRefspec}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Build Redfish Importer Image') {
-      steps {
-        sh """
-           make -C $WORKSPACE/device-management/\$1 DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build-importer
-           """
-      }
-    }
-    stage('Build demo_test Image') {
-      steps {
-        sh """
-           make -C $WORKSPACE/device-management/\$1/demo_test DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
-           """
-      }
-    }
-    stage('Build mock-redfish-server  Image') {
-      steps {
-        sh """
-           make -C $WORKSPACE/device-management/\$1/mock-redfish-server DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
-           """
-      }
-    }
-    stage('Create K8s Cluster') {
-      steps {
-        createKubernetesCluster([nodes: 3])
-      }
-    }
-    stage('Load image in kind nodes') {
-      steps {
-        loadToKind()
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        script {
-          if (branch != "master" || volthaHelmChartsChange != "") {
-            // if we're using a release or testing changes in the charts, then use the local clone
-            localCharts = true
-          }
-        }
-        volthaDeploy([
-          workflow: "att",
-          extraHelmFlags: extraHelmFlags,
-          dockerRegistry: "mirror.registry.opennetworking.org",
-          localCharts: localCharts,
-        ])
-        // start logging
-        sh """
-        mkdir -p $WORKSPACE/att
-        _TAG=kail-att kail -n infra -n voltha -n default > $WORKSPACE/att/onos-voltha-combined.log &
-        """
-        // forward ONOS and VOLTHA ports
-        sh """
-        _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101&
-        _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181&
-        _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha svc/voltha-voltha-api 55555:55555&
-        """
-      }
-    }
-
-    stage('Run E2E Tests') {
-      steps {
-        sh '''
-           mkdir -p $WORKSPACE/RobotLogs
-
-           # tell the kubernetes script to use images tagged citest and pullPolicy:Never
-           sed -i 's/master/citest/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
-           sed -i 's/imagePullPolicy: Always/imagePullPolicy: Never/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
-           make -C $WORKSPACE/device-management functional-mock-test || true
-           '''
-      }
-    }
-  }
-
-  post {
-    always {
-      sh '''
-         set +e
-         kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
-         kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
-         kubectl get nodes -o wide
-         kubectl get pods -o wide --all-namespaces
-
-         sync
-         pkill kail || true
-
-         ## Pull out errors from log files
-         extract_errors_go() {
-           echo
-           echo "Error summary for $1:"
-           grep $1 $WORKSPACE/att/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
-           echo
-         }
-
-         extract_errors_python() {
-           echo
-           echo "Error summary for $1:"
-           grep $1 $WORKSPACE/att/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
-           echo
-         }
-
-         extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
-         extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
-         extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
-         extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-
-         gzip $WORKSPACE/att/onos-voltha-combined.log
-         '''
-         step([$class: 'RobotPublisher',
-            disableArchiveOutput: false,
-            logFileName: 'RobotLogs/log*.html',
-            otherFiles: '',
-            outputFileName: 'RobotLogs/output*.xml',
-            outputPath: '.',
-            passThreshold: 80,
-            reportFileName: 'RobotLogs/report*.html',
-            unstableThreshold: 0]);
-         archiveArtifacts artifacts: '**/*.log,**/*.gz'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.11/dmi-build-and-test.groovy b/jjb/pipeline/voltha/voltha-2.11/dmi-build-and-test.groovy
deleted file mode 100755
index 6d66a53..0000000
--- a/jjb/pipeline/voltha/voltha-2.11/dmi-build-and-test.groovy
+++ /dev/null
@@ -1,355 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2022-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-// Intent: used to deploy VOLTHA and configure ONOS physical PODs
-//
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-def deploy_custom_chart(namespace, name, chart, extraHelmFlags) {
-  sh """
-    helm install --create-namespace --set defaults.image_pullPolicy=Always --namespace ${namespace} ${extraHelmFlags} ${name} ${chart}
-   """
-}
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 45, unit: 'MINUTES')
-  }
-  environment {
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    LOG_FOLDER="$WORKSPACE/dmi/"
-    APPS_TO_LOG="${OltDevMgr}"
-  }
-
-  stages{
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage ("Parse deployment configuration file") {
-      steps {
-        sh returnStdout: true, script: "rm -rf ${configBaseDir}"
-        sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-          if ( params.workFlow == "DT" ) {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-          }
-          else if ( params.workFlow == "TT" )
-          {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-          }
-          else
-          {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-          }
-        }
-      }
-    }
-    stage('Clean up') {
-      steps {
-        timeout(15) {
-          script {
-            helmTeardown(["default", infraNamespace, volthaNamespace])
-          }
-          timeout(1) {
-            sh returnStdout: false, script: '''
-            # remove orphaned port-forward from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Install Voltha')  {
-      steps {
-        timeout(20) {
-          installVoltctl("${branch}")
-          script {
-            // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
-            def localCharts = false
-            if (volthaHelmChartsChange != "") {
-              localCharts = true
-            }
-
-            // should the config file be suffixed with the workflow? see "deployment_config"
-            def localHelmFlags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml --set global.log_level=${logLevel} "
-
-            if (workFlow.toLowerCase() == "dt") {
-              localHelmFlags += " --set radius.enabled=false "
-            }
-            if (workFlow.toLowerCase() == "tt") {
-              localHelmFlags += " --set radius.enabled=false --set global.incremental_evto_update=true "
-                if (enableMultiUni.toBoolean()) {
-                    localHelmFlags += " --set voltha-adapter-openonu.adapter_open_onu.uni_port_mask=${uniPortMask} "
-                }
-            }
-
-            // NOTE: temporary workaround to expose ONOS node ports (pod-config needs to be updated to contain these values)
-            // and to connect the ofagent to all instances of ONOS
-            localHelmFlags = localHelmFlags + " --set onos-classic.onosSshPort=30115 " +
-            "--set onos-classic.onosApiPort=30120 " +
-            "--set onos-classic.onosOfPort=31653 " +
-            "--set onos-classic.individualOpenFlowNodePorts=true " +
-            "--set voltha.onos_classic.replicas=${params.NumOfOnos}"
-
-            if (bbsimReplicas.toInteger() != 0) {
-              localHelmFlags = localHelmFlags + " --set onu=${onuNumber},pon=${ponNumber} "
-            }
-
-            // adding user specified helm flags at the end so they'll have priority over everything else
-            localHelmFlags = localHelmFlags + " ${extraHelmFlags}"
-
-            def numberOfAdaptersToWait = 2
-
-            if(openoltAdapterChart != "onf/voltha-adapter-openolt") {
-              localHelmFlags = localHelmFlags + " --set voltha-adapter-openolt.enabled=false"
-              // We skip waiting for adapters in the volthaDeploy step because it's already waiting for
-              // both of them after the deployment of the custom olt adapter. See line 156.
-              numberOfAdaptersToWait = 0
-            }
-
-            volthaDeploy([
-              workflow: workFlow.toLowerCase(),
-              extraHelmFlags: localHelmFlags,
-              localCharts: localCharts,
-              kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
-              onosReplica: params.NumOfOnos,
-              atomixReplica: params.NumOfAtomix,
-              kafkaReplica: params.NumOfKafka,
-              etcdReplica: params.NumOfEtcd,
-              bbsimReplica: bbsimReplicas.toInteger(),
-              adaptersToWait: numberOfAdaptersToWait,
-              withVolthaInfra: installVolthaInfra.toBoolean(),
-              withVolthaStack: installVolthaStack.toBoolean(),
-              ])
-
-            if(installVolthaStack.toBoolean()) {
-              if(openoltAdapterChart != "onf/voltha-adapter-openolt"){
-                extraHelmFlags = extraHelmFlags + " --set global.log_level=${logLevel}"
-                deploy_custom_chart(volthaNamespace, oltAdapterReleaseName, openoltAdapterChart, extraHelmFlags)
-                waitForAdapters([
-                  adaptersToWait: 2
-                ])
-              }
-            }
-          }
-          sh """
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd ${params.VolthaEtcdPort}:2379; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-          ps aux | grep port-forward
-          """
-          getPodsInfo("$WORKSPACE")
-        }
-      }
-    }
-    stage('Deploy Device Manager Interface Chart') {
-      steps {
-        script {
-          deploy_custom_chart('default', 'olt-device-manager', dmiChart, extraHelmFlags)
-        }
-        println "Wait for olt-device-manager to start"
-        sh """
-            set +x
-            devmgr=\$(kubectl get pods -l app.kubernetes.io/name=${params.OltDevMgr} --no-headers | grep "0/" | wc -l)
-            while [[ \$devmgr != 0 ]]; do
-              sleep 5
-              devmgr=\$(kubectl get pods -l app.kubernetes.io/name=${params.OltDevMgr} --no-headers | grep "0/" | wc -l)
-            done
-        """
-        sh """
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="${params.OltDevMgr}" bash -c "while true; do kubectl port-forward --address 0.0.0.0 svc/${params.OltDevMgr} 50051; done"&
-          ps aux | grep port-forward
-        """
-      }
-    }
-	stage('Start logging')
-	{
-	    steps
-	    {
-		// Install kail
-		sh("""make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail""")
-
-		sh returnStdout: false, script: '''
-          # start logging with kail
-          cd $WORKSPACE
-          mkdir -p $LOG_FOLDER
-          list=($APPS_TO_LOG)
-          for app in "${list[@]}"
-          do
-            echo "Starting logs for: ${app}"
-            _TAG=kail-$app kail -l app.kubernetes.io/name=$app --since 1h > $LOG_FOLDER/$app.log&
-          done
-        '''
-	    }
-	}
-
-	stage('Reinstall OLT software') {
-      steps {
-        script {
-          if ( params.reinstallOlt ) {
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              sh returnStdout: true, script: """
-              ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-              if [ "${params.inBandManagement}" == "true" ]; then
-                sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'kill -9 `pgrep -f "[b]ash /opt/openolt/openolt_dev_mgmt_daemon_process_watchdog"` || true'
-              fi
-              sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion}"
-              sleep 10
-              """
-              timeout(5) {
-                waitUntil {
-                  olt_sw_present = sh returnStdout: true, script: """
-                  if [[ "${deployment_config.olts[i].oltDebVersion}" == *"asfvolt16"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asfvolt16 | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"asgvolt64"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asgvolt64 | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600x-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600x-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600g-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600g-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-3200g-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-3200g-w | wc -l'
-                  else
-                    echo Unknown Debian package for openolt
-                  fi
-                  if (${deployment_config.olts[i].fortygig}); then
-                    if [[ "${params.inBandManagement}" == "true" ]]; then
-                      ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'mkdir -p /opt/openolt/'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/watchdog-script/* /opt/openolt/'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/bal_cli_appl/example_user_appl /broadcom'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp in-band-startup-script/* /etc/init.d/'
-                    fi
-                  fi
-                  """
-                  return olt_sw_present.toInteger() > 0
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Restart OLT processes') {
-      steps {
-        script {
-          if ( params.restartOlt ) {
-            // rebooting OLTs
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              timeout(15) {
-                sh returnStdout: true, script: """
-                ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-                sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; rm -f /var/log/openolt_process_watchdog.log; reboot > /dev/null &' || true
-                """
-              }
-            }
-            sh returnStdout: true, script: """
-            sleep ${params.waitTimerForOltUp}
-            """
-            // Checking dev_mgmt_daemon and openolt processes
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              if ( params.oltAdapterReleaseName != "open-olt" ) {
-                timeout(15) {
-                  waitUntil {
-                    devprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep dev_mgmt_daemon | wc -l'"
-                    return devprocess.toInteger() > 0
-                  }
-                }
-                timeout(15) {
-                  waitUntil {
-                    openoltprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep openolt | wc -l'"
-                    return openoltprocess.toInteger() > 0
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Run Device Management Interface Tests') {
-      environment {
-        ROBOT_FILE="dmi-hw-management.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs"
-        ROBOT_CONFIG_FILE="$WORKSPACE/voltha-system-tests/tests/data/dmi-components-adtran.yaml"
-      }
-      steps {
-        sh """
-          mkdir -p $ROBOT_LOGS_DIR
-          export ROBOT_MISC_ARGS="--removekeywords wuks -e notreadyDMI -i functionalDMI -d $ROBOT_LOGS_DIR"
-          make -C $WORKSPACE/voltha-system-tests voltha-dmi-test || true
-        """
-      }
-    }
-  }
-
-  post {
-    always {
-      getPodsInfo("$WORKSPACE")
-      sh '''
-      # stop the kail processes
-      list=($APPS_TO_LOG)
-      for app in "${list[@]}"
-      do
-        echo "Stopping logs for: ${app}"
-        _TAG="kail-$app"
-        P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
-        if [ -n "$P_IDS" ]; then
-          echo $P_IDS
-          for P_ID in $P_IDS; do
-            kill -9 $P_ID
-          done
-        fi
-      done
-      '''
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: 'RobotLogs/log*.html',
-        otherFiles: '',
-        outputFileName: 'RobotLogs/output*.xml',
-        outputPath: '.',
-        passThreshold: 100,
-        reportFileName: 'RobotLogs/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true]);
-      archiveArtifacts artifacts: '**/*.txt,**/*.gz,*.gz,**/*.log'
-    }
-  }
-}
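
When a non-default OLT adapter chart is requested, the Install Voltha stage above disables the bundled openolt adapter and defers the adapter wait until the custom chart is installed. A condensed, illustrative Groovy sketch of that flow, using the same helpers and variables as the deleted file (not part of this change):

    // Illustrative sketch only -- condensed from the Install Voltha stage above.
    def localHelmFlags = extraHelmFlags   // plus the workflow/ONOS flags assembled earlier in the stage
    def adaptersToWait = 2
    if (openoltAdapterChart != 'onf/voltha-adapter-openolt') {
        localHelmFlags += ' --set voltha-adapter-openolt.enabled=false'
        adaptersToWait = 0   // skip the wait in volthaDeploy; it happens after the custom chart
    }
    volthaDeploy([workflow: workFlow.toLowerCase(), extraHelmFlags: localHelmFlags, adaptersToWait: adaptersToWait])
    if (openoltAdapterChart != 'onf/voltha-adapter-openolt') {
        deploy_custom_chart(volthaNamespace, oltAdapterReleaseName, openoltAdapterChart, extraHelmFlags)
        waitForAdapters([adaptersToWait: 2])
    }
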
diff --git a/jjb/pipeline/voltha/voltha-2.11/physical-build.groovy b/jjb/pipeline/voltha/voltha-2.11/physical-build.groovy
deleted file mode 100755
index 1e52f25..0000000
--- a/jjb/pipeline/voltha/voltha-2.11/physical-build.groovy
+++ /dev/null
@@ -1,432 +0,0 @@
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// used to deploy VOLTHA and configure ONOS physical PODs
-
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// -----------------------------------------------------------------------
-// -----------------------------------------------------------------------
-def getIam(String func)
-{
-    // Cannot rely on a stack trace due to jenkins manipulation
-    String src = 'jjb/pipeline/voltha/voltha-2.11/physical-build.groovy'
-    String iam = [src, func].join('::')
-    return iam
-}
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-// -----------------------------------------------------------------------
-// -----------------------------------------------------------------------
-def deploy_custom_oltAdapterChart(namespace, name, chart, extraHelmFlags) {
-    String iam = getIam('deploy_custom_oltAdapterChart')
-    println("** ${iam}: ENTER")
-
-    sh """
-    helm install --create-namespace --set defaults.image_pullPolicy=Always --namespace ${namespace} ${extraHelmFlags} ${name} ${chart}
-   """
-
-    println("** ${iam}: LEAVE")
-    return
-}
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 35, unit: 'MINUTES')
-  }
-  environment {
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-  }
-
-  stages{
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage ("Parse deployment configuration file") {
-      steps {
-        sh returnStdout: true, script: "rm -rf ${configBaseDir}"
-        sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-          if ( params.workFlow == "DT" ) {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-          }
-          else if ( params.workFlow == "TT" )
-          {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-          }
-          else
-          {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-          }
-        }
-      }
-    }
-    stage('Clean up') {
-      steps {
-        timeout(15) {
-          script {
-            helmTeardown(["default", infraNamespace, volthaNamespace])
-          }
-          timeout(1) {
-            sh returnStdout: false, script: '''
-            # remove orphaned port-forward from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Install Voltha')  {
-      steps {
-        timeout(20) {
-          installVoltctl("${branch}")
-          script {
-            // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
-            def localCharts = false
-            if (volthaHelmChartsChange != "") {
-              localCharts = true
-            }
-
-            // should the config file be suffixed with the workflow? see "deployment_config"
-            def localHelmFlags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml --set global.log_level=${logLevel} "
-
-            if (workFlow.toLowerCase() == "dt") {
-              localHelmFlags += " --set radius.enabled=false "
-            }
-            if (workFlow.toLowerCase() == "tt") {
-              localHelmFlags += " --set radius.enabled=false --set global.incremental_evto_update=true "
-                if (enableMultiUni.toBoolean()) {
-                    localHelmFlags += " --set voltha-adapter-openonu.adapter_open_onu.uni_port_mask=${uniPortMask} "
-                }
-            }
-
-            // NOTE: temporary workaround to expose ONOS node ports (pod-config needs to be updated to contain these values)
-            // and to connect the ofagent to all instances of ONOS
-            localHelmFlags = localHelmFlags + " --set onos-classic.onosSshPort=30115 " +
-            "--set onos-classic.onosApiPort=30120 " +
-            "--set onos-classic.onosOfPort=31653 " +
-            "--set onos-classic.individualOpenFlowNodePorts=true " +
-            "--set voltha.onos_classic.replicas=${params.NumOfOnos}"
-
-            if (bbsimReplicas.toInteger() != 0) {
-              localHelmFlags = localHelmFlags + " --set onu=${onuNumber},pon=${ponNumber} "
-            }
-
-            // adding user specified helm flags at the end so they'll have priority over everything else
-            localHelmFlags = localHelmFlags + " ${extraHelmFlags}"
-
-            def numberOfAdaptersToWait = 2
-
-            if(openoltAdapterChart != "onf/voltha-adapter-openolt") {
-              localHelmFlags = localHelmFlags + " --set voltha-adapter-openolt.enabled=false"
-              // We skip waiting for adapters in the volthaDeploy step because it's already waiting for
-              // both of them after the deployment of the custom olt adapter. See line 156.
-              numberOfAdaptersToWait = 0
-            }
-
-            volthaDeploy([
-              workflow: workFlow.toLowerCase(),
-              extraHelmFlags: localHelmFlags,
-              localCharts: localCharts,
-              kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
-              onosReplica: params.NumOfOnos,
-              atomixReplica: params.NumOfAtomix,
-              kafkaReplica: params.NumOfKafka,
-              etcdReplica: params.NumOfEtcd,
-              bbsimReplica: bbsimReplicas.toInteger(),
-              withFttb: withFttb.toBoolean(),
-              adaptersToWait: numberOfAdaptersToWait,
-              ])
-
-            if(openoltAdapterChart != "onf/voltha-adapter-openolt"){
-              extraHelmFlags = extraHelmFlags + " --set global.log_level=${logLevel}"
-              deploy_custom_oltAdapterChart(volthaNamespace, oltAdapterReleaseName, openoltAdapterChart, extraHelmFlags)
-              waitForAdapters([
-                adaptersToWait: 2
-              ])
-            }
-          }
-          sh """
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd ${params.VolthaEtcdPort}:2379; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-          ps aux | grep port-forward
-          """
-          getPodsInfo("$WORKSPACE")
-        }
-      }
-    }
-    stage('Push Tech-Profile') {
-      steps {
-        script {
-          if ( params.configurePod && params.profile != "Default" ) {
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              def tech_prof_directory = "XGS-PON"
-              if (deployment_config.olts[i].containsKey("board_technology")){
-                tech_prof_directory = deployment_config.olts[i]["board_technology"]
-              }
-              timeout(1) {
-                sh returnStatus: true, script: """
-                export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
-                etcd_container=\$(kubectl get pods -n ${infraNamespace} -l app.kubernetes.io/name=etcd --no-headers | awk 'NR==1{print \$1}')
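-                # tech profiles are stored in etcd under service/voltha/technology_profiles/<technology>/<TP id>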
-                if [[ "${workFlow}" == "TT" ]]; then
-                   kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
-                   kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
-                   kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
-                   kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65'
-                   if [[ "${params.enableMultiUni}" == "true" ]]; then
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-MCAST-AdditionalBW-None.json \$etcd_container:/tmp/mcast_additionalBW_none.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_none.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast_additionalBW_na.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_na.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/67'
-                   else
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-None.json \$etcd_container:/tmp/mcast_additionalBW_none.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_none.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast_additionalBW_na.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_na.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/67'
-                   fi
-                else
-                   kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json \$etcd_container:/tmp/flexpod.json
-                   kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
-                fi
-                """
-              }
-              timeout(1) {
-                sh returnStatus: true, script: """
-                export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
-                etcd_container=\$(kubectl get pods -n ${infraNamespace} -l app.kubernetes.io/name=etcd --no-headers | awk 'NR==1{print \$1}')
-                kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'ETCDCTL_API=3 etcdctl get --prefix service/voltha/technology_profiles/${tech_prof_directory}/64'
-                """
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Push MIB templates') {
-      steps {
-        sh """
-        export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
-        etcd_container=\$(kubectl get pods -n ${infraNamespace} -l app.kubernetes.io/name=etcd --no-headers | awk 'NR==1{print \$1}')
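-        # MIB templates are stored in etcd under service/voltha/omci_mibs/{go_templates,templates}/<vendor>/<equipment id>/<software version>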
-        kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/MIB_Alpha.json \$etcd_container:/tmp/MIB_Alpha.json
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/go_templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
-        kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/MIB_Scom.json \$etcd_container:/tmp/MIB_Scom.json
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Scom.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/go_templates/SCOM/Glasfaser-Modem/090140.1.0.304'
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Scom.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/templates/SCOM/Glasfaser-Modem/090140.1.0.304'
-        """
-      }
-    }
-    stage('Push Sadis-config') {
-      steps {
-        timeout(1) {
-          sh returnStatus: true, script: """
-          if [[ "${workFlow}" == "DT" ]]; then
-            curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-DT.json
-          elif [[ "${workFlow}" == "TT" ]]; then
-            curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-TT.json
-          else
-            # this is the ATT case; rename the file to *-sadis-ATT.json so that we can avoid special cases and just load the file
-            curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis.json
-          fi
-          """
-        }
-      }
-    }
-    stage('Switch Configurations in ONOS') {
-      steps {
-        script {
-          if ( deployment_config.fabric_switches.size() > 0 ) {
-            timeout(1) {
-              def netcfg = "$WORKSPACE/${configBaseDir}/${configToscaDir}/voltha/${configFileName}-onos-netcfg-switch.json"
-              if (params.inBandManagement){
-                netcfg = "$WORKSPACE/${configBaseDir}/${configToscaDir}/voltha/${configFileName}-onos-netcfg-switch-inband.json"
-              }
-              sh """
-              curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @${netcfg}
-              curl -sSL --user karaf:karaf -X POST http://${deployment_config.nodes[0].ip}:30120/onos/v1/applications/org.onosproject.segmentrouting/active
-              """
-            }
-            timeout(3) {
-              setOnosLogLevels([
-                  onosNamespace: infraNamespace,
-                  apps: [
-                    'org.opencord.dhcpl2relay',
-                    'org.opencord.olt',
-                    'org.opencord.aaa',
-                    'org.opencord.maclearner',
-                    'org.onosproject.net.flowobjective.impl.FlowObjectiveManager',
-                    'org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager'
-                  ]
-              ])
-              waitUntil {
-                sr_active_out = sh returnStatus: true, script: """
-                curl -sSL --user karaf:karaf -X GET http://${deployment_config.nodes[0].ip}:30120/onos/v1/applications/org.onosproject.segmentrouting | jq '.state' | grep ACTIVE
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled false"
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.net.flow.impl.FlowRuleManager purgeOnDisconnection false"
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.net.meter.impl.MeterManager purgeOnDisconnection false"
-                """
-                return sr_active_out == 0
-              }
-            }
-            timeout(8) {
-              for(int i=0; i < deployment_config.hosts.src.size(); i++) {
-                for(int j=0; j < deployment_config.olts.size(); j++) {
-                  def aggPort = -1
-                  if(deployment_config.olts[j].serial == deployment_config.hosts.src[i].olt){
-                      aggPort = deployment_config.olts[j].aggPort
-                      if(aggPort == -1){
-                        throw new Exception("Upstream port for the olt is not configured, field aggPort is empty")
-                      }
-                      sh """
-                      sleep 10 # NOTE why are we sleeping?
-                      curl -X POST --user karaf:karaf --header 'Content-Type: application/json' --header 'Accept: application/json' -d '{"deviceId": "${deployment_config.fabric_switches[0].device_id}", "vlanId": "${deployment_config.hosts.src[i].s_tag}", "endpoints": [${deployment_config.fabric_switches[0].bngPort},${aggPort}]}' 'http://${deployment_config.nodes[0].ip}:30120/onos/segmentrouting/xconnect'
-                      """
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Reinstall OLT software') {
-      steps {
-        script {
-          if ( params.reinstallOlt ) {
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              // NOTE what is oltDebVersion23? is that for VOLTHA-2.3? do we still need this differentiation?
-              sh returnStdout: true, script: """
-              ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-              if [ "${params.inBandManagement}" == "true" ]; then
-                sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'kill -9 `pgrep -f "[b]ash /opt/openolt/openolt_dev_mgmt_daemon_process_watchdog"` || true'
-              fi
-              sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion}"
-              sleep 10
-              """
-              timeout(5) {
-                waitUntil {
-                  olt_sw_present = sh returnStdout: true, script: """
-                  if [[ "${deployment_config.olts[i].oltDebVersion}" == *"asfvolt16"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asfvolt16 | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"asgvolt64"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asgvolt64 | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600x-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600x-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600g-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600g-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-3200g-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-3200g-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"sda3016ss"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep sda3016ss | wc -l'
-                  else
-                    echo Unknown Debian package for openolt
-                  fi
-                  if (${deployment_config.olts[i].fortygig}); then
-                    if [[ "${params.inBandManagement}" == "true" ]]; then
-                      ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'mkdir -p /opt/openolt/'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/watchdog-script/* /opt/openolt/'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/bal_cli_appl/example_user_appl /broadcom'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp in-band-startup-script/* /etc/init.d/'
-                    fi
-                  fi
-                  """
-                  return olt_sw_present.toInteger() > 0
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Restart OLT processes') {
-      steps {
-        script {
-          //rebooting OLTs
-          for(int i=0; i < deployment_config.olts.size(); i++) {
-            timeout(15) {
-              sh returnStdout: true, script: """
-              ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-              sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; rm -f /var/log/openolt_process_watchdog.log; reboot > /dev/null &' || true
-              """
-            }
-          }
-          sh returnStdout: true, script: """
-          sleep ${params.waitTimerForOltUp}
-          """
-          // Checking dev_mgmt_daemon and openolt processes
-          for(int i=0; i < deployment_config.olts.size(); i++) {
-            if ( params.oltAdapterReleaseName != "open-olt" ) {
-              timeout(15) {
-                waitUntil {
-                  devprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep dev_mgmt_daemon | wc -l'"
-                  return devprocess.toInteger() > 0
-                }
-              }
-              timeout(15) {
-                waitUntil {
-                  openoltprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep openolt | wc -l'"
-                  return openoltprocess.toInteger() > 0
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  post {
-    aborted {
-      getPodsInfo("$WORKSPACE/failed")
-      sh """
-      kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.log || true
-      """
-      archiveArtifacts artifacts: '**/*.log,**/*.txt'
-    }
-    failure {
-      getPodsInfo("$WORKSPACE/failed")
-      sh """
-      kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.logs || true
-      """
-      archiveArtifacts artifacts: '**/*.log,**/*.txt'
-    }
-    always {
-      archiveArtifacts artifacts: '*.txt'
-    }
-  }
-}
-
-// [EOF]
diff --git a/jjb/pipeline/voltha/voltha-2.11/software-upgrades.groovy b/jjb/pipeline/voltha/voltha-2.11/software-upgrades.groovy
deleted file mode 100755
index 1238a53..0000000
--- a/jjb/pipeline/voltha/voltha-2.11/software-upgrades.groovy
+++ /dev/null
@@ -1,308 +0,0 @@
-// Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// voltha-2.x e2e tests
-// uses bbsim to simulate OLT/ONUs
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// Fetches the versions/tags of a VOLTHA component.
-// Returns the deployment version, which is one tag behind the latest available tag of the repo; the first VOLTHA stack is deployed using this.
-// Returns the test version, which is the latest tag of the repo; the component upgrade is tested on this.
-// Note: if there is a major version change between the deployment and test tags, the deployment tag will be the same as the test tag, i.e. both will be the latest.
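-// Example (illustrative): if the two most recent tags of a repo are v2.11.0 and v2.11.1 and
-// base_deploy_tag differs from the latest, this returns deploy_tag "2.11.0" and test_tag "2.11.1".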
-def get_voltha_comp_versions(component, base_deploy_tag) {
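-    // The '-' in tag names is mapped to '~' so that pre-release tags (e.g. v2.11.1-rc1) sort before the final release under 'sort --version-sort'.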
-    def comp_test_tag = sh (
-      script: "git ls-remote --refs --tags https://github.com/opencord/${component} | cut --delimiter='/' --fields=3 | tr '-' '~' | sort --version-sort | tail --lines=1 | sed 's/v//'",
-      returnStdout: true
-    ).trim()
-    def comp_deploy_tag = sh (
-      script: "git ls-remote --refs --tags https://github.com/opencord/${component} | cut --delimiter='/' --fields=3 | tr '-' '~' | sort --version-sort | tail --lines=2 | head -n 1 | sed 's/v//'",
-      returnStdout: true
-    ).trim()
-    def comp_deploy_major = comp_deploy_tag.substring(0, comp_deploy_tag.indexOf('.'))
-    def comp_test_major = comp_test_tag.substring(0, comp_test_tag.indexOf('.'))
-    if ( "${comp_deploy_major.trim()}" != "${comp_test_major.trim()}") {
-      comp_deploy_tag = comp_test_tag
-    }
-    if ( "${comp_test_tag.trim()}" == "${base_deploy_tag.trim()}") {
-      comp_deploy_tag = comp_test_tag
-      comp_test_tag = "master"
-    }
-    println "${component}: deploy_tag: ${comp_deploy_tag}, test_tag: ${comp_test_tag}"
-    return [comp_deploy_tag, comp_test_tag]
-}
-
-def test_software_upgrade(name) {
-  def infraNamespace = "infra"
-  def volthaNamespace = "voltha"
-  def openolt_adapter_deploy_tag = ""
-  def openolt_adapter_test_tag = ""
-  def openonu_adapter_deploy_tag = ""
-  def openonu_adapter_test_tag = ""
-  def rw_core_deploy_tag = ""
-  def rw_core_test_tag = ""
-  def ofagent_deploy_tag = ""
-  def ofagent_test_tag = ""
-  def logsDir = "$WORKSPACE/${name}"
-  stage('Deploy Voltha - '+ name) {
-    timeout(10) {
-      // start logging
-      sh """
-      rm -rf ${logsDir} || true
-      mkdir -p ${logsDir}
-      _TAG=kail-${name} kail -n ${infraNamespace} -n ${volthaNamespace} > ${logsDir}/onos-voltha-startup-combined.log &
-      """
-      def extraHelmFlags = extraHelmFlags.trim()
-      if ("${name}" == "onos-app-upgrade" || "${name}" == "onu-software-upgrade" || "${name}" == "onu-software-upgrade-omci-extended-msg" || "${name}" == "voltha-component-upgrade" || "${name}" == "voltha-component-rolling-upgrade") {
-          extraHelmFlags = " --set global.log_level=${logLevel.toUpperCase()},onu=1,pon=1 --set onos-classic.replicas=3,onos-classic.atomix.replicas=3 " + extraHelmFlags
-      }
-      if ("${name}" == "onu-software-upgrade" || "${name}" == "onu-software-upgrade-omci-extended-msg") {
-          extraHelmFlags = " --set global.extended_omci_support.enabled=true " + extraHelmFlags
-      }
-      if ("${name}" == "onu-software-upgrade-omci-extended-msg") {
-          extraHelmFlags = " --set omccVersion=180 " + extraHelmFlags
-      }
-      if ("${name}" == "onu-image-dwl-simultaneously") {
-          extraHelmFlags = " --set global.log_level=${logLevel.toUpperCase()},onu=2,pon=2 --set onos-classic.replicas=3,onos-classic.atomix.replicas=3 " + extraHelmFlags
-      }
-      if ("${name}" == "onos-app-upgrade" || "${name}" == "onu-software-upgrade" || "${name}" == "onu-software-upgrade-omci-extended-msg" || "${name}" == "onu-image-dwl-simultaneously") {
-          extraHelmFlags = " --set global.image_tag=master --set onos-classic.image.tag=master " + extraHelmFlags
-      }
-      if ("${name}" == "voltha-component-upgrade" || "${name}" == "voltha-component-rolling-upgrade") {
-          extraHelmFlags = " --set images.onos_config_loader.tag=master-onos-config-loader --set onos-classic.image.tag=master " + extraHelmFlags
-      }
-      extraHelmFlags = extraHelmFlags + " --set onos-classic.onosSshPort=30115 --set onos-classic.onosApiPort=30120 "
-      extraHelmFlags = extraHelmFlags + " --set voltha.onos_classic.replicas=3"
-      //ONOS custom image handling
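-      // onosImg is expected in <repository>:<tag> form; the two parts populate onos-classic.image.repository and onos-classic.image.tag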
-      if ( onosImg.trim() != '' ) {
-         String[] split;
-         onosImg = onosImg.trim()
-         split = onosImg.split(':')
-        extraHelmFlags = extraHelmFlags + " --set onos-classic.image.repository=" + split[0] +",onos-classic.image.tag=" + split[1] + " "
-      }
-      Integer olts = 1
-      if ("${name}" == "onu-image-dwl-simultaneously") {
-          olts = 2
-      }
-      if ("${name}" == "voltha-component-upgrade" || "${name}" == "voltha-component-rolling-upgrade") {
-        // fetch voltha components versions/tags
-        (openolt_adapter_deploy_tag, openolt_adapter_test_tag) = get_voltha_comp_versions("voltha-openolt-adapter", openoltAdapterDeployBaseTag.trim())
-        extraHelmFlags = extraHelmFlags + " --set voltha-adapter-openolt.images.adapter_open_olt.tag=${openolt_adapter_deploy_tag} "
-        (openonu_adapter_deploy_tag, openonu_adapter_test_tag) = get_voltha_comp_versions("voltha-openonu-adapter-go", openonuAdapterDeployBaseTag.trim())
-        extraHelmFlags = extraHelmFlags + " --set voltha-adapter-openonu.images.adapter_open_onu_go.tag=${openonu_adapter_deploy_tag} "
-        (rw_core_deploy_tag, rw_core_test_tag) = get_voltha_comp_versions("voltha-go", rwCoreDeployBaseTag.trim())
-        extraHelmFlags = extraHelmFlags + " --set voltha.images.rw_core.tag=${rw_core_deploy_tag} "
-        (ofagent_deploy_tag, ofagent_test_tag) = get_voltha_comp_versions("ofagent-go", ofagentDeployBaseTag.trim())
-        extraHelmFlags = extraHelmFlags + " --set voltha.images.ofagent.tag=${ofagent_deploy_tag} "
-      }
-      def localCharts = false
-      // Currently only testing with ATT workflow
-      // TODO: Support for other workflows
-      volthaDeploy([bbsimReplica: olts.toInteger(), workflow: "att", extraHelmFlags: extraHelmFlags, localCharts: localCharts])
-      // stop logging
-      sh """
-        P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${name}" | grep -v grep | awk '{print \$1}')"
-        if [ -n "\$P_IDS" ]; then
-          echo \$P_IDS
-          for P_ID in \$P_IDS; do
-            kill -9 \$P_ID
-          done
-        fi
-        cd ${logsDir}
-        gzip -k onos-voltha-startup-combined.log
-        rm onos-voltha-startup-combined.log
-      """
-      // forward ONOS and VOLTHA ports
-      sh """
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8101:8101; done 2>&1 " &
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8181:8181; done 2>&1 " &
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG=port-forward-voltha-api /bin/bash -c "while true; do kubectl -n voltha port-forward --address 0.0.0.0 service/voltha-voltha-api 55555:55555; done 2>&1 " &
-      """
-      sh """
-      sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set DEBUG org.opencord
-      """
-    }
-  }
-  stage('Test - '+ name) {
-    timeout(75) {
-      sh """
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/${name}"
-        mkdir -p \$ROBOT_LOGS_DIR
-        if [[ ${name} == 'onos-app-upgrade' ]]; then
-          export ONOS_APPS_UNDER_TEST+=''
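-          # each app under test is appended as <app-id>,<version>,<oar-url> with '*' as the separator between entries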
-          if [ ${aaaVer.trim()} != '' ] && [ ${aaaOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.aaa,${aaaVer.trim()},${aaaOarUrl.trim()}*"
-          fi
-          if [ ${oltVer.trim()} != '' ] && [ ${oltOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.olt,${oltVer.trim()},${oltOarUrl.trim()}*"
-          fi
-          if [ ${dhcpl2relayVer.trim()} != '' ] && [ ${dhcpl2relayOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.dhcpl2relay,${dhcpl2relayVer.trim()},${dhcpl2relayOarUrl.trim()}*"
-          fi
-          if [ ${igmpproxyVer.trim()} != '' ] && [ ${igmpproxyOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.igmpproxy,${igmpproxyVer.trim()},${igmpproxyOarUrl.trim()}*"
-          fi
-          if [ ${sadisVer.trim()} != '' ] && [ ${sadisOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.sadis,${sadisVer.trim()},${sadisOarUrl.trim()}*"
-          fi
-          if [ ${mcastVer.trim()} != '' ] && [ ${mcastOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.mcast,${mcastVer.trim()},${mcastOarUrl.trim()}*"
-          fi
-          if [ ${kafkaVer.trim()} != '' ] && [ ${kafkaOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.kafka,${kafkaVer.trim()},${kafkaOarUrl.trim()}*"
-          fi
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v onos_apps_under_test:\$ONOS_APPS_UNDER_TEST -e PowerSwitch"
-          export TARGET=onos-app-upgrade-test
-        fi
-        if [ ${name} == 'voltha-component-upgrade' ] || [ ${name} == 'voltha-component-rolling-upgrade' ]; then
-          export VOLTHA_COMPS_UNDER_TEST+=''
-          VOLTHA_COMPS_UNDER_TEST+="adapter-open-olt,adapter-open-olt,voltha/voltha-openolt-adapter:${openolt_adapter_test_tag}*"
-          VOLTHA_COMPS_UNDER_TEST+="adapter-open-onu,adapter-open-onu,voltha/voltha-openonu-adapter-go:${openonu_adapter_test_tag}*"
-          VOLTHA_COMPS_UNDER_TEST+="rw-core,voltha,voltha/voltha-rw-core:${rw_core_test_tag}*"
-          VOLTHA_COMPS_UNDER_TEST+="ofagent,ofagent,voltha/voltha-ofagent-go:${ofagent_test_tag}*"
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v voltha_comps_under_test:\$VOLTHA_COMPS_UNDER_TEST -e PowerSwitch"
-        fi
-        if [[ ${name} == 'voltha-component-upgrade' ]]; then
-          export TARGET=voltha-comp-upgrade-test
-        fi
-        if [[ ${name} == 'voltha-component-rolling-upgrade' ]]; then
-          export TARGET=voltha-comp-rolling-upgrade-test
-        fi
-        if [ ${name} == 'onu-software-upgrade' ] || [ ${name} == 'onu-software-upgrade-omci-extended-msg' ]; then
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v image_version:${onuImageVersion.trim()} -v image_url:${onuImageUrl.trim()} -v image_vendor:${onuImageVendor.trim()} -v image_activate_on_success:${onuImageActivateOnSuccess.trim()} -v image_commit_on_success:${onuImageCommitOnSuccess.trim()} -v image_crc:${onuImageCrc.trim()} -e PowerSwitch"
-          export TARGET=onu-upgrade-test
-        fi
-        if [[ ${name} == 'onu-image-dwl-simultaneously' ]]; then
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v image_version:${onuImageVersion.trim()} -v image_url:${onuImageUrl.trim()} -v image_vendor:${onuImageVendor.trim()} -v image_activate_on_success:${onuImageActivateOnSuccess.trim()} -v image_commit_on_success:${onuImageCommitOnSuccess.trim()} -v image_crc:${onuImageCrc.trim()} -e PowerSwitch"
-          export TARGET=onu-upgrade-test-multiolt-kind-att
-        fi
-        testLogging='False'
-        if [ ${logging} = true ]; then
-          testLogging='True'
-        fi
-        export VOLTCONFIG=$HOME/.volt/config-minimal
-        export KUBECONFIG=$HOME/.kube/kind-config-voltha-minimal
-        ROBOT_MISC_ARGS+=" -v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120 -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace} -v container_log_dir:${logsDir} -v logging:\$testLogging"
-        # Run the specified tests
-        make -C $WORKSPACE/voltha-system-tests \$TARGET || true
-      """
-      // remove port-forwarding
-      sh """
-        # remove orphaned port-forward from different namespaces
-        ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
-      """
-      // collect pod details
-      get_pods_info("$WORKSPACE/${name}")
-      sh """
-        set +e
-        # collect logs collected in the Robot Framework StartLogging keyword
-        cd ${logsDir}
-        gzip *-combined.log || true
-        rm *-combined.log || true
-      """
-      helmTeardown(['infra', 'voltha'])
-    }
-  }
-}
-def get_pods_info(dest) {
-  // collect pod details; this is here in case of failure
-  sh """
-  mkdir -p ${dest} || true
-  kubectl get pods --all-namespaces -o wide > ${dest}/pods.txt || true
-  kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee ${dest}/pod-images.txt || true
-  kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee ${dest}/pod-imagesId.txt || true
-  kubectl describe pods --all-namespaces -l app.kubernetes.io/part-of=voltha > ${dest}/voltha-pods-describe.txt
-  kubectl describe pods -n infra -l app=onos-classic > ${dest}/onos-pods-describe.txt
-  helm ls --all-namespaces > ${dest}/helm-charts.txt
-  """
-  sh '''
-  # copy the ONOS logs directly from the container to avoid the color codes
-  printf '%s\\n' $(kubectl get pods -n infra -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c 'kubectl -n infra cp #:apache-karaf-4.2.14/data/log/karaf.log ''' + dest + '''/#.log' || true
-  '''
-}
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 220, unit: 'MINUTES')
-  }
-  environment {
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-    KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
-    SSHPASS="karaf"
-  }
-  stages{
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Cleanup') {
-      steps {
-        // remove port-forwarding
-        sh """
-          # remove orphaned port-forward from different namespaces
-          ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
-        """
-        helmTeardown(['infra', 'voltha'])
-      }
-    }
-    stage('Create K8s Cluster') {
-      steps {
-        createKubernetesCluster([nodes: 3])
-      }
-    }
-    stage('Run Test') {
-      steps {
-        test_software_upgrade("onos-app-upgrade")
-        test_software_upgrade("voltha-component-upgrade")
-        test_software_upgrade("voltha-component-rolling-upgrade")
-        test_software_upgrade("onu-software-upgrade")
-        test_software_upgrade("onu-software-upgrade-omci-extended-msg")
-        test_software_upgrade("onu-image-dwl-simultaneously")
-      }
-    }
-  }
-  post {
-    aborted {
-      get_pods_info("$WORKSPACE/failed")
-    }
-    failure {
-      get_pods_info("$WORKSPACE/failed")
-    }
-    always {
-      step([$class: 'RobotPublisher',
-         disableArchiveOutput: false,
-         logFileName: 'RobotLogs/*/log*.html',
-         otherFiles: '',
-         outputFileName: 'RobotLogs/*/output*.xml',
-         outputPath: '.',
-         passThreshold: 100,
-         reportFileName: 'RobotLogs/*/report*.html',
-         unstableThreshold: 0,
-         onlyCritical: true]);
-      archiveArtifacts artifacts: '*.log,**/*.log,**/*.gz,*.gz,*.txt,**/*.txt'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.11/tucson-build-and-test.groovy b/jjb/pipeline/voltha/voltha-2.11/tucson-build-and-test.groovy
deleted file mode 100644
index 81b26ab..0000000
--- a/jjb/pipeline/voltha/voltha-2.11/tucson-build-and-test.groovy
+++ /dev/null
@@ -1,367 +0,0 @@
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// used to deploy VOLTHA and configure ONOS physical PODs
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-def clusterName = "kind-ci"
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 120, unit: 'MINUTES')
-  }
-  environment {
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-    KUBECONFIG="$HOME/.kube/kind-${clusterName}"
-    VOLTCONFIG="$HOME/.volt/config"
-    LOG_FOLDER="$WORKSPACE/${workflow}/"
-    APPS_TO_LOG="etcd kafka onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server onos-config-loader"
-
-  }
-  stages{
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          gerritProject: "${gerritProject}",
-          gerritRefspec: "${gerritRefspec}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage ("Parse deployment configuration file") {
-      steps {
-        sh returnStdout: true, script: "rm -rf ${configBaseDir}"
-        sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-
-          if (params.workflow.toUpperCase() == "TT") {
-            error("The Tucson POD does not support TT workflow at the moment")
-          }
-
-          if ( params.workflow.toUpperCase() == "DT" ) {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-          }
-          else if ( params.workflow.toUpperCase() == "TT" ) {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-          }
-          else {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-          }
-        }
-      }
-    }
-    stage('Clean up') {
-      steps {
-        timeout(15) {
-          script {
-            helmTeardown(["default", infraNamespace, volthaNamespace])
-          }
-          timeout(1) {
-            sh returnStdout: false, script: '''
-            # remove orphaned port-forward from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Build patch') {
-      steps {
-        // NOTE that the correct patch has already been checked out
-        // during the getVolthaCode step
-        buildVolthaComponent("${gerritProject}")
-      }
-    }
-    stage('Create K8s Cluster') {
-      steps {
-        script {
-          def clusterExists = sh returnStdout: true, script: """
-          kind get clusters | grep ${clusterName} | wc -l
-          """
-          if (clusterExists.trim() == "0") {
-            createKubernetesCluster([nodes: 3, name: clusterName])
-          }
-        }
-      }
-    }
-    stage('Load image in kind nodes') {
-      steps {
-        loadToKind()
-      }
-    }
-    stage('Install Voltha')  {
-      steps {
-        timeout(20) {
-          script {
-            imageFlags = getVolthaImageFlags(gerritProject)
-            // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
-            def localCharts = false
-            if (volthaHelmChartsChange != "" || gerritProject == "voltha-helm-charts") {
-              localCharts = true
-            }
-            def flags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml ${imageFlags} "
-            // NOTE temporary workaround to expose ONOS node ports (pod-config needs to be updated to contain these values)
-            flags = flags + "--set onos-classic.onosSshPort=30115 " +
-            "--set onos-classic.onosApiPort=30120 " +
-            "--set onos-classic.onosOfPort=31653 " +
-            "--set onos-classic.individualOpenFlowNodePorts=true " + extraHelmFlags
-            volthaDeploy([
-              workflow: workFlow.toLowerCase(),
-              extraHelmFlags: flags,
-              localCharts: localCharts,
-              kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
-              onosReplica: 3,
-              atomixReplica: 3,
-              kafkaReplica: 3,
-              etcdReplica: 3,
-              ])
-          }
-          // start logging
-          sh """
-          rm -rf $WORKSPACE/${workFlow}/
-          mkdir -p $WORKSPACE/${workFlow}
-          _TAG=kail-${workFlow} kail -n infra -n voltha > $WORKSPACE/${workFlow}/onos-voltha-combined.log &
-          """
-          sh returnStdout: false, script: '''
-          # start logging with kail
-
-          mkdir -p $LOG_FOLDER
-
-          list=($APPS_TO_LOG)
-          for app in "${list[@]}"
-          do
-            echo "Starting logs for: ${app}"
-            _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
-          done
-          '''
-          sh """
-          JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-          ps aux | grep port-forward
-          """
-          getPodsInfo("$WORKSPACE")
-        }
-      }
-    }
-    stage('Deploy Kafka Dump Chart') {
-      steps {
-        script {
-          sh returnStdout: false, script: """
-              helm repo add cord https://charts.opencord.org
-              helm repo update
-              if helm version -c --short|grep v2 -q; then
-                helm install -n voltha-kafka-dump cord/voltha-kafka-dump
-              else
-                helm install voltha-kafka-dump cord/voltha-kafka-dump
-              fi
-          """
-        }
-      }
-    }
-    stage('Push Tech-Profile') {
-      when {
-        expression { params.profile != "Default" }
-      }
-      steps {
-        sh returnStdout: false, script: """
-        etcd_container=\$(kubectl get pods -n voltha | grep voltha-etcd-cluster | awk 'NR==1{print \$1}')
-        kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json voltha/\$etcd_container:/tmp/flexpod.json
-        kubectl exec -it \$etcd_container -n voltha -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/XGS-PON/64'
-        """
-      }
-    }
-
-    stage('Push Sadis-config') {
-      steps {
-        sh returnStdout: false, script: """
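-        # refresh the known_hosts entry for the ONOS SSH endpoint (port 30115) before issuing the log:set commands below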
-        ssh-keygen -R [${deployment_config.nodes[0].ip}]:30115
-        ssh-keyscan -p 30115 -H ${deployment_config.nodes[0].ip} >> ~/.ssh/known_hosts
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.dhcpl2relay"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.aaa"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.olt"
-        #TRACE in the pipeliner is too chatty, moving to DEBUG
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.opencord.olt.driver"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager"
-
-        if [[ "${workFlow.toUpperCase()}" == "DT" ]]; then
-          curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-DT.json
-        elif [[ "${workFlow.toUpperCase()}" == "TT" ]]; then
-          curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-TT.json
-        else
-          # this is the ATT case; rename the file to *-sadis-ATT.json so that we can avoid special cases and just load the file
-          curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis.json
-        fi
-        """
-      }
-    }
-    stage('Reinstall OLT software') {
-      when {
-        expression { params.reinstallOlt }
-      }
-      steps {
-        script {
-          deployment_config.olts.each { olt ->
-            sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --remove asfvolt16 && dpkg --purge asfvolt16'"
-            waitUntil {
-              olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
-              return olt_sw_present.toInteger() == 0
-            }
-            if ( params.branch == 'voltha-2.3' ) {
-              oltDebVersion = oltDebVersionVoltha23
-            } else {
-              oltDebVersion = oltDebVersionMaster
-            }
-            sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --install ${oltDebVersion}'"
-            waitUntil {
-              olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
-              return olt_sw_present.toInteger() == 1
-            }
-            if ( olt.fortygig ) {
-              // If the OLT is connected to a 40G switch interface, set the NNI port to be downgraded
-              sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'echo port ce128 sp=40000 >> /broadcom/qax.soc ; /opt/bcm68620/svk_init.sh'"
-            }
-          }
-        }
-      }
-    }
-
-    stage('Restart OLT processes') {
-      steps {
-        script {
-          deployment_config.olts.each { olt ->
-            sh returnStdout: false, script: """
-            ssh-keyscan -H ${olt.ip} >> ~/.ssh/known_hosts
-            sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; reboot'
-            sleep 120
-            """
-            waitUntil {
-              onu_discovered = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'grep \"onu discover indication\" /var/log/openolt.log | wc -l'"
-              return onu_discovered.toInteger() > 0
-            }
-          }
-        }
-      }
-    }
-    stage('Run E2E Tests') {
-      steps {
-        script {
-          // different workflows need different make targets and different robot files
-          if ( params.workflow.toUpperCase() == "DT" ) {
-            robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-            robotFile = "Voltha_DT_PODTests.robot"
-            makeTarget = "voltha-dt-test"
-            robotFunctionalKeyword = "-i functionalDt"
-            robotDataplaneKeyword = "-i dataplaneDt"
-          }
-          else if ( params.workflow.toUpperCase() == "TT" ) {
-            // TODO the TT tests have different tags, address once/if TT is supported on the Tucson POD
-            robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-            robotFile = "Voltha_TT_PODTests.robot"
-            makeTarget = "voltha-tt-test"
-            robotFunctionalKeyword = "-i functionalTt"
-            robotDataplaneKeyword = "-i dataplaneTt"
-          }
-          else {
-            robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-            robotFile = "Voltha_PODTests.robot"
-            makeTarget = "voltha-test"
-            robotFunctionalKeyword = "-i functional"
-            robotDataplaneKeyword = "-i dataplane"
-          }
-        }
-        sh returnStdout: false, script: """
-        mkdir -p $WORKSPACE/RobotLogs
-
-        export ROBOT_CONFIG_FILE="$WORKSPACE/${robotConfigFile}"
-        export ROBOT_MISC_ARGS="${params.extraRobotArgs} --removekeywords wuks -d $WORKSPACE/RobotLogs -v container_log_dir:$WORKSPACE "
-        export ROBOT_FILE="${robotFile}"
-
-        # If the Gerrit comment contains a line with "functional tests" then run the full
-        # functional test suite.  This covers tests tagged either 'sanity' or 'functional'.
-        # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
-        REGEX="functional tests"
-        if [[ "${gerritComment}" =~ \$REGEX ]]; then
-          ROBOT_MISC_ARGS+="${robotFunctionalKeyword} "
-        fi
-        # Likewise for dataplane tests
-        REGEX="dataplane tests"
-        if [[ "${gerritComment}" =~ \$REGEX ]]; then
-          ROBOT_MISC_ARGS+="${robotDataplaneKeyword}"
-        fi
-
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests ${makeTarget} || true
-        """
-      }
-    }
-  }
-  post {
-    always {
-      // stop logging
-      sh """
-        P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${workFlow}" | grep -v grep | awk '{print \$1}')"
-        if [ -n "\$P_IDS" ]; then
-          echo \$P_IDS
-          for P_ID in \$P_IDS; do
-            kill -9 \$P_ID
-          done
-        fi
-        gzip $WORKSPACE/${workFlow}/onos-voltha-combined.log || true
-      """
-      sh '''
-      # stop the kail processes
-      list=($APPS_TO_LOG)
-      for app in "${list[@]}"
-      do
-        echo "Stopping logs for: ${app}"
-        _TAG="kail-$app"
-        P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
-        if [ -n "$P_IDS" ]; then
-          echo $P_IDS
-          for P_ID in $P_IDS; do
-            kill -9 $P_ID
-          done
-        fi
-      done
-      '''
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: 'RobotLogs/log*.html',
-        otherFiles: '',
-        outputFileName: 'RobotLogs/output*.xml',
-        outputPath: '.',
-        passThreshold: 100,
-        reportFileName: 'RobotLogs/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true]);
-      archiveArtifacts artifacts: '**/*.txt,**/*.gz,*.gz,**/*.log'
-    }
-  }
-}
-
-// refs/changes/06/24206/5
diff --git a/jjb/pipeline/voltha/voltha-2.11/voltha-dt-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-2.11/voltha-dt-physical-functional-tests.groovy
deleted file mode 100644
index 5a14eab..0000000
--- a/jjb/pipeline/voltha/voltha-2.11/voltha-dt-physical-functional-tests.groovy
+++ /dev/null
@@ -1,332 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-
-  stages {
-    stage('Clone voltha-system-tests') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${branch}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    // This checkout allows us to show changes in Jenkins
-    // we only do this on master as we don't branch all the repos for all the releases
-    // (we should compute the difference by tracking the container version, not the code)
-    stage('Download All the VOLTHA repos') {
-      when {
-        expression {
-          return "${branch}" == 'master';
-        }
-      }
-      steps {
-       checkout(changelog: true,
-         poll: false,
-         scm: [$class: 'RepoScm',
-           manifestRepositoryUrl: "${params.manifestUrl}",
-           manifestBranch: "${params.branch}",
-           currentBranch: true,
-           destinationDir: 'voltha',
-           forceSync: true,
-           resetFirst: true,
-           quiet: true,
-           jobs: 4,
-           showAllChanges: true]
-         )
-      }
-    }
-    stage ('Initialize') {
-      steps {
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-           deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        }
-        installVoltctl("${branch}")
-        sh """
-        ps -ef | grep port-forward
-        """
-
-        sh returnStdout: false, script: '''
-        # remove orphaned port-forward from different namespaces
-        ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-        '''
-        sh """
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd ${params.VolthaEtcdPort}:2379; done"&
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-        ps aux | grep port-forward
-        """
-
-        sh("""ps -ef | grep port-forward""")
-
-        sh(returnStdout: false, script: """
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-           set +e
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        """)
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FunctionalTests"
-      }
-      steps {
-        sh """
-        ps -ef | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          if ( ${powerSwitch} ); then
-               export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanityDt -i functionalDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-               if ( ${powerCycleOlt} ); then
-                    ROBOT_MISC_ARGS+=" -v power_cycle_olt:True"
-               fi
-          else
-               export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanityDt -i functionalDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('FTTB Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_FTTB_Tests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FunctionalTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = true ]; then
-          export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i sanityDtFttb -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace} -v has_dataplane:False"
-          make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FailureScenarios"
-      }
-      steps {
-        sh """
-        ps -ef | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          if ( ${powerSwitch} ); then
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          else
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Dataplane Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/DataplaneTests"
-      }
-      steps {
-        sh """
-        ps -ef | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          export ROBOT_MISC_ARGS="--removekeywords wuks -i dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-    stage('HA Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_ONOSHATests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ONOSHAScenarios"
-      }
-      steps {
-        sh """
-        ps -ef | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Multiple OLT Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_MultiOLT_Tests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/MultipleOLTScenarios"
-      }
-      steps {
-        sh """
-        ps -ef | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          if ( ${powerSwitch} ); then
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          else
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-
-    stage('Error Scenario Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_ErrorScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/ErrorScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        fi
-        """
-      }
-    }
-  }
-  post {
-    always {
-      getPodsInfo("$WORKSPACE/pods")
-      sh returnStdout: false, script: '''
-      set +e
-
-      # gather the logs collected by the Robot Framework StartLogging keyword
-      cd $WORKSPACE
-      gzip *-combined.log || true
-      rm *-combined.log || true
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-            if (olt.type == null || olt.type == "" || olt.type == "openolt") {
-              sh returnStdout: false, script: """
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
-              """
-            }
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
-    }
-  }
-}
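
For reference, the Initialize and post stages removed above keep long-running "kubectl port-forward" loops alive with JENKINS_NODE_COOKIE and later kill them by pattern-matching ps output. That inline pattern can be factored into a small helper, sketched below; the startPortForward/stopPortForwards names are hypothetical and are not part of the shared cord-jenkins-libraries.

// Hypothetical helpers illustrating the port-forward pattern used inline by the
// deleted Initialize stages; a sketch, not part of cord-jenkins-libraries.
def startPortForward(String namespace, String service, String ports) {
    // JENKINS_NODE_COOKIE=dontKillMe keeps the background loop alive after the sh step returns;
    // the "while true" loop restarts the forward if the connection to the API server drops mid-run.
    sh """
    JENKINS_NODE_COOKIE="dontKillMe" _TAG="${service}" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${namespace} svc/${service} ${ports}; done"&
    """
}

def stopPortForwards() {
    // same cleanup the deleted pipelines run before re-creating the forwards
    sh '''
    ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
    '''
}

// example use, mirroring the deleted stage:
//   stopPortForwards()
//   startPortForward(volthaNamespace, "voltha-voltha-api", "55555:55555")
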
diff --git a/jjb/pipeline/voltha/voltha-2.11/voltha-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-2.11/voltha-physical-functional-tests.groovy
deleted file mode 100644
index 8565148..0000000
--- a/jjb/pipeline/voltha/voltha-2.11/voltha-physical-functional-tests.groovy
+++ /dev/null
@@ -1,269 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-  stages {
-    stage('Clone voltha-system-tests') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${branch}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    stage('Download All the VOLTHA repos') {
-      when {
-        expression {
-          return "${branch}" == 'master';
-        }
-      }
-      steps {
-       checkout(changelog: true,
-         poll: false,
-         scm: [$class: 'RepoScm',
-           manifestRepositoryUrl: "${params.manifestUrl}",
-           manifestBranch: "${params.branch}",
-           currentBranch: true,
-           destinationDir: 'voltha',
-           forceSync: true,
-           resetFirst: true,
-           quiet: true,
-           jobs: 4,
-           showAllChanges: true]
-         )
-      }
-    }
-    stage ('Initialize') {
-      steps {
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-          deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        }
-        installVoltctl("${branch}")
-
-        sh(returnStdout: false, script: """
-
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        if [ "${params.branch}" == "master" ]; then
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-           set +e
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        fi
-        """)
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FunctionalTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-             if ( ${powerCycleOlt} ); then
-                  ROBOT_MISC_ARGS+=" -v power_cycle_olt:True"
-             fi
-        else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        fi
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FailureScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        fi
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-
-    stage('Dataplane Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/DataplaneTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        export ROBOT_MISC_ARGS="--removekeywords wuks -i dataplane -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-    stage('HA Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_ONOSHATests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ONOSHAScenarios"
-      }
-      steps {
-       sh """
-       mkdir -p $ROBOT_LOGS_DIR
-       export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-       ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-       make -C $WORKSPACE/voltha-system-tests voltha-test || true
-       """
-      }
-    }
-
-    stage('Error Scenario Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_ErrorScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ErrorScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-  }
-  post {
-    always {
-      getPodsInfo("$WORKSPACE/pods")
-      sh returnStdout: false, script: '''
-      set +e
-
-      # gather the logs collected by the Robot Framework StartLogging keyword
-      cd $WORKSPACE
-      gzip *-combined.log || true
-      rm *-combined.log || true
-
-      # store information on the running pods
-      kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-imagesId.txt || true
-
-
-      # collect ETCD cluster logs
-      mkdir -p $WORKSPACE/etcd
-      printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-          if (olt.type == null || olt.type == "" || olt.type == "openolt") {
-            sh returnStdout: false, script: """
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
-            """
-          }
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
-    }
-  }
-}
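
Each test stage in the file above assembles a long ROBOT_MISC_ARGS string and then invokes a voltha-system-tests make target with "|| true" so that failures are reported by RobotPublisher rather than by the shell step. A minimal sketch of that pattern as a reusable step follows; the runRobotSuite name and its parameter map are assumptions for illustration only.

// Hypothetical helper showing how the deleted stages build ROBOT_MISC_ARGS before
// calling the voltha-system-tests make target; a sketch, not the job's actual code.
def runRobotSuite(Map cfg) {
    String args = "--removekeywords wuks -i ${cfg.include} -e bbsim -e notready " +
        "-d ${cfg.logsDir} -v POD_NAME:${cfg.podName} " +
        "-v KUBERNETES_CONFIGS_DIR:${cfg.kubeConfigsDir} -v container_log_dir:${env.WORKSPACE}"
    args += " -v NAMESPACE:${cfg.volthaNamespace} -v INFRA_NAMESPACE:${cfg.infraNamespace}"
    withEnv(["ROBOT_MISC_ARGS=${args}",
             "ROBOT_FILE=${cfg.robotFile}",
             "ROBOT_LOGS_DIR=${cfg.logsDir}",
             "ROBOT_CONFIG_FILE=${cfg.configFile}"]) {
        // "|| true" mirrors the stages above: test failures surface through RobotPublisher,
        // not by failing the shell step.
        sh "mkdir -p ${cfg.logsDir} && make -C ${env.WORKSPACE}/voltha-system-tests ${cfg.makeTarget} || true"
    }
}
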
diff --git a/jjb/pipeline/voltha/voltha-2.11/voltha-physical-soak-dt-tests.groovy b/jjb/pipeline/voltha/voltha-2.11/voltha-physical-soak-dt-tests.groovy
deleted file mode 100644
index 6320cfb..0000000
--- a/jjb/pipeline/voltha/voltha-2.11/voltha-physical-soak-dt-tests.groovy
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-def volthaNamespace = "voltha"
-def infraNamespace = "infra"
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-
-
-  stages {
-    stage('Clone voltha-system-tests') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${branch}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    // This checkout allows us to show changes in Jenkins
-    // we only do this on master as we don't branch all the repos for all the releases
-    // (we should compute the difference by tracking the container version, not the code)
-    stage('Download All the VOLTHA repos') {
-      when {
-        expression {
-          return "${branch}" == 'master';
-        }
-      }
-      steps {
-       checkout(changelog: true,
-         poll: false,
-         scm: [$class: 'RepoScm',
-           manifestRepositoryUrl: "${params.manifestUrl}",
-           manifestBranch: "${params.branch}",
-           currentBranch: true,
-           destinationDir: 'voltha',
-           forceSync: true,
-           resetFirst: true,
-           quiet: true,
-           jobs: 4,
-           showAllChanges: true]
-         )
-      }
-    }
-    stage ('Initialize') {
-      steps {
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-           deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        }
-        installVoltctl("${branch}")
-
-        sh(returnStdout: false, script: """
-
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        if [ "${params.branch}" == "master" ]; then
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-           set +e
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        fi
-        """)
-
-        sh("""
-        mkdir -p $WORKSPACE/voltha-pods-mem-consumption
-        cd $WORKSPACE/voltha-system-tests
-        make vst_venv
-        source ./vst_venv/bin/activate || true
-        # Collect initial memory consumption
-        python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption -a 0.0.0.0:31301 -n ${volthaNamespace} || true
-        """)
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FunctionalTests"
-      }
-      steps {
-        sh """
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="prometheus" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n cattle-prometheus svc/access-prometheus 31301:80; done"&
-        ps aux | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ "${params.testType}" == "Functional" ]; then
-            if ( ${powerSwitch} ); then
-                 export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-            else
-                 export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-            fi
-            ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-            make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FailureScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ "${params.testType}" == "Failure" ]; then
-           export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i soak -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-           ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-           make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Dataplane Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/DataplaneTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ "${params.testType}" == "Dataplane" ]; then
-           export ROBOT_MISC_ARGS="--removekeywords wuks -i soakDataplane -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-           ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-           make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-  }
-  post {
-    always {
-      getPodsInfo("$WORKSPACE/pods")
-      sh returnStdout: false, script: '''
-      set +e
-
-      # gather the logs collected by the Robot Framework StartLogging keyword
-      cd $WORKSPACE
-      gzip *-combined.log || true
-      rm *-combined.log || true
-
-      # collect ETCD cluster logs
-      mkdir -p $WORKSPACE/etcd
-      printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-          sh returnStdout: false, script: """
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
-          """
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      // get cpu usage by container
-      sh """
-      mkdir -p $WORKSPACE/plots || true
-      cd $WORKSPACE/voltha-system-tests
-      source ./vst_venv/bin/activate || true
-      sleep 60 # we have to wait for prometheus to collect all the information
-      python scripts/sizing.py -o $WORKSPACE/plots -a 0.0.0.0:31301 -n ${volthaNamespace} -s 3600 || true
-      # Collect memory consumption of voltha pods once all the tests are complete
-      python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption -a 0.0.0.0:31301 -n ${volthaNamespace} || true
-      """
-      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt,plots/*,voltha-pods-mem-consumption/*'
-    }
-  }
-}
-
-// [EOF]
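
The soak pipeline above gates every stage inside its shell script by comparing ${params.testType}. An equivalent formulation keeps the gating in declarative Pipeline so that skipped stages are reported as skipped; the sketch below is illustrative only and is not how the deleted job was written.

// Sketch: gate the stage itself on params.testType instead of wrapping the shell
// body in an if-block; the stage then shows as "skipped" when the guard is false.
stage('Functional Tests') {
    when {
        expression { params.testType == 'Functional' }
    }
    steps {
        sh '''
        mkdir -p "$ROBOT_LOGS_DIR"
        make -C "$WORKSPACE/voltha-system-tests" voltha-dt-test || true
        '''
    }
}
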
diff --git a/jjb/pipeline/voltha/voltha-2.11/voltha-scale-lwc-test.groovy b/jjb/pipeline/voltha/voltha-2.11/voltha-scale-lwc-test.groovy
deleted file mode 100644
index 84308ac..0000000
--- a/jjb/pipeline/voltha/voltha-2.11/voltha-scale-lwc-test.groovy
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA and performs a scale test with the LWC controller
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// [TODO] fix path; an Achilles heel for testing.
-def lwc_helm_chart_path="/home/jenkins/Radisys_LWC_helm_charts"
-def value_file="/home/jenkins/lwc-values.yaml"
-def workflow="dt"
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-      timeout(time: 60, unit: 'MINUTES')
-  }
-  environment {
-    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
-    KUBECONFIG="$HOME/.kube/config"
-    VOLTCONFIG="$HOME/.volt/config"
-    SSHPASS="karaf"
-    VOLTHA_LOG_LEVEL="${logLevel}"
-    NUM_OF_BBSIM="${olts}"
-    NUM_OF_OPENONU="${openonuAdapterReplicas}"
-    NUM_OF_ONOS="${onosReplicas}"
-    NUM_OF_ATOMIX="${atomixReplicas}"
-    EXTRA_HELM_FLAGS=" "
-    LOG_FOLDER="$WORKSPACE/logs"
-    GERRIT_PROJECT="${GERRIT_PROJECT}"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-  }
-
-  stages {
-    stage ('Cleanup') {
-      steps {
-        script {
-          try {
-            timeout(time: 5, unit: 'MINUTES') {
-              sh returnStdout: false, script: '''
-              cd $WORKSPACE
-              rm -rf $WORKSPACE/*
-              '''
-              // removing the voltha-infra chart first
-              // if we don't, ONOS might get stuck because of all the events generated when BBSim goes down
-              sh returnStdout: false, script: '''
-              set +x
-              helm del -n infra voltha-infra || true
-              helm del voltha-infra || true
-              echo -ne "\nWaiting for ONOS to be removed..."
-              onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
-              while [[ $onos != 0 ]]; do
-                onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
-                sleep 5
-                echo -ne "."
-              done
-              '''
-            }
-          } catch(org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-            // if we hit a timeout in the Cleanup phase, ONOS most likely got stuck somewhere, thus force-remove the pods
-            sh '''
-              kubectl get pods | grep Terminating | awk '{print $1}' | xargs kubectl delete pod --force --grace-period=0
-            '''
-          }
-          timeout(time: 10, unit: 'MINUTES') {
-            script {
-              helmTeardown(["default", "voltha1", "voltha-infra"])
-            }
-            sh returnStdout: false, script: '''
-              helm repo add onf https://charts.opencord.org
-              helm repo update
-
-              # remove all persistent volume claims
-              kubectl delete pvc --all-namespaces --all
-              PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-              while [[ \$PVCS != 0 ]]; do
-                sleep 5
-                PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-              done
-
-              # remove orphaned port-forward from different namespaces
-              ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${release}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        timeout(time: 5, unit: 'MINUTES') {
-          installVoltctl("${release}")
-          script {
-            startComponentsLogs([
-              appsToLog: [
-                'app.kubernetes.io/name=etcd',
-                'app.kubernetes.io/name=kafka',
-                'app=lwc',
-                'app=adapter-open-onu',
-                'app=adapter-open-olt',
-                'app=rw-core',
-                'app=bbsim',
-              ]
-            ])
-          }
-        }
-        timeout(time: 10, unit: 'MINUTES') {
-          sh """
-          cd /home/jenkins/Radisys_LWC_helm_charts
-
-          helm dep update ${lwc_helm_chart_path}/voltha-infra
-          helm upgrade --install --create-namespace -n infra voltha-infra ${lwc_helm_chart_path}/voltha-infra -f examples/${workflow}-values.yaml \
-            -f ${value_file} --wait
-
-          # helm dep update ${lwc_helm_chart_path}/voltha-stack
-          helm upgrade --install --create-namespace -n voltha1 voltha1 onf/voltha-stack \
-          --set voltha.ingress.enabled=true --set voltha.ingress.enableVirtualHosts=true --set voltha.fullHostnameOverride=voltha.scale1.dev \
-          -f ${value_file} --wait
-
-          helm upgrade --install -n voltha1 bbsim0 onf/bbsim --set olt_id=10 -f examples/${workflow}-values.yaml --set pon=${pons},onu=${onus} --version 4.6.0 --set oltRebootDelay=5 --wait
-          """
-        }
-      }
-    }
-    stage('Load MIB Template') {
-      when {
-        expression {
-          return params.withMibTemplate
-        }
-      }
-      steps {
-        sh """
-        # load MIB template
-        wget ${mibTemplateUrl} -O mibTemplate.json
-        cat mibTemplate.json | kubectl exec -it -n infra \$(kubectl get pods -n infra |grep etcd-0 | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/12345123451234512345/BBSM_IMG_00001
-        """
-      }
-    }
-    stage('Run Test') {
-      steps {
-        sh """
-          mkdir -p $WORKSPACE/RobotLogs
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-
-          daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward -n infra svc/lwc 8182:8181 --address 0.0.0.0
-          daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward -n voltha1 svc/voltha1-voltha-api 55555 --address 0.0.0.0
-
-          source ./vst_venv/bin/activate
-          robot -d $WORKSPACE/RobotLogs \
-          --exitonfailure \
-          -v pon:${pons} -v onu:${onus} \
-          tests/scale/Voltha_Scale_Tests_lwc.robot
-
-          python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
-          cat $WORKSPACE/execution-time.txt
-        """
-      }
-    }
-  }
-  post {
-    always {
-      stopComponentsLogs()
-      script {
-        try {
-          step([$class: 'RobotPublisher',
-            disableArchiveOutput: false,
-            logFileName: '**/log*.html',
-            otherFiles: '',
-            outputFileName: '**/output*.xml',
-            outputPath: 'RobotLogs',
-            passThreshold: 100,
-            reportFileName: '**/report*.html',
-            onlyCritical: true,
-            unstableThreshold: 0]);
-        } catch (Exception e) {
-            println "Cannot archive Robot Logs: ${e.toString()}"
-        }
-      }
-      plot([
-        csvFileName: 'scale-test.csv',
-        csvSeries: [
-          [file: 'plots/plot-lwc-olts.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-lwc-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-lwc-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-lwc-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-        ],
-        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus}, UNIs: ${unis})", yaxis: 'Time (s)', useDescr: true
-      ])
-      getPodsInfo("$LOG_FOLDER")
-      archiveArtifacts artifacts: 'execution-time.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
-    }
-  }
-}
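
The plot() step in the file above repeats one map literal per data file. When the set of series grows, the list can be generated instead, as in this hypothetical sketch; the file names are taken from the deleted job, while the helper structure is an assumption.

// Hypothetical: build the csvSeries list for the Jenkins plot step from a list of
// plot file names; behaviour matches the literal block in the deleted job.
def plotFiles = [
    'plot-lwc-olts', 'plot-voltha-onus', 'plot-lwc-ports',
    'plot-voltha-flows-before', 'plot-voltha-openolt-flows-before', 'plot-lwc-flows-before',
    'plot-voltha-flows-after', 'plot-voltha-openolt-flows-after', 'plot-lwc-flows-after',
]
def series = plotFiles.collect { name ->
    [file: "plots/${name}.txt", displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: '']
}
plot([
    csvFileName: 'scale-test.csv',
    csvSeries: series,
    group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line',
    title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus}, UNIs: ${unis})",
    yaxis: 'Time (s)', useDescr: true
])
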
diff --git a/jjb/pipeline/voltha/voltha-2.11/voltha-scale-multi-stack.groovy b/jjb/pipeline/voltha/voltha-2.11/voltha-scale-multi-stack.groovy
deleted file mode 100644
index 8420da0..0000000
--- a/jjb/pipeline/voltha/voltha-2.11/voltha-scale-multi-stack.groovy
+++ /dev/null
@@ -1,468 +0,0 @@
-// Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA using kind-voltha and performs a scale test
-
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-      timeout(time: 120, unit: 'MINUTES')
-  }
-  environment {
-    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
-    KUBECONFIG="$HOME/.kube/config"
-    SSHPASS="karaf"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-
-    LOG_FOLDER="$WORKSPACE/logs"
-  }
-
-  stages {
-    stage ('Cleanup') {
-      steps {
-        timeout(time: 11, unit: 'MINUTES') {
-          script {
-            def namespaces = ["infra"]
-            // FIXME we may have leftovers from more VOLTHA stacks (eg: run1 had 10 stacks, run2 had 2 stacks)
-            volthaStacks.toInteger().times {
-              namespaces += "voltha${it + 1}"
-            }
-            helmTeardown(namespaces)
-          }
-          sh returnStdout: false, script: '''
-            helm repo add onf https://charts.opencord.org
-            helm repo update
-
-            # remove all persistent volume claims
-            kubectl delete pvc --all-namespaces --all
-            PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-            while [[ \$PVCS != 0 ]]; do
-              sleep 5
-              PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-            done
-
-            # remove orphaned port-forward from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-
-            cd $WORKSPACE
-            rm -rf $WORKSPACE/*
-          '''
-        }
-      }
-    }
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${release}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Deploy common infrastructure') {
-      // includes monitoring
-      steps {
-        sh '''
-        if [ ${withMonitoring} = true ] ; then
-          helm install -n infra nem-monitoring cord/nem-monitoring \
-          -f $HOME/voltha-scale/grafana.yaml \
-          --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
-          --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
-        fi
-        '''
-      }
-    }
-    stage('Start logging') {
-      steps {
-        script {
-          startComponentsLogs([
-            appsToLog: [
-              'app.kubernetes.io/name=etcd',
-              'app.kubernetes.io/name=kafka',
-              'app=onos-classic',
-              'app=adapter-open-onu',
-              'app=adapter-open-olt',
-              'app=rw-core',
-              'app=ofagent',
-              'app=bbsim',
-              'app=radius',
-              'app=bbsim-sadis-server',
-              'app=onos-config-loader',
-            ]
-          ])
-        }
-      }
-    }
-    stage('Deploy VOLTHA infrastructure') {
-      steps {
-        timeout(time: 5, unit: 'MINUTES') {
-          script {
-            def localCharts = false
-            if (volthaHelmChartsChange != "" || release != "master") {
-              localCharts = true
-            }
-
-            def infraHelmFlags =
-                "--set global.log_level=${logLevel} " +
-                "--set radius.enabled=${withEapol} " +
-                "--set onos-classic.onosSshPort=30115 " +
-                "--set onos-classic.onosApiPort=30120 " +
-                params.extraHelmFlags
-
-            volthaInfraDeploy([
-              workflow: workflow,
-              infraNamespace: "infra",
-              extraHelmFlags: infraHelmFlags,
-              localCharts: localCharts,
-              onosReplica: onosReplicas,
-              atomixReplica: atomixReplicas,
-              kafkaReplica: kafkaReplicas,
-              etcdReplica: etcdReplicas,
-            ])
-          }
-        }
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        installVoltctl("${release}")
-        deploy_voltha_stacks(params.volthaStacks)
-      }
-    }
-    stage('Configuration') {
-      steps {
-        script {
-          sh returnStdout: false, script: """
-
-          # forward ETCD port
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG=etcd-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/etcd 9999:2379; done 2>&1 " &
-
-          # forward ONOS ports
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8101:8101; done 2>&1 " &
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8181:8181; done 2>&1 " &
-
-          # make sure the port-forward has started before moving forward
-          sleep 5
-          """
-          sh returnStdout: false, script: """
-          # TODO this needs to be repeated per stack
-          # kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
-
-          # Set link discovery
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000
-
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.onosproject
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.opencord
-
-          # Set Flows/Ports/Meters poll frequency
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
-
-          # SR is not needed in scale tests and is not currently used by operators in production, so it can be disabled.
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.onosproject.segmentrouting
-
-
-          if [ ${withFlows} = false ]; then
-            sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
-          fi
-          """
-        }
-      }
-    }
-    stage('Setup Test') {
-      steps {
-        sh '''
-          mkdir -p $WORKSPACE/RobotLogs
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-      }
-    }
-    stage('Run Test') {
-      steps {
-        test_voltha_stacks(params.volthaStacks)
-      }
-    }
-  }
-  post {
-    always {
-      stopComponentsLogs([compress: true])
-      // collect result, done in the "post" step so it's executed even in the
-      // event of a timeout in the tests
-      plot([
-        csvFileName: 'scale-test.csv',
-        csvSeries: [
-          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-        ],
-        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (Stacks: ${params.volthaStacks}, OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
-      ])
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: 'RobotLogs/**/log.html',
-        otherFiles: '',
-        outputFileName: 'RobotLogs/**/output.xml',
-        outputPath: '.',
-        passThreshold: 100,
-        reportFileName: 'RobotLogs/**/report.html',
-        onlyCritical: true,
-        unstableThreshold: 0]);
-      // get all the logs from kubernetes PODs
-      sh returnStdout: false, script: '''
-
-        # store information on running charts
-        helm ls --all-namespaces > $LOG_FOLDER/helm-list.txt || true
-
-        # store information on the running pods
-        kubectl get pods --all-namespaces -o wide > $LOG_FOLDER/pods.txt || true
-        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
-        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
-
-        # copy the ONOS logs directly from the container to avoid the color codes
-        printf '%s\n' $(kubectl get pods -n infra -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp -n infra #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
-
-      '''
-      // dump all the BBSim(s) ONU information
-      script {
-        for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
-          stack_ns="voltha"+i
-          sh """
-          mkdir -p \$LOG_FOLDER/${stack_ns}
-          BBSIM_IDS=\$(kubectl -n ${stack_ns} get pods | grep bbsim | grep -v server | awk '{print \$1}')
-          IDS=(\$BBSIM_IDS)
-
-          for bbsim in "\${IDS[@]}"
-          do
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl onu list > \$LOG_FOLDER/${stack_ns}/\$bbsim-device-list.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl service list > \$LOG_FOLDER/${stack_ns}/\$bbsim-service-list.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt resources GEM_PORT > \$LOG_FOLDER/${stack_ns}/\$bbsim-flows-gem-ports.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt resources ALLOC_ID > \$LOG_FOLDER/${stack_ns}/\$bbsim-flows-alloc-ids.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt pons > \$LOG_FOLDER/${stack_ns}/\$bbsim-pon-resources.txt || true
-          done
-          """
-        }
-      }
-      // get ONOS debug info
-      sh '''
-
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt || true
-
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 netcfg > $LOG_FOLDER/onos-netcfg.txt || true
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg get > $LOG_FOLDER/onos-cfg.txt || true
-
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt || true
-
-        if [ ${withFlows} = true ] ; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt || true
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-port-status > $LOG_FOLDER/onos-volt-port-status.txt || true
-        fi
-
-        if [ ${provisionSubscribers} = true ]; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt || true
-        fi
-
-        if [ ${withEapol} = true ] ; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt || true
-        fi
-
-        if [ ${withDhcp} = true ] ; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt || true
-        fi
-      '''
-      // collect etcd metrics
-      sh '''
-        mkdir -p $WORKSPACE/etcd-metrics
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
-      '''
-      // get VOLTHA debug info
-      script {
-        for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
-          stack_ns="voltha"+i
-          voltcfg="~/.volt/config-voltha"+i
-          try {
-            sh """
-
-            # _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555& > /dev/null 2>&1
-            _TAG="voltha-port-forward" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1; done"&
-
-            voltctl -m 32MB device list -o json > $LOG_FOLDER/${stack_ns}/device-list.json || true
-            python -m json.tool $LOG_FOLDER/${stack_ns}/device-list.json > $LOG_FOLDER/${stack_ns}/voltha-devices-list.json || true
-            rm $LOG_FOLDER/${stack_ns}/device-list.json || true
-            voltctl -m 32MB device list > $LOG_FOLDER/${stack_ns}/voltha-devices-list.txt || true
-
-            DEVICE_LIST=
-            printf '%s\n' \$(voltctl -m 32MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB device flows # > $LOG_FOLDER/${stack_ns}/voltha-device-flows-#.txt" || true
-            printf '%s\n' \$(voltctl -m 32MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/${stack_ns}/voltha-device-ports-#.txt" || true
-
-            printf '%s\n' \$(voltctl -m 32MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB logicaldevice flows # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-flows-#.txt" || true
-            printf '%s\n' \$(voltctl -m 32MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB logicaldevice port list # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-ports-#.txt" || true
-
-            # remove VOLTHA port-forward
-            ps aux | grep port-forw | grep voltha-api | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
-            """
-          } catch(e) {
-            println e
-            sh '''
-            echo "Can't get device list from voltctl"
-            '''
-          }
-        }
-      }
-      // get cpu usage by container
-      sh '''
-      if [ ${withMonitoring} = true ] ; then
-        cd $WORKSPACE/voltha-system-tests
-        source ./vst_venv/bin/activate
-        sleep 60 # we have to wait for prometheus to collect all the information
-        python scripts/sizing.py -o $WORKSPACE/plots || true
-      fi
-      '''
-      archiveArtifacts artifacts: 'kind-voltha/install-*.log,execution-time-*.txt,logs/**/*.txt,logs/**/*.tar.gz,logs/**/*.tgz,RobotLogs/**/*,plots/*,etcd-metrics/*'
-    }
-  }
-}
-
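-// Deploys the requested number of VOLTHA stacks sequentially (voltha1, voltha2, ...),
-// building the per-stack helm flags and delegating to the volthaStackDeploy library step.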
-def deploy_voltha_stacks(numberOfStacks) {
-  for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
-    timeout(time: 5, unit: 'MINUTES') {
-      stage("Deploy VOLTHA stack " + i) {
-
-        def localCharts = false
-        if (volthaHelmChartsChange != "" || release != "master") {
-          localCharts = true
-        }
-
-        def volthaHelmFlags =
-                "--set global.log_level=${logLevel} " +
-                "--set enablePerf=true,onu=${onus},pon=${pons} " +
-                "--set securityContext.enabled=false " +
-                params.extraHelmFlags
-
-        volthaStackDeploy([
-          bbsimReplica: olts.toInteger(),
-          infraNamespace: "infra",
-          volthaNamespace: "voltha${i}",
-          stackName: "voltha${i}",
-          stackId: i,
-          workflow: workflow,
-          extraHelmFlags: volthaHelmFlags,
-          localCharts: localCharts,
-          onosReplica: onosReplicas,
-        ])
-      }
-    }
-  }
-}
-
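-// Runs the scale test suite once per deployed stack: re-creates the voltha-api port-forward,
-// assembles the Robot Framework parameters from the job options and collects per-stack timing results.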
-def test_voltha_stacks(numberOfStacks) {
-  for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
-    stage("Test VOLTHA stack " + i) {
-      timeout(time: 15, unit: 'MINUTES') {
-        sh """
-
-        # we are restarting the voltha-api port-forward for each stack, so there is no need for a different voltconfig file
-        voltctl -s 127.0.0.1:55555 config > $HOME/.volt/config
-        export VOLTCONFIG=$HOME/.volt/config
-
-        # _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555& > /dev/null 2>&1
-        _TAG="voltha-port-forward" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1; done"&
-
-        # wait a bit to make sure the port-forwarding has started
-        sleep 5
-
-
-          ROBOT_PARAMS="-v stackId:${i} \
-            -v olt:${olts} \
-            -v pon:${pons} \
-            -v onu:${onus} \
-            -v workflow:${workflow} \
-            -v withEapol:${withEapol} \
-            -v withDhcp:${withDhcp} \
-            -v withIgmp:${withIgmp} \
-            --noncritical non-critical \
-            -e igmp \
-            -e onu-upgrade \
-            -e teardown "
-
-          if [ ${withEapol} = false ] ; then
-            ROBOT_PARAMS+="-e authentication "
-          fi
-
-          if [ ${withDhcp} = false ] ; then
-            ROBOT_PARAMS+="-e dhcp "
-          fi
-
-          if [ ${provisionSubscribers} = false ] ; then
-            # if we're not considering subscribers then we don't care about authentication and dhcp
-            ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
-          fi
-
-          if [ ${withFlows} = false ] ; then
-            ROBOT_PARAMS+="-i setup -i activation "
-          fi
-
-          cd $WORKSPACE/voltha-system-tests
-          source ./vst_venv/bin/activate
-          robot -d $WORKSPACE/RobotLogs/voltha${i} \
-          \$ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-
-          # collect results
-          python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/voltha${i}/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time-voltha${i}.txt || true
-          cat $WORKSPACE/execution-time-voltha${i}.txt
-        """
-        sh """
-          # remove VOLTHA port-forward
-          ps aux | grep port-forw | grep voltha-api | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 2>&1 > /dev/null || true
-        """
-      }
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.11/voltha-scale-test.groovy b/jjb/pipeline/voltha/voltha-2.11/voltha-scale-test.groovy
deleted file mode 100644
index 88d6070..0000000
--- a/jjb/pipeline/voltha/voltha-2.11/voltha-scale-test.groovy
+++ /dev/null
@@ -1,933 +0,0 @@
-// Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA and performs a scale test
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// this function generates the correct parameters for ofAgent
-// to connect to multiple ONOS instances
-def ofAgentConnections(numOfOnos, releaseName, namespace) {
-    def params = " "
-    numOfOnos.times {
-        params += "--set voltha.services.controller[${it}].address=${releaseName}-onos-classic-${it}.${releaseName}-onos-classic-hs.${namespace}.svc:6653 "
-    }
-    return params
-}
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-      timeout(time: 60, unit: 'MINUTES')
-  }
-  environment {
-    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
-    KUBECONFIG="$HOME/.kube/config"
-    VOLTCONFIG="$HOME/.volt/config"
-    SSHPASS="karaf"
-    VOLTHA_LOG_LEVEL="${logLevel}"
-    NUM_OF_BBSIM="${olts}"
-    NUM_OF_OPENONU="${openonuAdapterReplicas}"
-    NUM_OF_ONOS="${onosReplicas}"
-    NUM_OF_ATOMIX="${atomixReplicas}"
-    EXTRA_HELM_FLAGS=" "
-    LOG_FOLDER="$WORKSPACE/logs"
-    GERRIT_PROJECT="${GERRIT_PROJECT}"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-  }
-
-  stages {
-    stage ('Cleanup') {
-      steps {
-        script {
-          try {
-            timeout(time: 5, unit: 'MINUTES') {
-              sh returnStdout: false, script: '''
-              cd $WORKSPACE
-              rm -rf $WORKSPACE/*
-              '''
-              // removing the voltha-infra chart first
-              // if we don't, ONOS might get stuck because of all the events generated when BBSim goes down
-              sh returnStdout: false, script: '''
-              set +x
-              helm del voltha-infra || true
-              echo -ne "\nWaiting for ONOS to be removed..."
-              onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
-              while [[ $onos != 0 ]]; do
-                onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
-                sleep 5
-                echo -ne "."
-              done
-              '''
-            }
-          } catch(org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-            // if we have a timeout in the Cleanup phase, most likely ONOS got stuck somewhere, thus force remove the pods
-            sh '''
-              kubectl get pods | grep Terminating | awk '{print $1}' | xargs kubectl delete pod --force --grace-period=0
-            '''
-          }
-          timeout(time: 10, unit: 'MINUTES') {
-            script {
-              helmTeardown(["default", "voltha1", "infra"])
-            }
-            sh returnStdout: false, script: '''
-              helm repo add onf https://charts.opencord.org
-              helm repo update
-
-              # remove all persistent volume claims
-              kubectl delete pvc --all-namespaces --all
-              PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-              while [[ \$PVCS != 0 ]]; do
-                sleep 5
-                PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-              done
-
-              # remove orphaned port-forward from different namespaces
-              ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${release}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Build patch') {
-      when {
-        expression {
-          return params.GERRIT_PROJECT
-        }
-      }
-      steps {
-        sh """
-        git clone https://\$GERRIT_HOST/\$GERRIT_PROJECT
-        cd \$GERRIT_PROJECT
-        git fetch https://\$GERRIT_HOST/\$GERRIT_PROJECT \$GERRIT_REFSPEC && git checkout FETCH_HEAD
-
-        DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-build
-        DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-push
-        """
-      }
-    }
-    stage('Deploy common infrastructure') {
-      steps {
-        sh '''
-        if [ ${withMonitoring} = true ] ; then
-          helm install nem-monitoring onf/nem-monitoring \
-          -f $HOME/voltha-scale/grafana.yaml \
-          --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
-          --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
-        fi
-        '''
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        timeout(time: 10, unit: 'MINUTES') {
-          installVoltctl("${release}")
-          script {
-            startComponentsLogs([
-              appsToLog: [
-                'app.kubernetes.io/name=etcd',
-                'app.kubernetes.io/name=kafka',
-                'app=voltha-infra-atomix',
-                'app=onos-classic',
-                'app=adapter-open-onu',
-                'app=adapter-open-olt',
-                'app=rw-core',
-                'app=ofagent',
-                'app=bbsim',
-                'app=radius',
-                'app=bbsim-sadis-server',
-                'app=onos-config-loader',
-              ]
-            ])
-            def returned_flags = sh (returnStdout: true, script: """
-
-              export EXTRA_HELM_FLAGS+=' '
-
-              # BBSim custom image handling
-              if [ '${bbsimImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'bbsim' ]; then
-                IFS=: read -r bbsimRepo bbsimTag <<< '${bbsimImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set images.bbsim.repository=\$bbsimRepo,images.bbsim.tag=\$bbsimTag "
-              fi
-
-              # VOLTHA custom image handling
-              if [ '${rwCoreImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-go' ]; then
-                IFS=: read -r rwCoreRepo rwCoreTag <<< '${rwCoreImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=\$rwCoreRepo,voltha.images.rw_core.tag=\$rwCoreTag "
-              fi
-
-              # ofAgent custom image handling
-              if [ '${ofAgentImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'of-agent' ]; then
-                IFS=: read -r ofAgentRepo ofAgentTag <<< '${ofAgentImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=\$ofAgentRepo,voltha.images.ofagent.tag=\$ofAgentTag "
-              fi
-
-              # OpenOLT custom image handling
-              if [ '${openoltAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openolt-adapter' ]; then
-                IFS=: read -r openoltAdapterRepo openoltAdapterTag <<< '${openoltAdapterImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha-adapter-openolt.images.adapter_open_olt.repository=\$openoltAdapterRepo,voltha-adapter-openolt.images.adapter_open_olt.tag=\$openoltAdapterTag "
-              fi
-
-              # OpenONU custom image handling
-              if [ '${openonuAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter' ]; then
-                IFS=: read -r openonuAdapterRepo openonuAdapterTag <<< '${openonuAdapterImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu.repository=\$openonuAdapterRepo,voltha-adapter-openonu.images.adapter_open_onu.tag=\$openonuAdapterTag "
-              fi
-
-              # OpenONU GO custom image handling
-              if [ '${openonuAdapterGoImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter-go' ]; then
-                IFS=: read -r openonuAdapterGoRepo openonuAdapterGoTag <<< '${openonuAdapterGoImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu_go.repository=\$openonuAdapterGoRepo,voltha-adapter-openonu.images.adapter_open_onu_go.tag=\$openonuAdapterGoTag "
-              fi
-
-              # ONOS custom image handling
-              if [ '${onosImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-onos' ]; then
-                IFS=: read -r onosRepo onosTag <<< '${onosImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=\$onosRepo,onos-classic.image.tag=\$onosTag "
-              fi
-
-              # set BBSim parameters
-              EXTRA_HELM_FLAGS+='--set enablePerf=true,pon=${pons},onu=${onus},uni=${unis} '
-
-              # disable the securityContext since this is a development cluster
-              EXTRA_HELM_FLAGS+='--set securityContext.enabled=false '
-
-              # No persistent-volume-claims in Atomix
-              EXTRA_HELM_FLAGS+="--set onos-classic.atomix.persistence.enabled=false "
-
-              # Use custom built images
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-go' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=${dockerRegistry}/voltha/voltha-rw-core,voltha.images.rw_core.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-openolt-adapter' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha-openolt-adapter.images.adapter_open_olt.repository=${dockerRegistry}/voltha/voltha-openolt-adapter,voltha-openolt-adapter.images.adapter_open_olt.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter.images.adapter_open_onu.repository=${dockerRegistry}/voltha/voltha-openonu-adapter,voltha-openonu-adapter.images.adapter_open_onu.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter-go' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter-go.images.adapter_open_onu_go.repository=${dockerRegistry}/voltha/voltha-openonu-adapter-go,voltha-openonu-adapter-go.images.adapter_open_onu_go.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'ofagent-go' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=${dockerRegistry}/voltha/voltha-ofagent-go,ofagent-go.images.ofagent.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-onos' ]; then
-                EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=${dockerRegistry}/voltha/voltha-onos,onos-classic.image.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'bbsim' ]; then
-                EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
-              fi
-              echo \$EXTRA_HELM_FLAGS
-
-            """).trim()
-
-            def extraHelmFlags = returned_flags
-            // The added space before params.extraHelmFlags is required due to the .trim() above
-            def infraHelmFlags =
-              "--set global.log_level=${logLevel} " +
-              "--set radius.enabled=${withEapol} " +
-              "--set onos-classic.onosSshPort=30115 " +
-              "--set onos-classic.onosApiPort=30120 " +
-              extraHelmFlags + " " + params.extraHelmFlags
-
-            println "Passing the following parameters to the VOLTHA infra deploy: ${infraHelmFlags}."
-
-            def localCharts = false
-            if (volthaHelmChartsChange != "") {
-              localCharts = true
-            }
-
-            volthaInfraDeploy([
-              workflow: workflow,
-              infraNamespace: "default",
-              extraHelmFlags: infraHelmFlags,
-              localCharts: localCharts,
-              onosReplica: onosReplicas,
-              atomixReplica: atomixReplicas,
-              kafkaReplica: kafkaReplicas,
-              etcdReplica: etcdReplicas,
-            ])
-
-            stackHelmFlags = " --set onu=${onus},pon=${pons},uni=${unis} --set global.log_level=${logLevel.toLowerCase()} "
-            stackHelmFlags += " --set voltha.ingress.enabled=true --set voltha.ingress.enableVirtualHosts=true --set voltha.fullHostnameOverride=voltha.scale1.dev "
-            stackHelmFlags += extraHelmFlags + " " + params.extraHelmFlags
-
-            volthaStackDeploy([
-              bbsimReplica: olts.toInteger(),
-              infraNamespace: "default",
-              volthaNamespace: "default",
-              stackName: "voltha1", // TODO support custom charts
-              workflow: workflow,
-              extraHelmFlags: stackHelmFlags,
-              localCharts: localCharts,
-              onosReplica: onosReplicas,
-            ])
-            sh """
-              set +x
-
-              echo -ne "\nWaiting for VOLTHA and ONOS to start..."
-              voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
-              onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
-              while [[ \$voltha != 0 || \$onos != 0 ]]; do
-                sleep 5
-                echo -ne "."
-                voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
-                onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
-              done
-              echo -ne "\nVOLTHA and ONOS pods ready\n"
-              kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l
-              kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l
-            """
-            start_port_forward(olts)
-          }
-        }
-      }
-    }
-    stage('Configuration') {
-      steps {
-        script {
-          setOnosLogLevels([
-              onosNamespace: "default",
-              apps: [
-                'org.opencord.dhcpl2relay',
-                'org.opencord.olt',
-                'org.opencord.aaa',
-                'org.opencord.maclearner',
-                'org.onosproject.net.flowobjective.impl.FlowObjectiveManager',
-                'org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager'
-              ],
-              logLevel: logLevel
-          ])
-          def tech_prof_directory = "XGS-PON"
-          sh returnStdout: false, script: """
-          # Configure LLDP link discovery
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
-
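-          # tune FlowRuleManager handling of extraneous (not ONOS-installed) flow rules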
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
-
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000
-
-          # BBSim logs at debug level don't slow down the system much and are very helpful while troubleshooting
-          BBSIM_IDS=\$(kubectl get pods | grep bbsim | grep -v server | awk '{print \$1}')
-          IDS=(\$BBSIM_IDS)
-
-          for bbsim in "\${IDS[@]}"
-          do
-            kubectl exec -t \$bbsim -- bbsimctl log debug false
-          done
-
-          # Set Flows/Ports/Meters/Groups poll frequency
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.group.impl.OpenFlowGroupProvider groupPollInterval ${onosGroupInterval}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.FlowObjectiveManager numThreads ${flowObjWorkerThreads}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager objectiveTimeoutMs 300000
-
-          if [ ${withFlows} = false ]; then
-            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 app deactivate org.opencord.olt
-          fi
-
-          if [ '${workflow}' = 'tt' ]; then
-            etcd_container=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$2}')
-            kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
-            put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64')
-            kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
-            put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65')
-            kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast.json
-            put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66')
-          fi
-
-          if [ ${withPcap} = true ] ; then
-            # Start the tcp-dump in ofagent
-            export OF_AGENT=\$(kubectl get pods -l app=ofagent -o name)
-            kubectl exec \$OF_AGENT -- apk update
-            kubectl exec \$OF_AGENT -- apk add tcpdump
-            _TAG=ofagent-tcpdump kubectl exec \$OF_AGENT -- tcpdump -nei eth0 -w out.pcap&
-
-            # Start the tcp-dump in radius
-            export RADIUS=\$(kubectl get pods -l app=radius -o name)
-            kubectl exec \$RADIUS -- apt-get update
-            kubectl exec \$RADIUS -- apt-get install -y tcpdump
-            _TAG=radius-tcpdump kubectl exec \$RADIUS -- tcpdump -w out.pcap&
-
-            # Start the tcp-dump in ONOS
-            for i in \$(seq 0 \$ONOSES); do
-              INSTANCE="onos-onos-classic-\$i"
-              kubectl exec \$INSTANCE -- apt-get update
-              kubectl exec \$INSTANCE -- apt-get install -y tcpdump
-              kubectl exec \$INSTANCE -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
-              _TAG=\$INSTANCE kubectl exec \$INSTANCE -- /usr/bin/tcpdump -nei eth0 port 1812 -w out.pcap&
-            done
-          fi
-          """
-        }
-      }
-    }
-    stage('Load MIB Template') {
-      when {
-        expression {
-          return params.withMibTemplate
-        }
-      }
-      steps {
-        sh """
-        # load MIB template
-        wget ${mibTemplateUrl} -O mibTemplate.json
-        cat mibTemplate.json | kubectl exec -it \$(kubectl get pods |grep etcd-0 | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/v0.0.1/BBSM_IMG_00001
-        """
-      }
-    }
-    stage('Run Test') {
-      steps {
-        sh '''
-          mkdir -p $WORKSPACE/RobotLogs
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-        sh '''
-          if [ ${withProfiling} = true ] ; then
-            mkdir -p $LOG_FOLDER/pprof
-            echo $PATH
-            # Create the script that periodically collects pprof profiles (heap, goroutine, CPU) from rw-core, the OpenOLT adapter and ofagent
-            cat << EOF > $WORKSPACE/pprof.sh
-timestamp() {
-  date +"%T"
-}
-
-i=0
-while [[ true ]]; do
-  ((i++))
-  ts=$(timestamp)
-  go tool pprof -png http://127.0.0.1:6060/debug/pprof/heap > $LOG_FOLDER/pprof/rw-core-heap-\\$i-\\$ts.png
-  go tool pprof -png http://127.0.0.1:6060/debug/pprof/goroutine > $LOG_FOLDER/pprof/rw-core-goroutine-\\$i-\\$ts.png
-  curl -o $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof http://127.0.0.1:6060/debug/pprof/profile?seconds=10
-  go tool pprof -png $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.png
-
-  go tool pprof -png http://127.0.0.1:6061/debug/pprof/heap > $LOG_FOLDER/pprof/openolt-heap-\\$i-\\$ts.png
-  go tool pprof -png http://127.0.0.1:6061/debug/pprof/goroutine > $LOG_FOLDER/pprof/openolt-goroutine-\\$i-\\$ts.png
-  curl -o $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof http://127.0.0.1:6061/debug/pprof/profile?seconds=10
-  go tool pprof -png $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.png
-
-  go tool pprof -png http://127.0.0.1:6062/debug/pprof/heap > $LOG_FOLDER/pprof/ofagent-heap-\\$i-\\$ts.png
-  go tool pprof -png http://127.0.0.1:6062/debug/pprof/goroutine > $LOG_FOLDER/pprof/ofagent-goroutine-\\$i-\\$ts.png
-  curl -o $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof http://127.0.0.1:6062/debug/pprof/profile?seconds=10
-  go tool pprof -png $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.png
-
-  sleep 10
-done
-EOF
-
-            _TAG="pprof"
-            _TAG=$_TAG bash $WORKSPACE/pprof.sh &
-          fi
-        '''
-        timeout(time: "${testTimeout.toInteger() + 5}", unit: 'MINUTES') {
-          sh '''
-            ROBOT_PARAMS="--exitonfailure \
-              -v olt:${olts} \
-              -v pon:${pons} \
-              -v onu:${onus} \
-              -v ONOS_SSH_PORT:30115 \
-              -v ONOS_REST_PORT:30120 \
-              -v workflow:${workflow} \
-              -v withEapol:${withEapol} \
-              -v withDhcp:${withDhcp} \
-              -v withIgmp:${withIgmp} \
-              -v timeout:${testTimeout}m \
-              -v withMaclearning:${withMaclearning} \
-              --noncritical non-critical \
-              -e onu-upgrade -e igmp -e teardown "
-
-            if [ ${withEapol} = false ] ; then
-              ROBOT_PARAMS+="-e authentication "
-            fi
-
-            if [ ${withDhcp} = false ] ; then
-              ROBOT_PARAMS+="-e dhcp "
-            fi
-
-            if [ ${provisionSubscribers} = false ] ; then
-              # if we're not considering subscribers then we don't care about authentication and dhcp
-              ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
-            fi
-
-            if [ ${withFlows} = false ] ; then
-              ROBOT_PARAMS+="-i setup -i activation "
-            fi
-
-            if [ ${withOnuUpgrade} = true ] ; then
-              ROBOT_PARAMS+="-e flow-before "
-            fi
-
-            cd $WORKSPACE/voltha-system-tests
-            source ./vst_venv/bin/activate
-            robot -d $WORKSPACE/RobotLogs \
-            $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-
-            python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
-            cat $WORKSPACE/execution-time.txt
-          '''
-        }
-      }
-    }
-    stage('Run ONU Upgrade Tests') {
-      environment {
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/OnuUpgradeTests"
-      }
-      when {
-        expression {
-          return params.withOnuUpgrade
-        }
-      }
-      options {
-          timeout(time: "${testTimeout.toInteger() + 1}", unit: 'MINUTES')
-      }
-      steps {
-        sh '''
-          set +e
-          mkdir -p $ROBOT_LOGS_DIR
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-        script {
-          Exception caughtException = null
-
-          catchError(buildResult: 'SUCCESS', stageResult: 'ABORTED') {
-            try {
-              sh '''
-                ROBOT_PARAMS="--exitonfailure \
-                  -v olt:${olts} \
-                  -v pon:${pons} \
-                  -v onu:${onus} \
-                  -v workflow:${workflow} \
-                  -v withEapol:${withEapol} \
-                  -v withDhcp:${withDhcp} \
-                  -v withIgmp:${withIgmp} \
-                  -v timeout:${testTimeout}m \
-                  -v withMaclearning:${withMaclearning} \
-                  -v image_version:BBSM_IMG_00002 \
-                  -v image_url:http://bbsim0:50074/images/software-image.img \
-                  -v image_vendor:BBSM \
-                  -v image_activate_on_success:false \
-                  -v image_commit_on_success:false \
-                  -v image_crc:0 \
-                  -v ONOS_SSH_PORT:30115 \
-                  -v ONOS_REST_PORT:30120 \
-                  --noncritical non-critical \
-                  -i onu-upgrade \
-                  -e setup -e activation -e flow-before \
-                  -e authentication -e provision -e flow-after \
-                  -e dhcp -e igmp -e teardown "
-                cd $WORKSPACE/voltha-system-tests
-                source ./vst_venv/bin/activate
-                robot -d $ROBOT_LOGS_DIR \
-                $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-              '''
-            } catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-              // if the error is a timeout, don't mark the build as failed
-              println "ONU Upgrade test timed out"
-            } catch (Throwable e) {
-              caughtException = e
-            }
-          }
-
-          if (caughtException) {
-            error caughtException.message
-          }
-        }
-      }
-    }
-    stage('Run Igmp Tests') {
-      environment {
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/IgmpTests"
-      }
-      when {
-        expression {
-          return params.withIgmp
-        }
-      }
-      options {
-          timeout(time: "${testTimeout.toInteger() + 1}", unit: 'MINUTES')
-      }
-      steps {
-        sh returnStdout: false, script: """
-          # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.onosproject.store.group.impl
-        """
-        sh '''
-          set +e
-          mkdir -p $ROBOT_LOGS_DIR
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-        script {
-          Exception caughtException = null
-
-          catchError(buildResult: 'SUCCESS', stageResult: 'ABORTED') {
-            try {
-              sh '''
-                ROBOT_PARAMS="--exitonfailure \
-                  -v olt:${olts} \
-                  -v pon:${pons} \
-                  -v onu:${onus} \
-                  -v workflow:${workflow} \
-                  -v withEapol:${withEapol} \
-                  -v withDhcp:${withDhcp} \
-                  -v withIgmp:${withIgmp} \
-                  -v timeout:${testTimeout}m \
-                  -v withMaclearning:${withMaclearning} \
-                  -v ONOS_SSH_PORT:30115 \
-                  -v ONOS_REST_PORT:30120 \
-                  --noncritical non-critical \
-                  -i igmp \
-                  -e setup -e activation -e flow-before \
-                  -e authentication -e provision -e flow-after \
-                  -e dhcp -e onu-upgrade -e teardown "
-                cd $WORKSPACE/voltha-system-tests
-                source ./vst_venv/bin/activate
-                robot -d $ROBOT_LOGS_DIR \
-                $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-              '''
-            } catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-              // if the error is a timeout, don't mark the build as failed
-              println "IGMP test timed out"
-            } catch (Throwable e) {
-              caughtException = e
-            }
-          }
-
-          if (caughtException) {
-            error caughtException.message
-          }
-        }
-      }
-    }
-    stage("Device removal") {
-      options {
-          timeout(time: "${testTimeout.toInteger() + 5}", unit: 'MINUTES')
-      }
-      steps {
-        sh '''
-          set +e
-          mkdir -p $ROBOT_LOGS_DIR
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-        script {
-          Exception caughtException = null
-
-          catchError(buildResult: 'SUCCESS', stageResult: 'ABORTED') {
-            try {
-              sh '''
-                ROBOT_PARAMS="--exitonfailure \
-                  -v olt:${olts} \
-                  -v pon:${pons} \
-                  -v onu:${onus} \
-                  -v ONOS_SSH_PORT:30115 \
-                  -v ONOS_REST_PORT:30120 \
-                  -v workflow:${workflow} \
-                  -v withEapol:${withEapol} \
-                  -v withDhcp:${withDhcp} \
-                  -v withIgmp:${withIgmp} \
-                  -v timeout:${testTimeout}m \
-                  -v withMaclearning:${withMaclearning} \
-                  --noncritical non-critical \
-                  -i teardown"
-
-                  cd $WORKSPACE/voltha-system-tests
-                  source ./vst_venv/bin/activate
-                  robot -d $WORKSPACE/RobotLogs \
-                  $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-                '''
-            } catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-              // if the error is a timeout, don't mark the build as failed
-              println "Cleanup test timed out"
-            } catch (Throwable e) {
-              caughtException = e
-            }
-          }
-        }
-      }
-    }
-  }
-  post {
-    always {
-      stopComponentsLogs()
-      // collect result, done in the "post" step so it's executed even in the
-      // event of a timeout in the tests
-      sh '''
-        if [ ${withPcap} = true ] ; then
-          # stop ofAgent tcpdump
-          P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
-          if [ -n "\$P_ID" ]; then
-            kill -9 \$P_ID
-          fi
-
-          # stop radius tcpdump
-          P_ID="\$(ps e -ww -A | grep "_TAG=radius-tcpdump" | grep -v grep | awk '{print \$1}')"
-          if [ -n "\$P_ID" ]; then
-            kill -9 \$P_ID
-          fi
-
-          # stop onos tcpdump
-          LIMIT=$(($NUM_OF_ONOS - 1))
-          for i in $(seq 0 $LIMIT); do
-            INSTANCE="onos-onos-classic-$i"
-            P_ID="\$(ps e -ww -A | grep "_TAG=$INSTANCE" | grep -v grep | awk '{print \$1}')"
-            if [ -n "\$P_ID" ]; then
-              kill -9 \$P_ID
-            fi
-          done
-
-          # copy the file
-          export OF_AGENT=$(kubectl get pods -l app=ofagent | awk 'NR==2{print $1}') || true
-          kubectl cp $OF_AGENT:out.pcap $LOG_FOLDER/ofagent.pcap || true
-
-          export RADIUS=$(kubectl get pods -l app=radius | awk 'NR==2{print $1}') || true
-          kubectl cp $RADIUS:out.pcap $LOG_FOLDER/radius.pcap || true
-
-          LIMIT=$(($NUM_OF_ONOS - 1))
-          for i in $(seq 0 $LIMIT); do
-            INSTANCE="onos-onos-classic-$i"
-            kubectl cp $INSTANCE:out.pcap $LOG_FOLDER/$INSTANCE.pcap || true
-          done
-        fi
-      '''
-      sh '''
-        if [ ${withProfiling} = true ] ; then
-          _TAG="pprof"
-          P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
-          if [ -n "$P_IDS" ]; then
-            echo $P_IDS
-            for P_ID in $P_IDS; do
-              kill -9 $P_ID
-            done
-          fi
-        fi
-      '''
-      plot([
-        csvFileName: 'scale-test.csv',
-        csvSeries: [
-          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-        ],
-        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus}, UNIs: ${unis})", yaxis: 'Time (s)', useDescr: true
-      ])
-      script {
-        try {
-          step([$class: 'RobotPublisher',
-            disableArchiveOutput: false,
-            logFileName: '**/log*.html',
-            otherFiles: '',
-            outputFileName: '**/output*.xml',
-            outputPath: 'RobotLogs',
-            passThreshold: 100,
-            reportFileName: '**/report*.html',
-            onlyCritical: true,
-            unstableThreshold: 0]);
-        } catch (Exception e) {
-            println "Cannot archive Robot Logs: ${e.toString()}"
-        }
-      }
-
-      getPodsInfo("$LOG_FOLDER")
-      // get all the logs from kubernetes PODs
-      sh returnStdout: false, script: '''
-        # copy the ONOS logs directly from the container to avoid the color codes
-        printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
-
-        # get ONOS cfg from the 3 nodes
-        # kubectl exec -t voltha-infra-onos-classic-0 -- sh /root/onos/${karafHome}/bin/client cfg get > ~/voltha-infra-onos-classic-0-cfg.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-1 -- sh /root/onos/${karafHome}/bin/client cfg get > ~/voltha-infra-onos-classic-1-cfg.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-2 -- sh /root/onos/${karafHome}/bin/client cfg get > ~/voltha-infra-onos-classic-2-cfg.txt || true
-
-        # kubectl exec -t voltha-infra-onos-classic-0 -- sh /root/onos/${karafHome}/bin/client obj-next-ids > ~/voltha-infra-onos-classic-0-next-objs.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-1 -- sh /root/onos/${karafHome}/bin/client obj-next-ids > ~/voltha-infra-onos-classic-1-next-objs.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-2 -- sh /root/onos/${karafHome}/bin/client obj-next-ids > ~/voltha-infra-onos-classic-2-next-objs.txt || true
-
-        # get radius logs out of the container
-        kubectl cp $(kubectl get pods -l app=radius --no-headers  | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER/radius.log || true
-      '''
-      // dump all the BBSim(s) ONU information
-      sh '''
-      BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
-      IDS=($BBSIM_IDS)
-
-      for bbsim in "${IDS[@]}"
-      do
-        kubectl exec -t $bbsim -- bbsimctl onu list > $LOG_FOLDER/$bbsim-device-list.txt || true
-        kubectl exec -t $bbsim -- bbsimctl uni list > $LOG_FOLDER/$bbsim-uni-list.txt || true
-        kubectl exec -t $bbsim -- bbsimctl service list > $LOG_FOLDER/$bbsim-service-list.txt || true
-        kubectl exec -t $bbsim -- bbsimctl olt resources GEM_PORT > $LOG_FOLDER/$bbsim-flows-gem-ports.txt || true
-        kubectl exec -t $bbsim -- bbsimctl olt resources ALLOC_ID > $LOG_FOLDER/$bbsim-flows-alloc-ids.txt || true
-        kubectl exec -t $bbsim -- bbsimctl olt pons > $LOG_FOLDER/$bbsim-pon-resources.txt || true
-      done
-      '''
-      script {
-        // first make sure the port-forward is still running,
-        // sometimes Jenkins kills it regardless of the JENKINS_NODE_COOKIE=dontKillMe
-        def running = sh (
-            script: 'ps aux | grep port-forw | grep -E "onos|voltha" | grep -v grep | wc -l',
-            returnStdout: true
-        ).trim()
-        // if any of the voltha-api, onos-rest, onos-ssh port-forwards are not there
-        // kill all and restart
-        if (running != "3") {
-          start_port_forward(olts)
-        }
-      }
-      // get ONOS debug info
-      sh '''
-
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg get > $LOG_FOLDER/onos-cfg.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 netcfg > $LOG_FOLDER/onos-netcfg.txt
-
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt
-
-        if [ ${withFlows} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-port-status > $LOG_FOLDER/onos-volt-port-status.txt
-        fi
-
-        if [ ${provisionSubscribers} = true ]; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt
-        fi
-
-        if [ ${withEapol} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt
-        fi
-
-        if [ ${withDhcp} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt
-        fi
-
-        if [ ${withIgmp} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-routes > $LOG_FOLDER/onos-mcast-host-routes.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-show > $LOG_FOLDER/onos-mcast-host-show.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 groups > $LOG_FOLDER/onos-groups.txt
-        fi
-
-        if [ ${withMaclearning} = true ] ; then
-           sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mac-learner-get-mapping > $LOG_FOLDER/onos-maclearning-host-mappings.txt
-        fi
-      '''
-      // collect etcd metrics
-      sh '''
-        mkdir -p $WORKSPACE/etcd-metrics
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time-sum.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_bucket' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time-bucket.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_wal_fsync_duration_seconds_bucket' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-wal-fsync-time-bucket.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_network_peer_round_trip_time_seconds_bucket' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-network-peer-round-trip-time-seconds.json || true
-        etcd_namespace=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$1}')
-        etcd_container=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$2}')
-        kubectl exec -it -n  \$etcd_namespace \$etcd_container -- etcdctl defrag --cluster || true
-        kubectl exec -it -n  \$etcd_namespace \$etcd_container -- etcdctl endpoint status -w table > $WORKSPACE/etcd-metrics/etcd-status-table.txt || true
-
-      '''
-      // get VOLTHA debug infos
-      script {
-        try {
-          sh '''
-          voltctl -m 32MB device list -o json > $LOG_FOLDER/device-list.json || true
-          python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
-          rm $LOG_FOLDER/device-list.json || true
-          voltctl -m 32MB device list > $LOG_FOLDER/voltha-devices-list.txt || true
-
-          printf '%s\n' $(voltctl -m 32MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
-          printf '%s\n' $(voltctl -m 32MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true
-
-          printf '%s\n' $(voltctl -m 32MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
-          printf '%s\n' $(voltctl -m 32MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
-          '''
-        } catch(e) {
-          sh '''
-          echo "Can't get device list from voltclt"
-          '''
-        }
-      }
-      // get cpu usage by container
-      sh '''
-      if [ ${withMonitoring} = true ] ; then
-        cd $WORKSPACE/voltha-system-tests
-        make vst_venv
-        source ./vst_venv/bin/activate || true
-        sleep 60 # we have to wait for prometheus to collect all the information
-        python scripts/sizing.py -o $WORKSPACE/plots || true
-      fi
-      '''
-      archiveArtifacts artifacts: 'execution-time.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
-    }
-  }
-}
-
-def start_port_forward(olts) {
-  sh """
-  bbsimRestPortFwd=50071
-  for i in {0..${olts.toInteger() - 1}}; do
-    daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/bbsim\${i} \${bbsimRestPortFwd}:50071
-    ((bbsimRestPortFwd++))
-  done
-  """
-}
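-
-// -----------------------------------------------------------------------
-// Editor's note -- illustrative companion sketch, not part of the original
-// job: stop the daemonized forwarders created by start_port_forward() by
-// matching their kubectl command line (assumes no other port-forwards to
-// the bbsim services are running on the node).
-// -----------------------------------------------------------------------
-def stop_port_forward() {
-  sh """
-  pkill --echo --full 'port-forward --address 0.0.0.0 -n default svc/bbsim' || true
-  """
-}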
diff --git a/jjb/pipeline/voltha/voltha-2.11/voltha-tt-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-2.11/voltha-tt-physical-functional-tests.groovy
deleted file mode 100644
index 1eacba9..0000000
--- a/jjb/pipeline/voltha/voltha-2.11/voltha-tt-physical-functional-tests.groovy
+++ /dev/null
@@ -1,265 +0,0 @@
-// -*- groovy -*-
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-
-  stages {
-    stage('Clone voltha-system-tests') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${branch}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    // This checkout allows us to show changes in Jenkins
-    // we only do this on master as we don't branch all the repos for all the releases
-    // (we should compute the difference by tracking the container version, not the code)
-    stage('Download All the VOLTHA repos') {
-      when {
-        expression {
-          return "${branch}" == 'master';
-        }
-      }
-      steps {
-       checkout(changelog: true,
-         poll: false,
-         scm: [$class: 'RepoScm',
-           manifestRepositoryUrl: "${params.manifestUrl}",
-           manifestBranch: "${params.branch}",
-           currentBranch: true,
-           destinationDir: 'voltha',
-           forceSync: true,
-           resetFirst: true,
-           quiet: true,
-           jobs: 4,
-           showAllChanges: true]
-         )
-      }
-    }
-    stage('Initialize') {
-      steps {
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-          deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-        }
-
-        installVoltctl("${branch}")
-
-        sh returnStdout: false, script: """
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-        if [ "${params.branch}" == "master" ]; then
-           set +e
-
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        fi
-        """
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-        ROBOT_FILE="Voltha_TT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/FunctionalTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i functionalTT -i PowerSwitch -i sanityTT -i sanityTT-MCAST -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-             if ( ${powerCycleOlt} ); then
-                  ROBOT_MISC_ARGS+=" -v power_cycle_olt:True"
-             fi
-        else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i functionalTT -e PowerSwitch -i sanityTT -i sanityTT-MCAST -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        fi
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
-        """
-      }
-    }
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-        ROBOT_FILE="Voltha_TT_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/FailureScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.enableMultiUni} = false ]; then
-          if ( ${powerSwitch} ); then
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          else
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Multi-Tcont Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-        ROBOT_FILE="Voltha_TT_MultiTcontTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/MultiTcontScenarios"
-        ROBOT_TEST_INPUT_FILE="$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-TT-multi-tcont-tests-input.yaml"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.enableMultiUni} = false ]; then
-          if ( ${powerSwitch} ); then
-            export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
-          else
-            export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Multicast Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-        ROBOT_FILE="Voltha_TT_MulticastTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/MulticastTests"
-        ROBOT_TEST_INPUT_FILE="$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-TT-multicast-tests-input.yaml"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.enableMultiUni} = true ]; then
-          if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i multicastTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
-          else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i multicastTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
-        fi
-        """
-      }
-    }
-
-  }
-  post {
-    always {
-      getPodsInfo("$WORKSPACE/pods")
-      sh returnStdout: false, script: '''
-      set +e
-
-      # collect logs collected in the Robot Framework StartLogging keyword
-      cd $WORKSPACE
-      gzip *-combined.log || true
-      rm *-combined.log || true
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-          if (olt.type == null || olt.type == "" || olt.type == "openolt") {
-             sh returnStdout: false, script: """
-             sshpass -p ${olt.pass} scp ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-             sshpass -p ${olt.pass} scp ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-             sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-             sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-             """
-          }
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.12/bbsim-tests.groovy b/jjb/pipeline/voltha/voltha-2.12/bbsim-tests.groovy
deleted file mode 100644
index 0adf32b..0000000
--- a/jjb/pipeline/voltha/voltha-2.12/bbsim-tests.groovy
+++ /dev/null
@@ -1,716 +0,0 @@
-// Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests for openonu-go
-// uses bbsim to simulate OLT/ONUs
-
-// [TODO] Update syntax below to the latest supported
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-    $class: 'GitSCMSource',
-    remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-//------------------//
-//---]  GLOBAL  [---//
-//------------------//
-String clusterName = 'kind-ci'
-
-// -----------------------------------------------------------------------
-// Intent: Return branch name for the script.  A hardcoded value is used
-//   as a guarantee release jobs are running in an expected sandbox.
-// -----------------------------------------------------------------------
-String branchName() {
-    String br = 'voltha-2.12'
-
-    // "${branch}" is assigned by jenkins
-    if (br != branch) {
-        String err = [
-            'ERROR: Detected invalid branch',
-            "(expected=[${br}] != found=[${branch}])"
-        ].join(' ')
-        throw new Exception(err) // groovylint-disable-line CatchException
-    }
-
-    return (br)
-}
-
-// -----------------------------------------------------------------------
-// Intent: Difficult at times to determine when pipeline jobs have
-//   regenerated.  Hardcode a version string that can be assigned
-//   per-script to be sure latest repository changes are being used.
-// -----------------------------------------------------------------------
-String pipelineVer() {
-    String version = '2563719757ee52af49cf9da3fe76ed4bb6877588'
-    return(version)
-}
-
-// -----------------------------------------------------------------------
-// Intent: Due to lack of a reliable stack trace, construct a literal.
-//         Jenkins will re-write the call stack for serialization.
-// -----------------------------------------------------------------------
-// Note: Hardcoded version string used to visualize changes in jenkins UI
-// -----------------------------------------------------------------------
-String getIam(String func) {
-    String branchName = branchName()
-    String version    = pipelineVer()
-    String src = [
-        'ci-management',
-        'jjb',
-        'pipeline',
-        'voltha',
-        branchName,
-        'bbsim-tests.groovy'
-    ].join('/')
-
-    String name = [src, version, func].join('::')
-    return(name)
-}
-
-// -----------------------------------------------------------------------
-// Intent: Log progress message
-// -----------------------------------------------------------------------
-void enter(String name) {
-    // Announce ourselves for log usability
-    String iam = getIam(name)
-    println("${iam}: ENTER")
-    return
-}
-
-// -----------------------------------------------------------------------
-// Intent: Log progress message
-// -----------------------------------------------------------------------
-void leave(String name) {
-    // Announce ourselves for log usability
-    String iam = getIam(name)
-    println("${iam}: LEAVE")
-    return
-}
-
-// -----------------------------------------------------------------------
-// Intent: Determine if working on a release branch.
-//   Note: Conditional is legacy, should also check for *-dev or *-pre
-// -----------------------------------------------------------------------
-Boolean isReleaseBranch(String name) {
-    // List modifiers = ['-dev', '-pre', 'voltha-x.y.z-pre']
-    // if branchName in modifiers
-    return(name != 'master') // OR branchName.contains('-')
-}
-
-// -----------------------------------------------------------------------
-// Intent: Terminate orphaned port-forward from different namespaces
-// -----------------------------------------------------------------------
-void cleanupPortForward() {
-    enter('cleanupPortForward')
-
-    Map pkpfArgs =\
-    [
-        'banner'     : true, // display banner for logging
-        'show_procs' : true, // display procs under consideration
-        'filler'     : true  // fix conditional trailing comma
-    ]
-
-    // 'kubectl.*port-forward'
-    pkill_port_forward('port-forward', pkpfArgs)
-    leave('cleanupPortForward')
-    return
-}
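-
-// -----------------------------------------------------------------------
-// Editor's note -- illustrative sketch, not part of the original job:
-// a minimal shell-only fallback with roughly the same effect as
-// pkill_port_forward(), assuming the orphaned forwarders match the
-// 'port-forw' command-line pattern used elsewhere in this change.
-// -----------------------------------------------------------------------
-void cleanupPortForwardFallback() {
-    sh(label  : 'Kill orphaned kubectl port-forward processes',
-       script : '''
-ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-''')
-    return
-}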
-
-// -----------------------------------------------------------------------
-// Intent: Iterate over a list of test suites and invoke.
-// -----------------------------------------------------------------------
-void execute_test\
-(
-    String  testTarget,                       // functional-single-kind-dt
-    String  workflow,                         // dt
-    String  testLogging,                      // 'True'
-    Boolean teardown,                         // true
-    String  testSpecificHelmFlags=''
-) {
-    String infraNamespace  = 'default'
-    String volthaNamespace = 'voltha'
-    String logsDir = "$WORKSPACE/${testTarget}"
-
-    // -----------------------------------------------------------------------
-    // Intent: Cleanup stale port-forwarding
-    // -----------------------------------------------------------------------
-    stage('Cleanup') {
-        if (teardown) {
-            timeout(15) {
-                script {
-                    helmTeardown(['default', infraNamespace, volthaNamespace])
-                }
-            } // timeout
-
-            timeout(5) {
-                script {
-                    enter('Cleanup')
-                    cleanupPortForward()
-                    leave('Cleanup')
-                } // script
-            } // timeout
-        } // teardown
-    }// stage('Cleanup')
-
-    // -----------------------------------------------------------------------
-    // -----------------------------------------------------------------------
-    stage('Deploy common infrastructure') {
-        script {
-            String dashargs = [
-                'kpi_exporter.enabled=false',
-                'dashboards.xos=false',
-                'dashboards.onos=false',
-                'dashboards.aaa=false',
-                'dashboards.voltha=false',
-            ].join(',')
-
-            String promargs = [
-                'prometheus.alertmanager.enabled=false',
-                'prometheus.pushgateway.enabled=false',
-            ].join(',')
-
-            sh(label  : 'Deploy common infrastructure',
-               script : """
-    helm repo add onf https://charts.opencord.org
-    helm repo update
-
-    echo -e "\nwithMonitoring=[$withMonitoring]"
-    if [ ${withMonitoring} = true ] ; then
-      helm install nem-monitoring onf/nem-monitoring \
-          --set ${promargs} \
-          --set ${dashargs}
-    fi
-    """)
-        } // script
-    } // stage('Deploy Common Infra')
-
-    // -----------------------------------------------------------------------
-    // [TODO] Check onos_log output
-    // -----------------------------------------------------------------------
-    stage('Deploy Voltha') {
-        if (teardown)      {
-            timeout(10)    {
-                script     {
-                    String iam = getIam('Deploy Voltha')
-                    String onosLog = "${logsDir}/onos-voltha-startup-combined.log"
-
-                    sh(label  : 'Launch kail-startup',
-                       script : """
-mkdir -p "$logsDir"
-touch "$onosLog"
-
-_TAG=kail-startup kail -n ${infraNamespace} -n ${volthaNamespace} > "$onosLog" &
-""")
-
-                    // if we're downloading a voltha-helm-charts patch,
-                    // install from a local copy of the charts
-                    Boolean localCharts = false
-
-                    if (volthaHelmChartsChange != ''
-                        || gerritProject == 'voltha-helm-charts'
-                        || isReleaseBranch(branch) // branch != 'master'
-                    ) {
-                        localCharts = true
-                    }
-
-                    String branchName = branchName()
-                    Boolean isRelease = isReleaseBranch(branch)
-                    println([
-                        " ** localCharts=${localCharts}",
-                        "branchName=${branchName}",
-                        "branch=${branch}",
-                        "branch=isReleaseBranch=${isRelease}",
-                    ].join(', '))
-
-                    // -----------------------------------------------------------------------
-                    // Rewrite localHelmFlags using array join, moving code around and
-                    // refactoring into standalone functions
-                    // -----------------------------------------------------------------------
-                    // NOTE temporary workaround expose ONOS node ports
-                    // -----------------------------------------------------------------------
-                    String localHelmFlags = [
-                        extraHelmFlags.trim(),
-                        "--set global.log_level=${logLevel.toUpperCase()}",
-                        '--set onos-classic.onosSshPort=30115',
-                        '--set onos-classic.onosApiPort=30120',
-                        '--set onos-classic.onosOfPort=31653',
-                        '--set onos-classic.individualOpenFlowNodePorts=true',
-                        testSpecificHelmFlags
-                    ].join(' ')
-
-                    println("** ${iam} localHelmFlags = ${localHelmFlags}")
-
-                    if (gerritProject != '') {
-                        localHelmFlags += getVolthaImageFlags(gerritProject)
-                    }
-
-                    enter('volthaDeploy')
-                    volthaDeploy([
-                        infraNamespace: infraNamespace,
-                        volthaNamespace: volthaNamespace,
-                        workflow: workflow.toLowerCase(),
-                        withMacLearning: enableMacLearning.toBoolean(),
-                        extraHelmFlags: localHelmFlags,
-                        localCharts: localCharts,
-                        bbsimReplica: olts.toInteger(),
-                        dockerRegistry: registry,
-                    ])
-                    leave('volthaDeploy')
-                } // script
-
-                script { pgrep_port_forward() }
-
-                sh(label  : 'Terminate kail-startup',
-                   script : """
-if [[ \$(pgrep --count '_TAG=kail-startup') -gt 0 ]]; then
-    pkill --uid "\$(id -u)" --echo --full '_TAG=kail-startup'
-fi
-""")
-
-                sh(label  : 'Lingering kail-startup check',
-                   script : """
-pgrep --uid "\$(id -u)" --list-full --full 'kail-startup' || true
-""")
-
-                // -----------------------------------------------------------------------
-                // Bundle onos-voltha / kail logs
-                // -----------------------------------------------------------------------
-                sh(
-                    label  : 'Bundle logs: onos-voltha-startup-combined',
-                    script : """
-cat <<EOM
-
-** -----------------------------------------------------------------------
-** Combine and compress voltha startup log(s)
-** -----------------------------------------------------------------------
-EOM
-
-pushd "${logsDir}" || { echo "ERROR: pushd $logsDir failed"; exit 1; }
-gzip -k onos-voltha-startup-combined.log
-rm onos-voltha-startup-combined.log
-popd               || { echo "ERROR: popd $logsDir failed"; exit 1; }
-        """)
-            } // timeout(10)
-
-            // -----------------------------------------------------------------------
-            // -----------------------------------------------------------------------
-            sh(label  : 'while-true-port-forward',
-               script : """
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-      bbsimDmiPortFwd=50075
-      for i in {0..${olts.toInteger() - 1}}; do
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="bbsim\${i}" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/bbsim\${i} \${bbsimDmiPortFwd}:50075; done"&
-        ((bbsimDmiPortFwd++))
-      done
-      if [ ${withMonitoring} = true ] ; then
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="nem-monitoring-prometheus-server" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n default svc/nem-monitoring-prometheus-server 31301:80; done"&
-      fi
-#      ps aux | grep port-forward
-""")
-            // ---------------------------------
-            // Sanity check port-forward spawned
-            // ---------------------------------
-            // [TODO] - Wait until forwarding successful else fatal
-            script { pgrep_port_forward() }
-
-            // setting ONOS log level
-            script {
-                enter('setOnosLogLevels')
-                setOnosLogLevels([
-                    onosNamespace: infraNamespace,
-                    apps: [
-                        'org.opencord.dhcpl2relay',
-                        'org.opencord.olt',
-                        'org.opencord.aaa',
-                        'org.opencord.maclearner',
-                        'org.onosproject.net.flowobjective.impl.FlowObjectiveManager',
-                        'org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager'
-                    ],
-                    logLevel: logLevel
-                ])
-                leave('setOnosLogLevels')
-            } // script
-        } // if (teardown)
-    } // stage('Deploy Voltha')
-
-    // -----------------------------------------------------------------------
-    // -----------------------------------------------------------------------
-    stage("Run test ${testTarget} on workflow ${workflow}") {
-        sh(
-            label : 'Monitor using mem_consumption.py',
-            script : """
-echo -e "\n** Monitor using mem_consumption.py ?"
-
-if [ ${withMonitoring} = true ] ; then
-    cat <<EOM
-
-** -----------------------------------------------------------------------
-** Monitoring memory usage with mem_consumption.py
-** -----------------------------------------------------------------------
-EOM
-  mkdir -p "$WORKSPACE/voltha-pods-mem-consumption-${workflow}"
-  cd "$WORKSPACE/voltha-system-tests"
-
-  echo '** Installing python virtualenv'
-  make venv-activate-patched
-
-  # Collect initial memory consumption
-  set +u && source .venv/bin/activate && set -u
-  python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption-${workflow} -a 0.0.0.0:31301 -n ${volthaNamespace}
-fi
-
-echo -e '** Monitor memory consumption: LEAVE\n'
-""")
-
-        sh(
-            label  : "make testTarget=[${testTarget}]",
-            script : """
-echo -e "\n** make testTarget=[${testTarget}]"
-mkdir -p ${logsDir}
-export ROBOT_MISC_ARGS="-d ${logsDir} ${params.extraRobotArgs} "
-ROBOT_MISC_ARGS+="-v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120 -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace} -v container_log_dir:${logsDir} -v logging:${testLogging}"
-export KVSTOREPREFIX=voltha/voltha_voltha
-
-make -C "$WORKSPACE/voltha-system-tests" ${testTarget}
-""")
-
-        getPodsInfo("${logsDir}")
-
-        // [TODO] make conditional, bundle when logs are available
-        sh(
-            label : 'Gather robot Framework logs',
-            script : """
-echo -e '\n** Gather robot Framework logs: ENTER'
-
-# set +e
-# collect logs collected in the Robot Framework StartLogging keyword
-cd "${logsDir}"
-
-echo "** Available logs:"
-/bin/ls -l "$logsDir"
-echo
-
-echo '** Bundle combined log'
-gzip *-combined.log || true
-rm -f *-combined.log || true
-
-echo -e '** Gather robot Framework logs: LEAVE\n'
-""")
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        sh(
-            label  : 'Monitor pod-mem-consumption',
-            script : """
-echo -e '** Monitor pod-mem-consumption: ENTER'
-if [ ${withMonitoring} = true ] ; then
-      cat <<EOM
-
-** -----------------------------------------------------------------------
-** Monitoring pod-memory-consumption using mem_consumption.py
-** -----------------------------------------------------------------------
-EOM
-
-cd "$WORKSPACE/voltha-system-tests"
-
-echo '** Installing python virtualenv'
-make venv-activate-patched
-
-# Collect memory consumption of voltha pods once all the tests are complete
-set +u && source .venv/bin/activate && set -u
-python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption-${workflow} -a 0.0.0.0:31301 -n ${volthaNamespace}
-fi
-echo -e '** Monitor pod-mem-consumption: LEAVE\n'
-""")
-    } // stage
-
-    return
-} // execute_test()
-
-// -----------------------------------------------------------------------
-// -----------------------------------------------------------------------
-void collectArtifacts(exitStatus) {
-    script {
-        String iam = getIam('collectArtifacts')
-        enter("exitStatus=${exitStatus}")
-
-        println("""
-
-** -----------------------------------------------------------------------
-** IAM: $iam
-** collectArtifacts
-** -----------------------------------------------------------------------
-""")
-    }
-
-    getPodsInfo("$WORKSPACE/${exitStatus}")
-
-    sh(label  : 'kubectl logs > voltha.log',
-       script : """
-kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha \
-    > $WORKSPACE/${exitStatus}/voltha.log
-""")
-
-    archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.txt,**/*.html,**/voltha-pods-mem-consumption-att/*,**/voltha-pods-mem-consumption-dt/*,**/voltha-pods-mem-consumption-tt/*'
-
-    script {
-        enter('pkill _TAG=kail-startup')
-        sh(label  : 'pgrep_proc - kill-pre',
-           script : """
-pgrep --uid "\$(id -u)" --list-full --full 'kail-startup' || true
-""")
-        sh(label  : 'pkill_proc - kail',
-           script : """
-if [[ \$(pgrep --count '_TAG=kail') -gt 0 ]]; then
-    pkill --uid "\$(id -u)" --echo --full 'kail'
-fi
-""")
-        leave('pkill _TAG=kail-startup')
-    }
-
-    enter('RobotPublisher')
-    step([$class: 'RobotPublisher',
-          disableArchiveOutput: false,
-          logFileName: '**/*/log*.html',
-          otherFiles: '',
-          outputFileName: '**/*/output*.xml',
-          outputPath: '.',
-          passThreshold: 100,
-          reportFileName: '**/*/report*.html',
-          unstableThreshold: 0,
-          onlyCritical: true])
-    leave('RobotPublisher')
-
-    leave("exitStatus=${exitStatus}")
-    return
-}
-
-// -----------------------------------------------------------------------
-// Intent: main
-// -----------------------------------------------------------------------
-pipeline {
-    /* no label, executor is determined by JJB */
-    agent {
-        label "${params.buildNode}"
-    }
-
-    options {
-        timeout(time: "${timeout}", unit: 'MINUTES')
-    }
-
-    environment {
-        KUBECONFIG = "$HOME/.kube/kind-${clusterName}"
-        VOLTCONFIG = "$HOME/.volt/config"
-        PATH = "$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-        DIAGS_PROFILE = 'VOLTHA_PROFILE'
-        SSHPASS = 'karaf'
-    }
-
-    stages {
-        stage('Download Code') {
-            steps {
-                getVolthaCode([
-                    branch: "${branch}",
-                    gerritProject: "${gerritProject}",
-                    gerritRefspec: "${gerritRefspec}",
-                    volthaSystemTestsChange: "${volthaSystemTestsChange}",
-                    volthaHelmChartsChange: "${volthaHelmChartsChange}",
-                ])
-            }
-        }
-
-        stage('Build patch v1.1') {
-            // build the patch only if gerritProject is specified
-            when {
-                expression { return !gerritProject.isEmpty() }
-            }
-
-            steps {
-                // NOTE that the correct patch has already been checked out
-                // during the getVolthaCode step
-                buildVolthaComponent("${gerritProject}")
-            }
-        }
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        stage('Install Kail')
-        {
-            steps
-            {
-                script
-                {
-                    String cmd = [
-                        'make',
-                        '--no-print-directory',
-                        '-C', "$WORKSPACE/voltha-system-tests",
-                        "KAIL_PATH=\"$WORKSPACE/bin\"",
-                        'kail',
-                    ].join(' ')
-
-                    println(" ** Running: ${cmd}")
-                    sh("${cmd}")
-                } // script
-            } // steps
-        } // stage
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        stage('Install Tools') {
-            steps              {
-                script         {
-                    String branchName = branchName()
-                    String iam = getIam('Install Tools')
-
-                    println("${iam}: ENTER (branch=$branch)")
-                    installKind(branch)   // needed early by stage(Cleanup)
-                    println("${iam}: LEAVE (branch=$branch)")
-                } // script
-            } // steps
-        } // stage
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        stage('Create K8s Cluster') {
-            steps {
-                script {
-                    def clusterExists = sh(
-                        label : 'Create K8s Cluster',
-                        returnStdout: true,
-                        script: """kind get clusters | grep "${clusterName}" | wc -l""")
-
-                    if (clusterExists.trim() == '0') {
-                        createKubernetesCluster([nodes: 3, name: clusterName])
-                    }
-                } // script
-            } // steps
-        } // stage('Create K8s Cluster')
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        stage('Replace voltctl') {
-            // if the project is voltctl, override the downloaded one with the built one
-            when {
-                expression { return gerritProject == 'voltctl' }
-            }
-
-            // Hmmmm(?) where did the voltctl download happen ?
-            // Likely Makefile but would be helpful to document here.
-            steps {
-                script {
-                    String iam = getIam('Replace voltctl')
-
-                    println("${iam} Running: installVoltctl($branch)")
-                    println("${iam}: ENTER")
-                    installVoltctl("$branch")
-                    println("${iam}: LEAVE")
-                } // script
-            } // step
-        } // stage
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        stage('Load image in kind nodes')
-        {
-            when {
-                expression { return !gerritProject.isEmpty() }
-            }
-            steps {
-                loadToKind()
-            } // steps
-        } // stage
-
-        // -----------------------------------------------------------------------
-        // [TODO] verify testing output
-        // -----------------------------------------------------------------------
-        stage('Parse and execute tests')
-        {
-            steps {
-                script {
-                    // Announce ourselves for log usability
-                    enter('Parse and execute tests')
-
-                    def tests = readYaml text: testTargets // parsed YAML: a List of test Maps
-                    println("** [DEBUG]: tests=$tests")
-
-                    // Display expected tests for times when output goes dark
-                    tests.eachWithIndex { test, idx ->
-                        String  target = test['target']
-                        println("**      test[${idx}]: ${target}\n")
-                    }
-
-                    println('''
-** -----------------------------------------------------------------------
-** NOTE: For odd/silent job failures verify a few details
-**   - All tests mentioned in the tests-to-run index were logged.
-**   - Test suites display ENTER/LEAVE message pairs.
-**   - Processing terminated prematurely when LEAVE strings are missing.
-** -----------------------------------------------------------------------
-''')
-                    tests.eachWithIndex { test, idx ->
-                        println "** readYaml test suite[$idx]) test=[${test}]"
-
-                        String  target      = test['target']
-                        String  workflow    = test['workflow']
-                        String  flags       = test['flags']
-                        Boolean teardown    = test['teardown'].toBoolean()
-                        Boolean logging     = test['logging'].toBoolean()
-                        String  testLogging = (logging) ? 'True' : 'False'
-
-                        print("""
-** -----------------------------------------------------------------------
-** Executing test ${target} on workflow ${workflow} with logging ${testLogging} and extra flags ${flags}
-** -----------------------------------------------------------------------
-""")
-
-                        try {
-                            enter("execute_test (target=$target)")
-                            execute_test(target, workflow, testLogging, teardown, flags)
-                        }
-                        // groovylint-disable-next-line CatchException
-                        catch (Exception err) {
-                            String iamexc = getIam(target)
-                            println("** ${iamexc}: EXCEPTION ${err}")
-                        }
-                        finally {
-                            leave("execute_test (target=$target)")
-                        }
-                    } // for
-                    // Premature exit if this message is not logged
-                    leave('Parse and execute tests')
-                } // script
-            } // steps
-        } // stage
-    } // stages
-
-    post
-    {
-        aborted {
-            collectArtifacts('aborted')
-            script { cleanupPortForward() }
-        }
-        failure {
-            collectArtifacts('failed')
-            script { cleanupPortForward() }
-        }
-        always {
-            collectArtifacts('always')
-            script { cleanupPortForward() }
-        }
-    }
-} // pipeline
-
-// [EOF]
diff --git a/jjb/pipeline/voltha/voltha-2.12/device-management-mock-tests.groovy b/jjb/pipeline/voltha/voltha-2.12/device-management-mock-tests.groovy
deleted file mode 100644
index 8362a08..0000000
--- a/jjb/pipeline/voltha/voltha-2.12/device-management-mock-tests.groovy
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def localCharts = false
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 90, unit: 'MINUTES')
-  }
-  environment {
-    KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-    ROBOT_MISC_ARGS="-d $WORKSPACE/RobotLogs"
-  }
-
-  stages {
-
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          gerritProject: "${gerritProject}",
-          gerritRefspec: "${gerritRefspec}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Build Redfish Importer Image') {
-      steps {
-        sh """
-           make -C $WORKSPACE/device-management/\$1 DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build-importer
-           """
-      }
-    }
-    stage('Build demo_test Image') {
-      steps {
-        sh """
-           make -C $WORKSPACE/device-management/\$1/demo_test DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
-           """
-      }
-    }
-    stage('Build mock-redfish-server  Image') {
-      steps {
-        sh """
-           make -C $WORKSPACE/device-management/\$1/mock-redfish-server DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
-           """
-      }
-    }
-    stage('Create K8s Cluster') {
-      steps {
-        createKubernetesCluster([nodes: 3])
-      }
-    }
-    stage('Load image in kind nodes') {
-      steps {
-        loadToKind()
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        script {
-          if (branch != "master" || volthaHelmChartsChange != "") {
-            // if we're using a release or testing changes in the charts, then use the local clone
-            localCharts = true
-          }
-        }
-        volthaDeploy([
-          workflow: "att",
-          extraHelmFlags: extraHelmFlags,
-          dockerRegistry: "mirror.registry.opennetworking.org",
-          localCharts: localCharts,
-        ])
-        // start logging
-        sh """
-        mkdir -p $WORKSPACE/att
-        _TAG=kail-att kail -n infra -n voltha -n default > $WORKSPACE/att/onos-voltha-combined.log &
-        """
-        // forward ONOS and VOLTHA ports
-        sh """
-        _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101&
-        _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181&
-        _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha svc/voltha-voltha-api 55555:55555&
-        """
-      }
-    }
-
-    stage('Run E2E Tests') {
-      steps {
-        sh '''
-           mkdir -p $WORKSPACE/RobotLogs
-
-           # tell the kubernetes script to use images tagged citest and pullPolicy:Never
-           sed -i 's/master/citest/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
-           sed -i 's/imagePullPolicy: Always/imagePullPolicy: Never/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
-           make -C $WORKSPACE/device-management functional-mock-test || true
-           '''
-      }
-    }
-  }
-
-  post {
-    always {
-      sh '''
-         set +e
-         kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
-         kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
-         kubectl get nodes -o wide
-         kubectl get pods -o wide --all-namespaces
-
-         sync
-         pkill kail || true
-
-         ## Pull out errors from log files
-         extract_errors_go() {
-           echo
-           echo "Error summary for $1:"
-           grep $1 $WORKSPACE/att/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
-           echo
-         }
-
-         extract_errors_python() {
-           echo
-           echo "Error summary for $1:"
-           grep $1 $WORKSPACE/att/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
-           echo
-         }
-
-         extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
-         extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
-         extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
-         extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-
-         gzip $WORKSPACE/att/onos-voltha-combined.log
-         '''
-         step([$class: 'RobotPublisher',
-            disableArchiveOutput: false,
-            logFileName: 'RobotLogs/log*.html',
-            otherFiles: '',
-            outputFileName: 'RobotLogs/output*.xml',
-            outputPath: '.',
-            passThreshold: 80,
-            reportFileName: 'RobotLogs/report*.html',
-            unstableThreshold: 0]);
-         archiveArtifacts artifacts: '**/*.log,**/*.gz'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.12/dmi-build-and-test.groovy b/jjb/pipeline/voltha/voltha-2.12/dmi-build-and-test.groovy
deleted file mode 100755
index 6d66a53..0000000
--- a/jjb/pipeline/voltha/voltha-2.12/dmi-build-and-test.groovy
+++ /dev/null
@@ -1,355 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2022-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-// Intent: used to deploy VOLTHA and configure ONOS physical PODs
-//
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-def deploy_custom_chart(namespace, name, chart, extraHelmFlags) {
-  sh """
-    helm install --create-namespace --set defaults.image_pullPolicy=Always --namespace ${namespace} ${extraHelmFlags} ${name} ${chart}
-   """
-}
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 45, unit: 'MINUTES')
-  }
-  environment {
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    LOG_FOLDER="$WORKSPACE/dmi/"
-    APPS_TO_LOG="${OltDevMgr}"
-  }
-
-  stages{
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage ("Parse deployment configuration file") {
-      steps {
-        sh returnStdout: true, script: "rm -rf ${configBaseDir}"
-        sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-          if ( params.workFlow == "DT" ) {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-          }
-          else if ( params.workFlow == "TT" )
-          {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-          }
-          else
-          {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-          }
-        }
-      }
-    }
-    stage('Clean up') {
-      steps {
-        timeout(15) {
-          script {
-            helmTeardown(["default", infraNamespace, volthaNamespace])
-          }
-          timeout(1) {
-            sh returnStdout: false, script: '''
-            # remove orphaned port-forward from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Install Voltha')  {
-      steps {
-        timeout(20) {
-          installVoltctl("${branch}")
-          script {
-            // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
-            def localCharts = false
-            if (volthaHelmChartsChange != "") {
-              localCharts = true
-            }
-
-            // should the config file be suffixed with the workflow? see "deployment_config"
-            def localHelmFlags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml --set global.log_level=${logLevel} "
-
-            if (workFlow.toLowerCase() == "dt") {
-              localHelmFlags += " --set radius.enabled=false "
-            }
-            if (workFlow.toLowerCase() == "tt") {
-              localHelmFlags += " --set radius.enabled=false --set global.incremental_evto_update=true "
-                if (enableMultiUni.toBoolean()) {
-                    localHelmFlags += " --set voltha-adapter-openonu.adapter_open_onu.uni_port_mask=${uniPortMask} "
-                }
-            }
-
-            // NOTE temporary workaround expose ONOS node ports (pod-config needs to be updated to contain these values)
-            // and to connect the ofagent to all instances of ONOS
-            localHelmFlags = localHelmFlags + " --set onos-classic.onosSshPort=30115 " +
-            "--set onos-classic.onosApiPort=30120 " +
-            "--set onos-classic.onosOfPort=31653 " +
-            "--set onos-classic.individualOpenFlowNodePorts=true " +
-            "--set voltha.onos_classic.replicas=${params.NumOfOnos}"
-
-            if (bbsimReplicas.toInteger() != 0) {
-              localHelmFlags = localHelmFlags + " --set onu=${onuNumber},pon=${ponNumber} "
-            }
-
-            // adding user specified helm flags at the end so they'll have priority over everything else
-            localHelmFlags = localHelmFlags + " ${extraHelmFlags}"
-
-            def numberOfAdaptersToWait = 2
-
-            if(openoltAdapterChart != "onf/voltha-adapter-openolt") {
-              localHelmFlags = localHelmFlags + " --set voltha-adapter-openolt.enabled=false"
-              // We skip waiting for adapters in the volthaDeploy step because it's already waiting for
-              // both of them after the deployment of the custom olt adapter. See line 156.
-              numberOfAdaptersToWait = 0
-            }
-
-            volthaDeploy([
-              workflow: workFlow.toLowerCase(),
-              extraHelmFlags: localHelmFlags,
-              localCharts: localCharts,
-              kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
-              onosReplica: params.NumOfOnos,
-              atomixReplica: params.NumOfAtomix,
-              kafkaReplica: params.NumOfKafka,
-              etcdReplica: params.NumOfEtcd,
-              bbsimReplica: bbsimReplicas.toInteger(),
-              adaptersToWait: numberOfAdaptersToWait,
-              withVolthaInfra: installVolthaInfra.toBoolean(),
-              withVolthaStack: installVolthaStack.toBoolean(),
-              ])
-
-            if(installVolthaStack.toBoolean()) {
-              if(openoltAdapterChart != "onf/voltha-adapter-openolt"){
-                extraHelmFlags = extraHelmFlags + " --set global.log_level=${logLevel}"
-                deploy_custom_chart(volthaNamespace, oltAdapterReleaseName, openoltAdapterChart, extraHelmFlags)
-                waitForAdapters([
-                  adaptersToWait: 2
-                ])
-              }
-            }
-          }
-          sh """
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd ${params.VolthaEtcdPort}:2379; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-          ps aux | grep port-forward
-          """
-          getPodsInfo("$WORKSPACE")
-        }
-      }
-    }
-    stage('Deploy Device Manager Interface Chart') {
-      steps {
-        script {
-          deploy_custom_chart('default', 'olt-device-manager', dmiChart, extraHelmFlags)
-        }
-        println "Wait for olt-device-manager to start"
-        sh """
-            set +x
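-            # wait until every OLT device manager pod reports all containers ready (no "0/" entries left)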
-            devmgr=\$(kubectl get pods -l app.kubernetes.io/name=${params.OltDevMgr} --no-headers | grep "0/" | wc -l)
-            while [[ \$devmgr != 0 ]]; do
-              sleep 5
-              devmgr=\$(kubectl get pods -l app.kubernetes.io/name=${params.OltDevMgr} --no-headers | grep "0/" | wc -l)
-            done
-        """
-        sh """
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="${params.OltDevMgr}" bash -c "while true; do kubectl port-forward --address 0.0.0.0 svc/${params.OltDevMgr} 50051; done"&
-          ps aux | grep port-forward
-        """
-      }
-    }
-    stage('Start logging') {
-      steps {
-        // Install kail
-        sh("""make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail""")
-
-        sh returnStdout: false, script: '''
-          # start logging with kail
-          cd $WORKSPACE
-          mkdir -p $LOG_FOLDER
-          list=($APPS_TO_LOG)
-          for app in "${list[@]}"
-          do
-            echo "Starting logs for: ${app}"
-            _TAG=kail-$app kail -l app.kubernetes.io/name=$app --since 1h > $LOG_FOLDER/$app.log&
-          done
-        '''
-      }
-    }
-
-    stage('Reinstall OLT software') {
-      steps {
-        script {
-          if ( params.reinstallOlt ) {
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              sh returnStdout: true, script: """
-              ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-              if [ "${params.inBandManagement}" == "true" ]; then
-                sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'kill -9 `pgrep -f "[b]ash /opt/openolt/openolt_dev_mgmt_daemon_process_watchdog"` || true'
-              fi
-              sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion}"
-              sleep 10
-              """
-              timeout(5) {
-                waitUntil {
-                  olt_sw_present = sh returnStdout: true, script: """
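-                  # count installed dpkg entries matching the expected openolt package; a non-zero count means the reinstall succeeded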
-                  if [[ "${deployment_config.olts[i].oltDebVersion}" == *"asfvolt16"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asfvolt16 | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"asgvolt64"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asgvolt64 | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600x-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600x-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600g-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600g-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-3200g-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-3200g-w | wc -l'
-                  else
-                    echo Unknown Debian package for openolt
-                  fi
-                  if (${deployment_config.olts[i].fortygig}); then
-                    if [[ "${params.inBandManagement}" == "true" ]]; then
-                      ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'mkdir -p /opt/openolt/'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/watchdog-script/* /opt/openolt/'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/bal_cli_appl/example_user_appl /broadcom'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp in-band-startup-script/* /etc/init.d/'
-                    fi
-                  fi
-                  """
-                  return olt_sw_present.toInteger() > 0
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Restart OLT processes') {
-      steps {
-        script {
-          if ( params.restartOlt ) {
-            //rebooting OLTs
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              timeout(15) {
-                sh returnStdout: true, script: """
-                ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-                sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; rm -f /var/log/openolt_process_watchdog.log; reboot > /dev/null &' || true
-                """
-              }
-            }
-            sh returnStdout: true, script: """
-            sleep ${params.waitTimerForOltUp}
-            """
-            // Checking that the dev_mgmt_daemon and openolt processes are running
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              if ( params.oltAdapterReleaseName != "open-olt" ) {
-                timeout(15) {
-                  waitUntil {
-                    devprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep dev_mgmt_daemon | wc -l'"
-                    return devprocess.toInteger() > 0
-                  }
-                }
-                timeout(15) {
-                  waitUntil {
-                    openoltprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep openolt | wc -l'"
-                    return openoltprocess.toInteger() > 0
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Run Device Management Interface Tests') {
-      environment {
-        ROBOT_FILE="dmi-hw-management.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs"
-        ROBOT_CONFIG_FILE="$WORKSPACE/voltha-system-tests/tests/data/dmi-components-adtran.yaml"
-      }
-      steps {
-        sh """
-          mkdir -p $ROBOT_LOGS_DIR
-          export ROBOT_MISC_ARGS="--removekeywords wuks -e notreadyDMI -i functionalDMI -d $ROBOT_LOGS_DIR"
-          make -C $WORKSPACE/voltha-system-tests voltha-dmi-test || true
-        """
-      }
-    }
-  }
-
-  post {
-    always {
-      getPodsInfo("$WORKSPACE")
-      sh '''
-      # stop the kail processes
-      list=($APPS_TO_LOG)
-      for app in "${list[@]}"
-      do
-        echo "Stopping logs for: ${app}"
-        _TAG="kail-$app"
-        P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
-        if [ -n "$P_IDS" ]; then
-          echo $P_IDS
-          for P_ID in $P_IDS; do
-            kill -9 $P_ID
-          done
-        fi
-      done
-      '''
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: 'RobotLogs/log*.html',
-        otherFiles: '',
-        outputFileName: 'RobotLogs/output*.xml',
-        outputPath: '.',
-        passThreshold: 100,
-        reportFileName: 'RobotLogs/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true]);
-      archiveArtifacts artifacts: '**/*.txt,**/*.gz,*.gz,**/*.log'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.12/physical-build.groovy b/jjb/pipeline/voltha/voltha-2.12/physical-build.groovy
deleted file mode 100755
index f216e92..0000000
--- a/jjb/pipeline/voltha/voltha-2.12/physical-build.groovy
+++ /dev/null
@@ -1,432 +0,0 @@
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// used to deploy VOLTHA and configure ONOS physical PODs
-
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// -----------------------------------------------------------------------
-// -----------------------------------------------------------------------
-def getIam(String func)
-{
-    // Cannot rely on a stack trace due to jenkins manipulation
-    String src = 'jjb/pipeline/voltha/voltha-2.12/physical-build.groovy'
-    String iam = [src, func].join('::')
-    return iam
-}
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-// -----------------------------------------------------------------------
-// -----------------------------------------------------------------------
-def deploy_custom_oltAdapterChart(namespace, name, chart, extraHelmFlags) {
-    String iam = getIam('deploy_custom_oltAdapterChart')
-    println("** ${iam}: ENTER")
-
-    sh """
-    helm install --create-namespace --set defaults.image_pullPolicy=Always --namespace ${namespace} ${extraHelmFlags} ${name} ${chart}
-   """
-
-    println("** ${iam}: LEAVE")
-    return
-}
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 35, unit: 'MINUTES')
-  }
-  environment {
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-  }
-
-  stages{
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage ("Parse deployment configuration file") {
-      steps {
-        sh returnStdout: true, script: "rm -rf ${configBaseDir}"
-        sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-          if ( params.workFlow == "DT" ) {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-          }
-          else if ( params.workFlow == "TT" )
-          {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-          }
-          else
-          {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-          }
-        }
-      }
-    }
-    stage('Clean up') {
-      steps {
-        timeout(15) {
-          script {
-            helmTeardown(["default", infraNamespace, volthaNamespace])
-          }
-          timeout(1) {
-            sh returnStdout: false, script: '''
-            # remove orphaned port-forward from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Install Voltha')  {
-      steps {
-        timeout(20) {
-          installVoltctl("${branch}")
-          script {
-            // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
-            def localCharts = false
-            if (volthaHelmChartsChange != "") {
-              localCharts = true
-            }
-
-            // should the config file be suffixed with the workflow? see "deployment_config"
-            def localHelmFlags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml --set global.log_level=${logLevel} "
-
-            if (workFlow.toLowerCase() == "dt") {
-              localHelmFlags += " --set radius.enabled=false "
-            }
-            if (workFlow.toLowerCase() == "tt") {
-              localHelmFlags += " --set radius.enabled=false --set global.incremental_evto_update=true "
-                if (enableMultiUni.toBoolean()) {
-                    localHelmFlags += " --set voltha-adapter-openonu.adapter_open_onu.uni_port_mask=${uniPortMask} "
-                }
-            }
-
-            // NOTE: temporary workaround to expose the ONOS node ports (pod-config needs to be updated to contain these values)
-            // and to connect the ofagent to all ONOS instances
-            localHelmFlags = localHelmFlags + " --set onos-classic.onosSshPort=30115 " +
-            "--set onos-classic.onosApiPort=30120 " +
-            "--set onos-classic.onosOfPort=31653 " +
-            "--set onos-classic.individualOpenFlowNodePorts=true " +
-            "--set voltha.onos_classic.replicas=${params.NumOfOnos}"
-
-            if (bbsimReplicas.toInteger() != 0) {
-              localHelmFlags = localHelmFlags + " --set onu=${onuNumber},pon=${ponNumber} "
-            }
-
-            // adding user specified helm flags at the end so they'll have priority over everything else
-            localHelmFlags = localHelmFlags + " ${extraHelmFlags}"
-
-            def numberOfAdaptersToWait = 2
-
-            if(openoltAdapterChart != "onf/voltha-adapter-openolt") {
-              localHelmFlags = localHelmFlags + " --set voltha-adapter-openolt.enabled=false"
-              // We skip waiting for adapters in the volthaDeploy step because it's already waiting for
-              // both of them after the deployment of the custom olt adapter. See line 156.
-              numberOfAdaptersToWait = 0
-            }
-
-            volthaDeploy([
-              workflow: workFlow.toLowerCase(),
-              extraHelmFlags: localHelmFlags,
-              localCharts: localCharts,
-              kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
-              onosReplica: params.NumOfOnos,
-              atomixReplica: params.NumOfAtomix,
-              kafkaReplica: params.NumOfKafka,
-              etcdReplica: params.NumOfEtcd,
-              bbsimReplica: bbsimReplicas.toInteger(),
-              withFttb: withFttb.toBoolean(),
-              adaptersToWait: numberOfAdaptersToWait,
-              ])
-
-            if(openoltAdapterChart != "onf/voltha-adapter-openolt"){
-              extraHelmFlags = extraHelmFlags + " --set global.log_level=${logLevel}"
-              deploy_custom_oltAdapterChart(volthaNamespace, oltAdapterReleaseName, openoltAdapterChart, extraHelmFlags)
-              waitForAdapters([
-                adaptersToWait: 2
-              ])
-            }
-          }
-          sh """
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd ${params.VolthaEtcdPort}:2379; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-          ps aux | grep port-forward
-          """
-          getPodsInfo("$WORKSPACE")
-        }
-      }
-    }
-    stage('Push Tech-Profile') {
-      steps {
-        script {
-          if ( params.configurePod && params.profile != "Default" ) {
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              def tech_prof_directory = "XGS-PON"
-              if (deployment_config.olts[i].containsKey("board_technology")){
-                tech_prof_directory = deployment_config.olts[i]["board_technology"]
-              }
-              timeout(1) {
-                sh returnStatus: true, script: """
-                export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
-                etcd_container=\$(kubectl get pods -n ${infraNamespace} -l app.kubernetes.io/name=etcd --no-headers | awk 'NR==1{print \$1}')
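-                # copy the tech profile JSON into the etcd pod and store it under service/voltha/technology_profiles/<technology>/<tp-id>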
-                if [[ "${workFlow}" == "TT" ]]; then
-                   kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
-                   kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
-                   kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
-                   kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65'
-                   if [[ "${params.enableMultiUni}" == "true" ]]; then
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-MCAST-AdditionalBW-None.json \$etcd_container:/tmp/mcast_additionalBW_none.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_none.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast_additionalBW_na.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_na.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/67'
-                   else
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-None.json \$etcd_container:/tmp/mcast_additionalBW_none.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_none.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast_additionalBW_na.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_na.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/67'
-                   fi
-                else
-                   kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json \$etcd_container:/tmp/flexpod.json
-                   kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
-                fi
-                """
-              }
-              timeout(1) {
-                sh returnStatus: true, script: """
-                export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
-                etcd_container=\$(kubectl get pods -n ${infraNamespace} -l app.kubernetes.io/name=etcd --no-headers | awk 'NR==1{print \$1}')
-                kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'ETCDCTL_API=3 etcdctl get --prefix service/voltha/technology_profiles/${tech_prof_directory}/64'
-                """
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Push MIB templates') {
-      steps {
-        sh """
-        export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
-        etcd_container=\$(kubectl get pods -n ${infraNamespace} -l app.kubernetes.io/name=etcd --no-headers | awk 'NR==1{print \$1}')
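-        # preload per-vendor OMCI MIB templates into etcd (both the go_templates and legacy templates key prefixes)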
-        kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/MIB_Alpha.json \$etcd_container:/tmp/MIB_Alpha.json
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/go_templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
-        kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/MIB_Scom.json \$etcd_container:/tmp/MIB_Scom.json
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Scom.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/go_templates/SCOM/Glasfaser-Modem/090140.1.0.304'
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Scom.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/templates/SCOM/Glasfaser-Modem/090140.1.0.304'
-        """
-      }
-    }
-    stage('Push Sadis-config') {
-      steps {
-        timeout(1) {
-          sh returnStatus: true, script: """
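-          # push the workflow-specific SADIS subscriber/bandwidth-profile configuration to ONOS via its network configuration REST API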
-          if [[ "${workFlow}" == "DT" ]]; then
-            curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-DT.json
-          elif [[ "${workFlow}" == "TT" ]]; then
-            curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-TT.json
-          else
-            # this is the ATT case; rename the file to *-sadis-ATT.json so that we can avoid special cases and just load the file
-            curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis.json
-          fi
-          """
-        }
-      }
-    }
-    stage('Switch Configurations in ONOS') {
-      steps {
-        script {
-          if ( deployment_config.fabric_switches.size() > 0 ) {
-            timeout(1) {
-              def netcfg = "$WORKSPACE/${configBaseDir}/${configToscaDir}/voltha/${configFileName}-onos-netcfg-switch.json"
-              if (params.inBandManagement){
-                netcfg = "$WORKSPACE/${configBaseDir}/${configToscaDir}/voltha/${configFileName}-onos-netcfg-switch-inband.json"
-              }
-              sh """
-              curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @${netcfg}
-              curl -sSL --user karaf:karaf -X POST http://${deployment_config.nodes[0].ip}:30120/onos/v1/applications/org.onosproject.segmentrouting/active
-              """
-            }
-            timeout(3) {
-              setOnosLogLevels([
-                  onosNamespace: infraNamespace,
-                  apps: [
-                    'org.opencord.dhcpl2relay',
-                    'org.opencord.olt',
-                    'org.opencord.aaa',
-                    'org.opencord.maclearner',
-                    'org.onosproject.net.flowobjective.impl.FlowObjectiveManager',
-                    'org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager'
-                  ]
-              ])
-              waitUntil {
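-                // wait for segmentrouting to report ACTIVE, then disable LLDP link discovery and flow/meter purging on switch disconnect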
-                sr_active_out = sh returnStatus: true, script: """
-                curl -sSL --user karaf:karaf -X GET http://${deployment_config.nodes[0].ip}:30120/onos/v1/applications/org.onosproject.segmentrouting | jq '.state' | grep ACTIVE
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled false"
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.net.flow.impl.FlowRuleManager purgeOnDisconnection false"
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.net.meter.impl.MeterManager purgeOnDisconnection false"
-                """
-                return sr_active_out == 0
-              }
-            }
-            timeout(8) {
-              for(int i=0; i < deployment_config.hosts.src.size(); i++) {
-                for(int j=0; j < deployment_config.olts.size(); j++) {
-                  def aggPort = -1
-                  if(deployment_config.olts[j].serial == deployment_config.hosts.src[i].olt){
-                      aggPort = deployment_config.olts[j].aggPort
-                      if(aggPort == -1){
-                        throw new Exception("Upstream port for the olt is not configured, field aggPort is empty")
-                      }
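-                      // create an ONOS xconnect between the fabric BNG port and the OLT aggregation port for this subscriber's s-tag VLAN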
-                      sh """
-                      sleep 10 # NOTE why are we sleeping?
-                      curl -X POST --user karaf:karaf --header 'Content-Type: application/json' --header 'Accept: application/json' -d '{"deviceId": "${deployment_config.fabric_switches[0].device_id}", "vlanId": "${deployment_config.hosts.src[i].s_tag}", "endpoints": [${deployment_config.fabric_switches[0].bngPort},${aggPort}]}' 'http://${deployment_config.nodes[0].ip}:30120/onos/segmentrouting/xconnect'
-                      """
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Reinstall OLT software') {
-      steps {
-        script {
-          if ( params.reinstallOlt ) {
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              // NOTE what is oltDebVersion23? is that for VOLTHA-2.3? do we still need this differentiation?
-              sh returnStdout: true, script: """
-              ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-              if [ "${params.inBandManagement}" == "true" ]; then
-                sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'kill -9 `pgrep -f "[b]ash /opt/openolt/openolt_dev_mgmt_daemon_process_watchdog"` || true'
-              fi
-              sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion}"
-              sleep 10
-              """
-              timeout(5) {
-                waitUntil {
-                  olt_sw_present = sh returnStdout: true, script: """
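-                  # count installed dpkg entries matching the expected openolt package; a non-zero count means the reinstall succeeded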
-                  if [[ "${deployment_config.olts[i].oltDebVersion}" == *"asfvolt16"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asfvolt16 | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"asgvolt64"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asgvolt64 | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600x-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600x-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600g-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600g-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-3200g-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-3200g-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"sda3016ss"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep sda3016ss | wc -l'
-                  else
-                    echo Unknown Debian package for openolt
-                  fi
-                  if (${deployment_config.olts[i].fortygig}); then
-                    if [[ "${params.inBandManagement}" == "true" ]]; then
-                      ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'mkdir -p /opt/openolt/'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/watchdog-script/* /opt/openolt/'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/bal_cli_appl/example_user_appl /broadcom'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp in-band-startup-script/* /etc/init.d/'
-                    fi
-                  fi
-                  """
-                  return olt_sw_present.toInteger() > 0
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Restart OLT processes') {
-      steps {
-        script {
-          //rebooting OLTs
-          for(int i=0; i < deployment_config.olts.size(); i++) {
-            timeout(15) {
-              sh returnStdout: true, script: """
-              ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-              sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; rm -f /var/log/openolt_process_watchdog.log; reboot > /dev/null &' || true
-              """
-            }
-          }
-          sh returnStdout: true, script: """
-          sleep ${params.waitTimerForOltUp}
-          """
-          // Checking that the dev_mgmt_daemon and openolt processes are running
-          for(int i=0; i < deployment_config.olts.size(); i++) {
-            if ( params.oltAdapterReleaseName != "open-olt" ) {
-              timeout(15) {
-                waitUntil {
-                  devprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep dev_mgmt_daemon | wc -l'"
-                  return devprocess.toInteger() > 0
-                }
-              }
-              timeout(15) {
-                waitUntil {
-                  openoltprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep openolt | wc -l'"
-                  return openoltprocess.toInteger() > 0
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  post {
-    aborted {
-      getPodsInfo("$WORKSPACE/failed")
-      sh """
-      kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.log || true
-      """
-      archiveArtifacts artifacts: '**/*.log,**/*.txt'
-    }
-    failure {
-      getPodsInfo("$WORKSPACE/failed")
-      sh """
-      kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.logs || true
-      """
-      archiveArtifacts artifacts: '**/*.log,**/*.txt'
-    }
-    always {
-      archiveArtifacts artifacts: '*.txt'
-    }
-  }
-}
-
-// [EOF]
diff --git a/jjb/pipeline/voltha/voltha-2.12/software-upgrades.groovy b/jjb/pipeline/voltha/voltha-2.12/software-upgrades.groovy
deleted file mode 100755
index 3f15e7a..0000000
--- a/jjb/pipeline/voltha/voltha-2.12/software-upgrades.groovy
+++ /dev/null
@@ -1,383 +0,0 @@
-// Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// voltha-2.x e2e tests
-// uses bbsim to simulate OLT/ONUs
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// -----------------------------------------------------------------------
-// Intent:
-// -----------------------------------------------------------------------
-String branchName() {
-    String name = 'voltha-2.12'
-
-    // [TODO] Sanity check the target branch
-    // if (name != jenkins.branch) { fatal }
-    return(name)
-}
-
-// -----------------------------------------------------------------------
-// Intent: Due to lack of a reliable stack trace, construct a literal.
-//         Jenkins will re-write the call stack for serialization.
-// -----------------------------------------------------------------------
-String getIam(String func) {
-    String branchName = branchName()
-    String src = [
-        'ci-management',
-        'jjb',
-        'pipeline',
-        'voltha',
-        branchName,
-        'software-upgrades.groovy'
-    ].join('/')
-
-    String name = [src, func].join('::')
-    return(name)
-}
-
-// -----------------------------------------------------------------------
-// Fetches the versions/tags of a voltha component.
-// Returns the deployment version, which is one release behind the latest available tag of the repo; the first voltha stack is deployed with it.
-// Returns the test version, which is the latest tag of the repo; the component upgrade is tested against it.
-// Note: if there is a major version change between the deployment and test tags, the deployment tag is set to the test tag, i.e. both are the latest.
-// -----------------------------------------------------------------------
-def get_voltha_comp_versions(component, base_deploy_tag) {
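-    // list the remote tags, version-sort them ('-' is mapped to '~' so pre-release tags sort before their final release),
-    // then take the newest tag for testing and the second newest for the initial deployment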
-    def comp_test_tag = sh(
-        script: "git ls-remote --refs --tags https://github.com/opencord/${component} | cut --delimiter='/' --fields=3 | tr '-' '~' | sort --version-sort | tail --lines=1 | sed 's/v//'",
-        returnStdout: true
-    ).trim()
-    def comp_deploy_tag = sh(
-        script: "git ls-remote --refs --tags https://github.com/opencord/${component} | cut --delimiter='/' --fields=3 | tr '-' '~' | sort --version-sort | tail --lines=2 | head -n 1 | sed 's/v//'",
-        returnStdout: true
-    ).trim()
-    def comp_deploy_major = comp_deploy_tag.substring(0, comp_deploy_tag.indexOf('.'))
-    def comp_test_major = comp_test_tag.substring(0, comp_test_tag.indexOf('.'))
-    if ("${comp_deploy_major.trim()}" != "${comp_test_major.trim()}") {
-        comp_deploy_tag = comp_test_tag
-    }
-    if ("${comp_test_tag.trim()}" == "${base_deploy_tag.trim()}") {
-        comp_deploy_tag = comp_test_tag
-        comp_test_tag = "master"
-    }
-    println "${component}: deploy_tag: ${comp_deploy_tag}, test_tag: ${comp_test_tag}"
-    return [comp_deploy_tag, comp_test_tag]
-}
-
-// -----------------------------------------------------------------------
-// -----------------------------------------------------------------------
-def test_software_upgrade(name) {
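-    // deploys a VOLTHA stack (pinned to the "deploy" tags for the component-upgrade scenarios),
-    // runs the Robot upgrade suite selected by "name", then tears the stack down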
-    def infraNamespace = "infra"
-    def volthaNamespace = "voltha"
-    def openolt_adapter_deploy_tag = ''
-    def openolt_adapter_test_tag = ''
-    def openonu_adapter_deploy_tag = ''
-    def openonu_adapter_test_tag = ''
-    def rw_core_deploy_tag = ''
-    def rw_core_test_tag = ''
-    def ofagent_deploy_tag = ''
-    def ofagent_test_tag = ''
-    def logsDir = "$WORKSPACE/${name}"
-    stage('Deploy Voltha - ' + name) {
-        timeout(10) {
-            // start logging
-            sh """
-      rm -rf ${logsDir} || true
-      mkdir -p ${logsDir}
-      _TAG=kail-${name} kail -n ${infraNamespace} -n ${volthaNamespace} > ${logsDir}/onos-voltha-startup-combined.log &
-      """
-            def extraHelmFlags = extraHelmFlags.trim()
-            if ("${name}" == "onos-app-upgrade" || "${name}" == "onu-software-upgrade" || "${name}" == "onu-software-upgrade-omci-extended-msg" || "${name}" == "voltha-component-upgrade" || "${name}" == "voltha-component-rolling-upgrade") {
-                extraHelmFlags = " --set global.log_level=${logLevel.toUpperCase()},onu=1,pon=1 --set onos-classic.replicas=3,onos-classic.atomix.replicas=3 " + extraHelmFlags
-            }
-            if ("${name}" == "onu-software-upgrade" || "${name}" == "onu-software-upgrade-omci-extended-msg") {
-                extraHelmFlags = " --set global.extended_omci_support.enabled=true " + extraHelmFlags
-            }
-            if ("${name}" == "onu-software-upgrade-omci-extended-msg") {
-                extraHelmFlags = " --set omccVersion=180 " + extraHelmFlags
-            }
-            if ("${name}" == "onu-image-dwl-simultaneously") {
-                extraHelmFlags = " --set global.log_level=${logLevel.toUpperCase()},onu=2,pon=2 --set onos-classic.replicas=3,onos-classic.atomix.replicas=3 " + extraHelmFlags
-            }
-            if ("${name}" == "onos-app-upgrade" || "${name}" == "onu-software-upgrade" || "${name}" == "onu-software-upgrade-omci-extended-msg" || "${name}" == "onu-image-dwl-simultaneously") {
-                extraHelmFlags = " --set global.image_tag=master --set onos-classic.image.tag=master " + extraHelmFlags
-            }
-            if ("${name}" == "voltha-component-upgrade" || "${name}" == "voltha-component-rolling-upgrade") {
-                extraHelmFlags = " --set images.onos_config_loader.tag=master-onos-config-loader --set onos-classic.image.tag=master " + extraHelmFlags
-            }
-            extraHelmFlags += " --set onos-classic.onosSshPort=30115 --set onos-classic.onosApiPort=30120 "
-            extraHelmFlags += " --set voltha.onos_classic.replicas=3"
-            //ONOS custom image handling
-            if ( onosImg.trim() != '' ) {
-                String[] split;
-                onosImg = onosImg.trim()
-                split = onosImg.split(':')
-                extraHelmFlags += " --set onos-classic.image.repository=" + split[0] +",onos-classic.image.tag=" + split[1] + " "
-            }
-            Integer olts = 1
-            if ("${name}" == 'onu-image-dwl-simultaneously') {
-                olts = 2
-            }
-            if ("${name}" == 'voltha-component-upgrade' || "${name}" == 'voltha-component-rolling-upgrade') {
-                // fetch voltha components versions/tags
-                (openolt_adapter_deploy_tag, openolt_adapter_test_tag) = get_voltha_comp_versions('voltha-openolt-adapter', openoltAdapterDeployBaseTag.trim())
-                extraHelmFlags += " --set voltha-adapter-openolt.images.adapter_open_olt.tag=${openolt_adapter_deploy_tag} "
-                (openonu_adapter_deploy_tag, openonu_adapter_test_tag) = get_voltha_comp_versions('voltha-openonu-adapter-go', openonuAdapterDeployBaseTag.trim())
-                extraHelmFlags += " --set voltha-adapter-openonu.images.adapter_open_onu_go.tag=${openonu_adapter_deploy_tag} "
-                (rw_core_deploy_tag, rw_core_test_tag) = get_voltha_comp_versions('voltha-go', rwCoreDeployBaseTag.trim())
-                extraHelmFlags += " --set voltha.images.rw_core.tag=${rw_core_deploy_tag} "
-                (ofagent_deploy_tag, ofagent_test_tag) = get_voltha_comp_versions('ofagent-go', ofagentDeployBaseTag.trim())
-                extraHelmFlags += " --set voltha.images.ofagent.tag=${ofagent_deploy_tag} "
-            }
-            def localCharts = false
-            // Currently only testing with ATT workflow
-            // TODO: Support for other workflows
-            volthaDeploy([bbsimReplica: olts.toInteger(), workflow: 'att', extraHelmFlags: extraHelmFlags, localCharts: localCharts])
-            // stop logging
-            sh """
-        P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${name}" | grep -v grep | awk '{print \$1}')"
-        if [ -n "\$P_IDS" ]; then
-          echo \$P_IDS
-          for P_ID in \$P_IDS; do
-            kill -9 \$P_ID
-          done
-        fi
-        cd ${logsDir}
-        gzip -k onos-voltha-startup-combined.log
-        rm onos-voltha-startup-combined.log
-      """
-            // forward ONOS and VOLTHA ports
-            sh('''
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8101:8101; done 2>&1 " &
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8181:8181; done 2>&1 " &
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG=port-forward-voltha-api /bin/bash -c "while true; do kubectl -n voltha port-forward --address 0.0.0.0 service/voltha-voltha-api 55555:55555; done 2>&1 " &
-      ''')
-            sh('''
-      sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set DEBUG org.opencord
-      ''')
-        }
-    }
-
-    stage('Test - ' + name) {
-        timeout(75) {
-            sh """
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/${name}"
-        mkdir -p \$ROBOT_LOGS_DIR
-        if [[ ${name} == 'onos-app-upgrade' ]]; then
-          export ONOS_APPS_UNDER_TEST+=''
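-          # each '*'-terminated entry is <app-id>,<version>,<oar-url>; apps are added only when both a version and an OAR URL are supplied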
-          if [ ${aaaVer.trim()} != '' ] && [ ${aaaOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.aaa,${aaaVer.trim()},${aaaOarUrl.trim()}*"
-          fi
-          if [ ${oltVer.trim()} != '' ] && [ ${oltOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.olt,${oltVer.trim()},${oltOarUrl.trim()}*"
-          fi
-          if [ ${dhcpl2relayVer.trim()} != '' ] && [ ${dhcpl2relayOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.dhcpl2relay,${dhcpl2relayVer.trim()},${dhcpl2relayOarUrl.trim()}*"
-          fi
-          if [ ${igmpproxyVer.trim()} != '' ] && [ ${igmpproxyOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.igmpproxy,${igmpproxyVer.trim()},${igmpproxyOarUrl.trim()}*"
-          fi
-          if [ ${sadisVer.trim()} != '' ] && [ ${sadisOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.sadis,${sadisVer.trim()},${sadisOarUrl.trim()}*"
-          fi
-          if [ ${mcastVer.trim()} != '' ] && [ ${mcastOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.mcast,${mcastVer.trim()},${mcastOarUrl.trim()}*"
-          fi
-          if [ ${kafkaVer.trim()} != '' ] && [ ${kafkaOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.kafka,${kafkaVer.trim()},${kafkaOarUrl.trim()}*"
-          fi
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v onos_apps_under_test:\$ONOS_APPS_UNDER_TEST -e PowerSwitch"
-          export TARGET=onos-app-upgrade-test
-        fi
-        if [ ${name} == 'voltha-component-upgrade' ] || [ ${name} == 'voltha-component-rolling-upgrade' ]; then
-          export VOLTHA_COMPS_UNDER_TEST+=''
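-          # '*'-separated list of VOLTHA components to upgrade, each entry ending with the target image:tag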
-          VOLTHA_COMPS_UNDER_TEST+="adapter-open-olt,adapter-open-olt,voltha/voltha-openolt-adapter:${openolt_adapter_test_tag}*"
-          VOLTHA_COMPS_UNDER_TEST+="adapter-open-onu,adapter-open-onu,voltha/voltha-openonu-adapter-go:${openonu_adapter_test_tag}*"
-          VOLTHA_COMPS_UNDER_TEST+="rw-core,voltha,voltha/voltha-rw-core:${rw_core_test_tag}*"
-          VOLTHA_COMPS_UNDER_TEST+="ofagent,ofagent,voltha/voltha-ofagent-go:${ofagent_test_tag}*"
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v voltha_comps_under_test:\$VOLTHA_COMPS_UNDER_TEST -e PowerSwitch"
-        fi
-        if [[ ${name} == 'voltha-component-upgrade' ]]; then
-          export TARGET=voltha-comp-upgrade-test
-        fi
-        if [[ ${name} == 'voltha-component-rolling-upgrade' ]]; then
-          export TARGET=voltha-comp-rolling-upgrade-test
-        fi
-        if [ ${name} == 'onu-software-upgrade' ] || [ ${name} == 'onu-software-upgrade-omci-extended-msg' ]; then
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v image_version:${onuImageVersion.trim()} -v image_url:${onuImageUrl.trim()} -v image_vendor:${onuImageVendor.trim()} -v image_activate_on_success:${onuImageActivateOnSuccess.trim()} -v image_commit_on_success:${onuImageCommitOnSuccess.trim()} -v image_crc:${onuImageCrc.trim()} -e PowerSwitch"
-          export TARGET=onu-upgrade-test
-        fi
-        if [[ ${name} == 'onu-image-dwl-simultaneously' ]]; then
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v image_version:${onuImageVersion.trim()} -v image_url:${onuImageUrl.trim()} -v image_vendor:${onuImageVendor.trim()} -v image_activate_on_success:${onuImageActivateOnSuccess.trim()} -v image_commit_on_success:${onuImageCommitOnSuccess.trim()} -v image_crc:${onuImageCrc.trim()} -e PowerSwitch"
-          export TARGET=onu-upgrade-test-multiolt-kind-att
-        fi
-        testLogging='False'
-        if [ ${logging} = true ]; then
-          testLogging='True'
-        fi
-        export VOLTCONFIG=$HOME/.volt/config-minimal
-        export KUBECONFIG=$HOME/.kube/kind-config-voltha-minimal
-        ROBOT_MISC_ARGS+=" -v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120 -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace} -v container_log_dir:${logsDir} -v logging:\$testLogging"
-        # Run the specified tests
-        make -C $WORKSPACE/voltha-system-tests \$TARGET || true
-      """
-            // remove port-forwarding
-            sh """
-        # remove orphaned port-forward from different namespaces
-        ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
-      """
-            // collect pod details
-            get_pods_info("$WORKSPACE/${name}")
-            sh """
-        set +e
-        # collect logs collected in the Robot Framework StartLogging keyword
-        cd ${logsDir}
-        gzip *-combined.log || true
-        rm *-combined.log || true
-      """
-            helmTeardown(['infra', 'voltha'])
-        }
-    }
-}
-
-// -----------------------------------------------------------------------
-// -----------------------------------------------------------------------
-void get_pods_info(dest) {
-    // collect pod details, this is here in case of failure
-    sh """
-  mkdir -p ${dest} || true
-  kubectl get pods --all-namespaces -o wide > ${dest}/pods.txt || true
-  kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee ${dest}/pod-images.txt || true
-  kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee ${dest}/pod-imagesId.txt || true
-  kubectl describe pods --all-namespaces -l app.kubernetes.io/part-of=voltha > ${dest}/voltha-pods-describe.txt
-  kubectl describe pods -n infra -l app=onos-classic > ${dest}/onos-pods-describe.txt
-  helm ls --all-namespaces > ${dest}/helm-charts.txt
-  """
-    sh '''
-  # copy the ONOS logs directly from the container to avoid the color codes
-  printf '%s\\n' $(kubectl get pods -n infra -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c 'kubectl -n infra cp #:apache-karaf-4.2.14/data/log/karaf.log ''' + dest + '''/#.log' || true
-  '''
-    return
-}
-
-// -----------------------------------------------------------------------
-// -----------------------------------------------------------------------
-pipeline {
-    /* no label, executor is determined by JJB */
-    agent {
-        label "${params.buildNode}"
-    }
-
-    options {
-        timeout(time: 220, unit: 'MINUTES')
-    }
-
-    environment {
-        PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-        KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
-        SSHPASS="karaf"
-    }
-
-    stages {
-        stage('Download Code') {
-            steps {
-                getVolthaCode([
-                    branch: "${branch}",
-                    volthaSystemTestsChange: "${volthaSystemTestsChange}",
-                    volthaHelmChartsChange: "${volthaHelmChartsChange}",
-                ])
-            }
-        }
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        stage('Install Tools')
-        {
-            steps
-            {
-                script
-                {
-                    String iam = getIam('Install Kind')
-                    println("${iam}: ENTER")
-                    installKind("$branch")   // needed early by stage(Cleanup)
-                    println("${iam}: LEAVE")
-                } // script
-            } // steps
-        } // stage
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        stage('Cleanup') {
-            steps {
-                // remove port-forwarding
-                sh(label  : 'Remove port forwarding',
-                   script : """
-if [[ \$(pgrep --count 'port-forw') -gt 0 ]]; then
-    pkill --uid "\$(id -u)" --echo --full 'port-forw'
-fi
-""")
-                helmTeardown(['infra', 'voltha'])
-            }
-        }
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        stage('Create K8s Cluster') {
-            steps {
-                createKubernetesCluster([nodes: 3])
-            }
-        }
-
-        // -----------------------------------------------------------------------
-        // -----------------------------------------------------------------------
-        stage('Run Test') {
-            steps {
-                test_software_upgrade('onos-app-upgrade')
-                test_software_upgrade('voltha-component-upgrade')
-                test_software_upgrade('voltha-component-rolling-upgrade')
-                test_software_upgrade('onu-software-upgrade')
-                test_software_upgrade('onu-software-upgrade-omci-extended-msg')
-                test_software_upgrade('onu-image-dwl-simultaneously')
-            }
-        }
-    }
-
-    // -----------------------------------------------------------------------
-    // -----------------------------------------------------------------------
-    post {
-        aborted {
-            get_pods_info("$WORKSPACE/failed")
-        }
-        failure {
-            get_pods_info("$WORKSPACE/failed")
-        }
-        always {
-            step([$class: 'RobotPublisher',
-                  disableArchiveOutput: false,
-                  logFileName: 'RobotLogs/*/log*.html',
-                  otherFiles: '',
-                  outputFileName: 'RobotLogs/*/output*.xml',
-                  outputPath: '.',
-                  passThreshold: 100,
-                  reportFileName: 'RobotLogs/*/report*.html',
-                  unstableThreshold: 0,
-                  onlyCritical: true])
-            archiveArtifacts artifacts: '*.log,**/*.log,**/*.gz,*.gz,*.txt,**/*.txt'
-        } // always
-    } // post
-} // pipeline
diff --git a/jjb/pipeline/voltha/voltha-2.12/tucson-build-and-test.groovy b/jjb/pipeline/voltha/voltha-2.12/tucson-build-and-test.groovy
deleted file mode 100644
index 81b26ab..0000000
--- a/jjb/pipeline/voltha/voltha-2.12/tucson-build-and-test.groovy
+++ /dev/null
@@ -1,367 +0,0 @@
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// used to deploy VOLTHA and configure ONOS physical PODs
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-def clusterName = "kind-ci"
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 120, unit: 'MINUTES')
-  }
-  environment {
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-    KUBECONFIG="$HOME/.kube/kind-${clusterName}"
-    VOLTCONFIG="$HOME/.volt/config"
-    LOG_FOLDER="$WORKSPACE/${workflow}/"
-    APPS_TO_LOG="etcd kafka onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server onos-config-loader"
-
-  }
-  stages{
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          gerritProject: "${gerritProject}",
-          gerritRefspec: "${gerritRefspec}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage ("Parse deployment configuration file") {
-      steps {
-        sh returnStdout: true, script: "rm -rf ${configBaseDir}"
-        sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-
-          if (params.workflow.toUpperCase() == "TT") {
-            error("The Tucson POD does not support TT workflow at the moment")
-          }
-
-          if ( params.workflow.toUpperCase() == "DT" ) {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-          }
-          else if ( params.workflow.toUpperCase() == "TT" ) {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-          }
-          else {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-          }
-        }
-      }
-    }
-    stage('Clean up') {
-      steps {
-        timeout(15) {
-          script {
-            helmTeardown(["default", infraNamespace, volthaNamespace])
-          }
-          timeout(1) {
-            sh returnStdout: false, script: '''
-            # remove orphaned port-forwards from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Build patch') {
-      steps {
-        // NOTE that the correct patch has already been checked out
-        // during the getVolthaCode step
-        buildVolthaComponent("${gerritProject}")
-      }
-    }
-    stage('Create K8s Cluster') {
-      steps {
-        script {
-          def clusterExists = sh returnStdout: true, script: """
-          kind get clusters | grep ${clusterName} | wc -l
-          """
-          if (clusterExists.trim() == "0") {
-            createKubernetesCluster([nodes: 3, name: clusterName])
-          }
-        }
-      }
-    }
-    stage('Load image in kind nodes') {
-      steps {
-        loadToKind()
-      }
-    }
-    stage('Install Voltha')  {
-      steps {
-        timeout(20) {
-          script {
-            imageFlags = getVolthaImageFlags(gerritProject)
-            // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
-            def localCharts = false
-            if (volthaHelmChartsChange != "" || gerritProject == "voltha-helm-charts") {
-              localCharts = true
-            }
-            def flags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml ${imageFlags} "
-            // NOTE: temporary workaround to expose ONOS node ports (pod-config needs to be updated to contain these values)
-            flags = flags + "--set onos-classic.onosSshPort=30115 " +
-            "--set onos-classic.onosApiPort=30120 " +
-            "--set onos-classic.onosOfPort=31653 " +
-            "--set onos-classic.individualOpenFlowNodePorts=true " + extraHelmFlags
-            volthaDeploy([
-              workflow: workFlow.toLowerCase(),
-              extraHelmFlags: flags,
-              localCharts: localCharts,
-              kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
-              onosReplica: 3,
-              atomixReplica: 3,
-              kafkaReplica: 3,
-              etcdReplica: 3,
-              ])
-          }
-          // start logging
-          sh """
-          rm -rf $WORKSPACE/${workFlow}/
-          mkdir -p $WORKSPACE/${workFlow}
-          _TAG=kail-${workFlow} kail -n infra -n voltha > $WORKSPACE/${workFlow}/onos-voltha-combined.log &
-          """
-          sh returnStdout: false, script: '''
-          # start logging with kail
-
-          mkdir -p $LOG_FOLDER
-
-          list=($APPS_TO_LOG)
-          for app in "${list[@]}"
-          do
-            echo "Starting logs for: ${app}"
-            _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
-          done
-          '''
-          sh """
-          JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-          ps aux | grep port-forward
-          """
-          getPodsInfo("$WORKSPACE")
-        }
-      }
-    }
-    stage('Deploy Kafka Dump Chart') {
-      steps {
-        script {
-          sh returnStdout: false, script: """
-              helm repo add cord https://charts.opencord.org
-              helm repo update
-              if helm version -c --short|grep v2 -q; then
-                helm install -n voltha-kafka-dump cord/voltha-kafka-dump
-              else
-                helm install voltha-kafka-dump cord/voltha-kafka-dump
-              fi
-          """
-        }
-      }
-    }
-    stage('Push Tech-Profile') {
-      when {
-        expression { params.profile != "Default" }
-      }
-      steps {
-        sh returnStdout: false, script: """
-        etcd_container=\$(kubectl get pods -n voltha | grep voltha-etcd-cluster | awk 'NR==1{print \$1}')
-        kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json voltha/\$etcd_container:/tmp/flexpod.json
-        kubectl exec -it \$etcd_container -n voltha -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/XGS-PON/64'
-        """
-      }
-    }
-
-    stage('Push Sadis-config') {
-      steps {
-        sh returnStdout: false, script: """
-        ssh-keygen -R [${deployment_config.nodes[0].ip}]:30115
-        ssh-keyscan -p 30115 -H ${deployment_config.nodes[0].ip} >> ~/.ssh/known_hosts
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.dhcpl2relay"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.aaa"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.olt"
-        # TRACE in the pipeliner is too chatty, moving to DEBUG
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.opencord.olt.driver"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager"
-
-        if [[ "${workFlow.toUpperCase()}" == "DT" ]]; then
-          curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-DT.json
-        elif [[ "${workFlow.toUpperCase()}" == "TT" ]]; then
-          curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-TT.json
-        else
-          # this is the ATT case; rename the file to *-sadis-ATT.json so that we can avoid special cases and just load the file
-          curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis.json
-        fi
-        """
-      }
-    }
-    stage('Reinstall OLT software') {
-      when {
-        expression { params.reinstallOlt }
-      }
-      steps {
-        script {
-          deployment_config.olts.each { olt ->
-            sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --remove asfvolt16 && dpkg --purge asfvolt16'"
-            waitUntil {
-              olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
-              return olt_sw_present.toInteger() == 0
-            }
-            if ( params.branch == 'voltha-2.3' ) {
-              oltDebVersion = oltDebVersionVoltha23
-            } else {
-              oltDebVersion = oltDebVersionMaster
-            }
-            sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --install ${oltDebVersion}'"
-            waitUntil {
-              olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
-              return olt_sw_present.toInteger() == 1
-            }
-            if ( olt.fortygig ) {
-              // If the OLT is connected to a 40G switch interface, downgrade the NNI port speed to 40G
-              sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'echo port ce128 sp=40000 >> /broadcom/qax.soc ; /opt/bcm68620/svk_init.sh'"
-            }
-          }
-        }
-      }
-    }
-
-    stage('Restart OLT processes') {
-      steps {
-        script {
-          deployment_config.olts.each { olt ->
-            sh returnStdout: false, script: """
-            ssh-keyscan -H ${olt.ip} >> ~/.ssh/known_hosts
-            sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; reboot'
-            sleep 120
-            """
-            waitUntil {
-              onu_discovered = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'grep \"onu discover indication\" /var/log/openolt.log | wc -l'"
-              return onu_discovered.toInteger() > 0
-            }
-          }
-        }
-      }
-    }
-    stage('Run E2E Tests') {
-      steps {
-        script {
-          // different workflows need different make targets and different robot files
-          if ( params.workflow.toUpperCase() == "DT" ) {
-            robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-            robotFile = "Voltha_DT_PODTests.robot"
-            makeTarget = "voltha-dt-test"
-            robotFunctionalKeyword = "-i functionalDt"
-            robotDataplaneKeyword = "-i dataplaneDt"
-          }
-          else if ( params.workflow.toUpperCase() == "TT" ) {
-            // TODO the TT tests have different tags, address once/if TT is supported on the Tucson POD
-            robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-            robotFile = "Voltha_TT_PODTests.robot"
-            makeTarget = "voltha-tt-test"
-            robotFunctionalKeyword = "-i functionalTt"
-            robotDataplaneKeyword = "-i dataplaneTt"
-          }
-          else {
-            robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-            robotFile = "Voltha_PODTests.robot"
-            makeTarget = "voltha-test"
-            robotFunctionalKeyword = "-i functional"
-            robotDataplaneKeyword = "-i dataplane"
-          }
-        }
-        sh returnStdout: false, script: """
-        mkdir -p $WORKSPACE/RobotLogs
-
-        export ROBOT_CONFIG_FILE="$WORKSPACE/${robotConfigFile}"
-        export ROBOT_MISC_ARGS="${params.extraRobotArgs} --removekeywords wuks -d $WORKSPACE/RobotLogs -v container_log_dir:$WORKSPACE "
-        export ROBOT_FILE="${robotFile}"
-
-        # If the Gerrit comment contains a line with "functional tests" then run the full
-        # functional test suite.  This covers tests tagged either 'sanity' or 'functional'.
-        # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
-        REGEX="functional tests"
-        if [[ "${gerritComment}" =~ \$REGEX ]]; then
-          ROBOT_MISC_ARGS+="${robotFunctionalKeyword} "
-        fi
-        # Likewise for dataplane tests
-        REGEX="dataplane tests"
-        if [[ "${gerritComment}" =~ \$REGEX ]]; then
-          ROBOT_MISC_ARGS+="${robotDataplaneKeyword}"
-        fi
-
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests ${makeTarget} || true
-        """
-      }
-    }
-  }
-  post {
-    always {
-      // stop logging
-      sh """
-        P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${workFlow}" | grep -v grep | awk '{print \$1}')"
-        if [ -n "\$P_IDS" ]; then
-          echo \$P_IDS
-          for P_ID in \$P_IDS; do
-            kill -9 \$P_ID
-          done
-        fi
-        gzip $WORKSPACE/${workFlow}/onos-voltha-combined.log || true
-      """
-      sh '''
-      # stop the kail processes
-      list=($APPS_TO_LOG)
-      for app in "${list[@]}"
-      do
-        echo "Stopping logs for: ${app}"
-        _TAG="kail-$app"
-        P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
-        if [ -n "$P_IDS" ]; then
-          echo $P_IDS
-          for P_ID in $P_IDS; do
-            kill -9 $P_ID
-          done
-        fi
-      done
-      '''
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: 'RobotLogs/log*.html',
-        otherFiles: '',
-        outputFileName: 'RobotLogs/output*.xml',
-        outputPath: '.',
-        passThreshold: 100,
-        reportFileName: 'RobotLogs/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true]);
-      archiveArtifacts artifacts: '**/*.txt,**/*.gz,*.gz,**/*.log'
-    }
-  }
-}
-
-// refs/changes/06/24206/5
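In the 'Run E2E Tests' stage of the pipeline above, extra Robot tag filters are appended when the triggering Gerrit comment contains "functional tests" or "dataplane tests". Here is a minimal Groovy sketch of the same matching, assuming gerritComment holds the raw comment text and that the -i values mirror the per-workflow robotFunctionalKeyword / robotDataplaneKeyword strings chosen by that stage.

String extraRobotTags(String comment) {
    String tags = ''
    if (comment =~ /functional tests/) {
        tags += ' -i functional'      // full functional suite requested in the comment
    }
    if (comment =~ /dataplane tests/) {
        tags += ' -i dataplane'       // dataplane suite requested in the comment
    }
    return tags
}
// usage inside a script {} block:
// env.ROBOT_MISC_ARGS = "${params.extraRobotArgs} --removekeywords wuks" + extraRobotTags(gerritComment)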
diff --git a/jjb/pipeline/voltha/voltha-2.12/voltha-dt-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-2.12/voltha-dt-physical-functional-tests.groovy
deleted file mode 100644
index 5a14eab..0000000
--- a/jjb/pipeline/voltha/voltha-2.12/voltha-dt-physical-functional-tests.groovy
+++ /dev/null
@@ -1,332 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-
-  stages {
-    stage('Clone voltha-system-tests') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${branch}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    // This checkout allows us to show changes in Jenkins
-    // we only do this on master as we don't branch all the repos for all the releases
-    // (we should compute the difference by tracking the container version, not the code)
-    stage('Download All the VOLTHA repos') {
-      when {
-        expression {
-          return "${branch}" == 'master';
-        }
-      }
-      steps {
-       checkout(changelog: true,
-         poll: false,
-         scm: [$class: 'RepoScm',
-           manifestRepositoryUrl: "${params.manifestUrl}",
-           manifestBranch: "${params.branch}",
-           currentBranch: true,
-           destinationDir: 'voltha',
-           forceSync: true,
-           resetFirst: true,
-           quiet: true,
-           jobs: 4,
-           showAllChanges: true]
-         )
-      }
-    }
-    stage ('Initialize') {
-      steps {
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-           deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        }
-        installVoltctl("${branch}")
-        sh """
-        ps -ef | grep port-forward
-        """
-
-        sh returnStdout: false, script: '''
-        # remove orphaned port-forwards from different namespaces
-        ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-        '''
-        sh """
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd ${params.VolthaEtcdPort}:2379; done"&
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-        ps aux | grep port-forward
-        """
-
-        sh("""ps -ef | grep port-forward""")
-
-        sh(returnStdout: false, script: """
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-           set +e
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        """)
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FunctionalTests"
-      }
-      steps {
-        sh """
-        ps -ef | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          if ( ${powerSwitch} ); then
-               export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanityDt -i functionalDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-               if ( ${powerCycleOlt} ); then
-                    ROBOT_MISC_ARGS+=" -v power_cycle_olt:True"
-               fi
-          else
-               export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanityDt -i functionalDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('FTTB Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_FTTB_Tests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FunctionalTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = true ]; then
-          export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i sanityDtFttb -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace} -v has_dataplane:False"
-          make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FailureScenarios"
-      }
-      steps {
-        sh """
-        ps -ef | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          if ( ${powerSwitch} ); then
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          else
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Dataplane Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/DataplaneTests"
-      }
-      steps {
-        sh """
-        ps -ef | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          export ROBOT_MISC_ARGS="--removekeywords wuks -i dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-    stage('HA Tests') {
-       environment {
-       ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-       ROBOT_FILE="Voltha_ONOSHATests.robot"
-       ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ONOSHAScenarios"
-      }
-      steps {
-        sh """
-        ps -ef | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Multiple OLT Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_MultiOLT_Tests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/MultipleOLTScenarios"
-      }
-      steps {
-        sh """
-        ps -ef | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          if ( ${powerSwitch} ); then
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          else
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-
-    stage('Error Scenario Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_ErrorScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/ErrorScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.withFttb} = false ]; then
-          export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        fi
-        """
-      }
-    }
-  }
-  post {
-    always {
-      getPodsInfo("$WORKSPACE/pods")
-      sh returnStdout: false, script: '''
-      set +e
-
-      # collect the logs gathered by the Robot Framework StartLogging keyword
-      cd $WORKSPACE
-      gzip *-combined.log || true
-      rm *-combined.log || true
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-            if (olt.type == null || olt.type == "" || olt.type == "openolt") {
-              sh returnStdout: false, script: """
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
-              """
-            }
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
-    }
-  }
-}
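Every test stage in the pipeline above rebuilds a long ROBOT_MISC_ARGS string that repeats the same -d/-v suffix. Below is a minimal sketch of a helper assembling the shared portion once; it assumes the job parameters used in the file (configFileName, configBaseDir, configKubernetesDir, oltAdapterAppLabel) and the volthaNamespace / infraNamespace values, and the helper name itself is illustrative.

def commonRobotArgs = { String logsDir ->
    [
        '--removekeywords wuks',
        '-e bbsim -e notready',
        "-d ${logsDir}",
        "-v POD_NAME:${configFileName}",
        "-v KUBERNETES_CONFIGS_DIR:${env.WORKSPACE}/${configBaseDir}/${configKubernetesDir}",
        "-v container_log_dir:${env.WORKSPACE}",
        "-v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}",
        "-v NAMESPACE:${volthaNamespace}",
        "-v INFRA_NAMESPACE:${infraNamespace}",
    ].join(' ')   // one space-separated argument string for Robot
}
// usage, e.g. in the Functional Tests stage:
// env.ROBOT_MISC_ARGS = '-i sanityDt -i functionalDt ' + commonRobotArgs("${env.WORKSPACE}/RobotLogs/dt-workflow/FunctionalTests")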
diff --git a/jjb/pipeline/voltha/voltha-2.12/voltha-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-2.12/voltha-physical-functional-tests.groovy
deleted file mode 100644
index 8565148..0000000
--- a/jjb/pipeline/voltha/voltha-2.12/voltha-physical-functional-tests.groovy
+++ /dev/null
@@ -1,269 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-  stages {
-    stage('Clone voltha-system-tests') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${branch}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    stage('Download All the VOLTHA repos') {
-      when {
-        expression {
-          return "${branch}" == 'master';
-        }
-      }
-      steps {
-       checkout(changelog: true,
-         poll: false,
-         scm: [$class: 'RepoScm',
-           manifestRepositoryUrl: "${params.manifestUrl}",
-           manifestBranch: "${params.branch}",
-           currentBranch: true,
-           destinationDir: 'voltha',
-           forceSync: true,
-           resetFirst: true,
-           quiet: true,
-           jobs: 4,
-           showAllChanges: true]
-         )
-      }
-    }
-    stage ('Initialize') {
-      steps {
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-          deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        }
-	installVoltctl("${branch}")
-	
-	sh(returnStdout: false, script: """
-
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        if [ "${params.branch}" == "master" ]; then
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-           set +e
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        fi
-        """)
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FunctionalTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-             if ( ${powerCycleOlt} ); then
-                  ROBOT_MISC_ARGS+=" -v power_cycle_olt:True"
-             fi
-        else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        fi
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FailureScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        fi
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-
-    stage('Dataplane Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/DataplaneTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        export ROBOT_MISC_ARGS="--removekeywords wuks -i dataplane -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-    stage('HA Tests') {
-       environment {
-       ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-       ROBOT_FILE="Voltha_ONOSHATests.robot"
-       ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ONOSHAScenarios"
-      }
-      steps {
-       sh """
-       mkdir -p $ROBOT_LOGS_DIR
-       export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-       ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-       make -C $WORKSPACE/voltha-system-tests voltha-test || true
-       """
-      }
-    }
-
-    stage('Error Scenario Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_ErrorScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ErrorScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-  }
-  post {
-    always {
-      getPodsInfo("$WORKSPACE/pods")
-      sh returnStdout: false, script: '''
-      set +e
-
-      # collect the logs gathered by the Robot Framework StartLogging keyword
-      cd $WORKSPACE
-      gzip *-combined.log || true
-      rm *-combined.log || true
-
-      # store information on the running pods
-      kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-imagesId.txt || true
-
-
-      # collect ETCD cluster logs
-      mkdir -p $WORKSPACE/etcd
-      printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-          if (olt.type == null || olt.type == "" || olt.type == "openolt") {
-            sh returnStdout: false, script: """
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
-            """
-          }
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
-    }
-  }
-}
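The post { always } block in the pipeline above repeats the same sshpass scp plus sed pair for every OLT log file it collects. A minimal sketch of a helper that fetches one remote log and strips ANSI escape sequences follows; it assumes olt is one entry of deployment_config.olts as read by the file, and the helper name is illustrative.

def fetchOltLog = { olt, String remotePath, String localName ->
    sh returnStdout: false, script: """
    sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:${remotePath} $WORKSPACE/${localName}-${olt.sship}.log || true
    # strip ANSI escape sequences so the archived log is plain text
    sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/${localName}-${olt.sship}.log || true
    """
}
// usage, inside a script {} block of post { always }:
// deployment_config.olts.each { olt ->
//     fetchOltLog(olt, '/var/log/openolt.log', 'openolt')
//     fetchOltLog(olt, '/var/log/dev_mgmt_daemon.log', 'dev_mgmt_daemon')
// }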
diff --git a/jjb/pipeline/voltha/voltha-2.12/voltha-physical-soak-dt-tests.groovy b/jjb/pipeline/voltha/voltha-2.12/voltha-physical-soak-dt-tests.groovy
deleted file mode 100644
index 6320cfb..0000000
--- a/jjb/pipeline/voltha/voltha-2.12/voltha-physical-soak-dt-tests.groovy
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-def volthaNamespace = "voltha"
-def infraNamespace = "infra"
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-
-
-  stages {
-    stage('Clone voltha-system-tests') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${branch}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    // This checkout allows us to show changes in Jenkins
-    // we only do this on master as we don't branch all the repos for all the releases
-    // (we should compute the difference by tracking the container version, not the code)
-    stage('Download All the VOLTHA repos') {
-      when {
-        expression {
-          return "${branch}" == 'master';
-        }
-      }
-      steps {
-       checkout(changelog: true,
-         poll: false,
-         scm: [$class: 'RepoScm',
-           manifestRepositoryUrl: "${params.manifestUrl}",
-           manifestBranch: "${params.branch}",
-           currentBranch: true,
-           destinationDir: 'voltha',
-           forceSync: true,
-           resetFirst: true,
-           quiet: true,
-           jobs: 4,
-           showAllChanges: true]
-         )
-      }
-    }
-    stage ('Initialize') {
-      steps {
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-           deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        }
-        installVoltctl("${branch}")
-
-	sh(returnStdout: false, script: """
-
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        if [ "${params.branch}" == "master" ]; then
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-           set +e
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        fi
-        """)
-
-        sh("""
-        mkdir -p $WORKSPACE/voltha-pods-mem-consumption
-        cd $WORKSPACE/voltha-system-tests
-        make vst_venv
-        source ./vst_venv/bin/activate || true
-        # Collect initial memory consumption
-        python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption -a 0.0.0.0:31301 -n ${volthaNamespace} || true
-        """)
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FunctionalTests"
-      }
-      steps {
-        sh """
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="prometheus" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n cattle-prometheus svc/access-prometheus 31301:80; done"&
-        ps aux | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ "${params.testType}" == "Functional" ]; then
-            if ( ${powerSwitch} ); then
-                 export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-            else
-                 export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-            fi
-            ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-            make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FailureScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ "${params.testType}" == "Failure" ]; then
-           export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i soak -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-           ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-           make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Dataplane Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/DataplaneTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ "${params.testType}" == "Dataplane" ]; then
-           export ROBOT_MISC_ARGS="--removekeywords wuks -i soakDataplane -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-           ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-           make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-  }
-  post {
-    always {
-      getPodsInfo("$WORKSPACE/pods")
-      sh returnStdout: false, script: '''
-      set +e
-
-      # collect the logs gathered by the Robot Framework StartLogging keyword
-      cd $WORKSPACE
-      gzip *-combined.log || true
-      rm *-combined.log || true
-
-      # collect ETCD cluster logs
-      mkdir -p $WORKSPACE/etcd
-      printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-          sh returnStdout: false, script: """
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
-          """
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      // collect CPU (and memory) usage per container
-      sh """
-      mkdir -p $WORKSPACE/plots || true
-      cd $WORKSPACE/voltha-system-tests
-      source ./vst_venv/bin/activate || true
-      sleep 60 # we have to wait for prometheus to collect all the information
-      python scripts/sizing.py -o $WORKSPACE/plots -a 0.0.0.0:31301 -n ${volthaNamespace} -s 3600 || true
-      # Collect memory consumption of voltha pods once all the tests are complete
-      python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption -a 0.0.0.0:31301 -n ${volthaNamespace} || true
-      """
-      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt,plots/*,voltha-pods-mem-consumption/*'
-    }
-  }
-}
-
-// [EOF]
diff --git a/jjb/pipeline/voltha/voltha-2.12/voltha-scale-lwc-test.groovy b/jjb/pipeline/voltha/voltha-2.12/voltha-scale-lwc-test.groovy
deleted file mode 100644
index 84308ac..0000000
--- a/jjb/pipeline/voltha/voltha-2.12/voltha-scale-lwc-test.groovy
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA and performs a scale test with the LWC controller
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// [TODO] fix hard-coded path, an Achilles heel for testing.
-def lwc_helm_chart_path="/home/jenkins/Radisys_LWC_helm_charts"
-def value_file="/home/jenkins/lwc-values.yaml"
-def workflow="dt"
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-      timeout(time: 60, unit: 'MINUTES')
-  }
-  environment {
-    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
-    KUBECONFIG="$HOME/.kube/config"
-    VOLTCONFIG="$HOME/.volt/config"
-    SSHPASS="karaf"
-    VOLTHA_LOG_LEVEL="${logLevel}"
-    NUM_OF_BBSIM="${olts}"
-    NUM_OF_OPENONU="${openonuAdapterReplicas}"
-    NUM_OF_ONOS="${onosReplicas}"
-    NUM_OF_ATOMIX="${atomixReplicas}"
-    EXTRA_HELM_FLAGS=" "
-    LOG_FOLDER="$WORKSPACE/logs"
-    GERRIT_PROJECT="${GERRIT_PROJECT}"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-  }
-
-  stages {
-    stage ('Cleanup') {
-      steps {
-        script {
-          try {
-            timeout(time: 5, unit: 'MINUTES') {
-              sh returnStdout: false, script: '''
-              cd $WORKSPACE
-              rm -rf $WORKSPACE/*
-              '''
-              // removing the voltha-infra chart first
-              // if we don't, ONOS might get stuck because of all the events generated when BBSim goes down
-              sh returnStdout: false, script: '''
-              set +x
-              helm del -n infra voltha-infra || true
-              helm del voltha-infra || true
-              echo -ne "\nWaiting for ONOS to be removed..."
-              onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
-              while [[ $onos != 0 ]]; do
-                onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
-                sleep 5
-                echo -ne "."
-              done
-              '''
-            }
-          } catch(org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-            // if we hit a timeout in the Cleanup phase, ONOS most likely got stuck somewhere, thus force-remove the pods
-            sh '''
-              kubectl get pods | grep Terminating | awk '{print $1}' | xargs kubectl delete pod --force --grace-period=0
-            '''
-          }
-          timeout(time: 10, unit: 'MINUTES') {
-            script {
-              helmTeardown(["default", "voltha1", "voltha-infra"])
-            }
-            sh returnStdout: false, script: '''
-              helm repo add onf https://charts.opencord.org
-              helm repo update
-
-              # remove all persistent volume claims
-              kubectl delete pvc --all-namespaces --all
-              PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-              while [[ \$PVCS != 0 ]]; do
-                sleep 5
-                PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-              done
-
-              # remove orphaned port-forward from different namespaces
-              ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${release}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        timeout(time: 5, unit: 'MINUTES') {
-          installVoltctl("${release}")
-          script {
-            startComponentsLogs([
-              appsToLog: [
-                'app.kubernetes.io/name=etcd',
-                'app.kubernetes.io/name=kafka',
-                'app=lwc',
-                'app=adapter-open-onu',
-                'app=adapter-open-olt',
-                'app=rw-core',
-                'app=bbsim',
-              ]
-            ])
-          }
-        }
-        timeout(time: 10, unit: 'MINUTES') {
-          sh """
-          cd /home/jenkins/Radisys_LWC_helm_charts
-
-          helm dep update ${lwc_helm_chart_path}/voltha-infra
-          helm upgrade --install --create-namespace -n infra voltha-infra ${lwc_helm_chart_path}/voltha-infra -f examples/${workflow}-values.yaml \
-            -f ${value_file} --wait
-
-          # helm dep update ${lwc_helm_chart_path}/voltha-stack
-          helm upgrade --install --create-namespace -n voltha1 voltha1 onf/voltha-stack \
-          --set voltha.ingress.enabled=true --set voltha.ingress.enableVirtualHosts=true --set voltha.fullHostnameOverride=voltha.scale1.dev \
-          -f ${value_file} --wait
-
-          helm upgrade --install -n voltha1 bbsim0 onf/bbsim --set olt_id=10 -f examples/${workflow}-values.yaml --set pon=${pons},onu=${onus} --version 4.6.0 --set oltRebootDelay=5 --wait
-          """
-        }
-      }
-    }
-    stage('Load MIB Template') {
-      when {
-        expression {
-          return params.withMibTemplate
-        }
-      }
-      steps {
-        sh """
-        # load MIB template
-        wget ${mibTemplateUrl} -O mibTemplate.json
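-        # push the downloaded template into etcd-0 (infra namespace) under the openonu-go MIB template key for the BBSM image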
-        cat mibTemplate.json | kubectl exec -it -n infra \$(kubectl get pods -n infra |grep etcd-0 | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/12345123451234512345/BBSM_IMG_00001
-        """
-      }
-    }
-    stage('Run Test') {
-      steps {
-        sh """
-          mkdir -p $WORKSPACE/RobotLogs
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-
-          daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward -n infra svc/lwc 8182:8181 --address 0.0.0.0
-          daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward -n voltha1 svc/voltha1-voltha-api 55555 --address 0.0.0.0
-
-          source ./vst_venv/bin/activate
-          robot -d $WORKSPACE/RobotLogs \
-          --exitonfailure \
-          -v pon:${pons} -v onu:${onus} \
-          tests/scale/Voltha_Scale_Tests_lwc.robot
-
-          python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
-          cat $WORKSPACE/execution-time.txt
-        """
-      }
-    }
-  }
-  post {
-    always {
-      stopComponentsLogs()
-      script {
-        try {
-          step([$class: 'RobotPublisher',
-            disableArchiveOutput: false,
-            logFileName: '**/log*.html',
-            otherFiles: '',
-            outputFileName: '**/output*.xml',
-            outputPath: 'RobotLogs',
-            passThreshold: 100,
-            reportFileName: '**/report*.html',
-            onlyCritical: true,
-            unstableThreshold: 0]);
-        } catch (Exception e) {
-            println "Cannot archive Robot Logs: ${e.toString()}"
-        }
-      }
-      plot([
-        csvFileName: 'scale-test.csv',
-        csvSeries: [
-          [file: 'plots/plot-lwc-olts.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-lwc-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-lwc-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-lwc-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-        ],
-        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus}, UNIs: ${unis})", yaxis: 'Time (s)', useDescr: true
-      ])
-      getPodsInfo("$LOG_FOLDER")
-      archiveArtifacts artifacts: 'execution-time.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.12/voltha-scale-multi-stack.groovy b/jjb/pipeline/voltha/voltha-2.12/voltha-scale-multi-stack.groovy
deleted file mode 100644
index 8420da0..0000000
--- a/jjb/pipeline/voltha/voltha-2.12/voltha-scale-multi-stack.groovy
+++ /dev/null
@@ -1,468 +0,0 @@
-// Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA using kind-voltha and performs a scale test
-
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-      timeout(time: 120, unit: 'MINUTES')
-  }
-  environment {
-    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
-    KUBECONFIG="$HOME/.kube/config"
-    SSHPASS="karaf"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-
-    LOG_FOLDER="$WORKSPACE/logs"
-  }
-
-  stages {
-    stage ('Cleanup') {
-      steps {
-        timeout(time: 11, unit: 'MINUTES') {
-          script {
-            def namespaces = ["infra"]
-            // FIXME we may have leftovers from more VOLTHA stacks (eg: run1 had 10 stacks, run2 had 2 stacks)
-            volthaStacks.toInteger().times {
-              namespaces += "voltha${it + 1}"
-            }
-            helmTeardown(namespaces)
-          }
-          sh returnStdout: false, script: '''
-            helm repo add onf https://charts.opencord.org
-            helm repo update
-
-            # remove all persistent volume claims
-            kubectl delete pvc --all-namespaces --all
-            PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-            while [[ \$PVCS != 0 ]]; do
-              sleep 5
-              PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-            done
-
-            # remove orphaned port-forward from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-
-            cd $WORKSPACE
-            rm -rf $WORKSPACE/*
-          '''
-        }
-      }
-    }
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${release}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Deploy common infrastructure') {
-      // includes monitoring
-      steps {
-        sh '''
-        if [ ${withMonitoring} = true ] ; then
-          helm install -n infra nem-monitoring cord/nem-monitoring \
-          -f $HOME/voltha-scale/grafana.yaml \
-          --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
-          --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
-        fi
-        '''
-      }
-    }
-    stage('Start logging') {
-      steps {
-        script {
-          startComponentsLogs([
-            appsToLog: [
-              'app.kubernetes.io/name=etcd',
-              'app.kubernetes.io/name=kafka',
-              'app=onos-classic',
-              'app=adapter-open-onu',
-              'app=adapter-open-olt',
-              'app=rw-core',
-              'app=ofagent',
-              'app=bbsim',
-              'app=radius',
-              'app=bbsim-sadis-server',
-              'app=onos-config-loader',
-            ]
-          ])
-        }
-      }
-    }
-    stage('Deploy VOLTHA infrastructure') {
-      steps {
-        timeout(time: 5, unit: 'MINUTES') {
-          script {
-            def localCharts = false
-            if (volthaHelmChartsChange != "" || release != "master") {
-              localCharts = true
-            }
-
-            def infraHelmFlags =
-                "--set global.log_level=${logLevel} " +
-                "--set radius.enabled=${withEapol} " +
-                "--set onos-classic.onosSshPort=30115 " +
-                "--set onos-classic.onosApiPort=30120 " +
-                params.extraHelmFlags
-
-            volthaInfraDeploy([
-              workflow: workflow,
-              infraNamespace: "infra",
-              extraHelmFlags: infraHelmFlags,
-              localCharts: localCharts,
-              onosReplica: onosReplicas,
-              atomixReplica: atomixReplicas,
-              kafkaReplica: kafkaReplicas,
-              etcdReplica: etcdReplicas,
-            ])
-          }
-        }
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        installVoltctl("${release}")
-        deploy_voltha_stacks(params.volthaStacks)
-      }
-    }
-    stage('Configuration') {
-      steps {
-        script {
-          sh returnStdout: false, script: """
-
-          # forward ETCD port
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG=etcd-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/etcd 9999:2379; done 2>&1 " &
-
-          # forward ONOS ports
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8101:8101; done 2>&1 " &
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8181:8181; done 2>&1 " &
-
-          # make sure the port-forwards have started before moving forward
-          sleep 5
-          """
-          sh returnStdout: false, script: """
-          # TODO this needs to be repeated per stack
-          # kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
-
-          # Configure link discovery (LLDP)
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000
-
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.onosproject
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.opencord
-
-          # Set Flows/Ports/Meters poll frequency
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
-
-          # SR is not needed in scale tests and is not currently used by operators in production, so it can be disabled.
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.onosproject.segmentrouting
-
-
-          if [ ${withFlows} = false ]; then
-            sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
-          fi
-          """
-        }
-      }
-    }
-    stage('Setup Test') {
-      steps {
-        sh '''
-          mkdir -p $WORKSPACE/RobotLogs
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-      }
-    }
-    stage('Run Test') {
-      steps {
-        test_voltha_stacks(params.volthaStacks)
-      }
-    }
-  }
-  post {
-    always {
-      stopComponentsLogs([compress: true])
-      // collect result, done in the "post" step so it's executed even in the
-      // event of a timeout in the tests
-      plot([
-        csvFileName: 'scale-test.csv',
-        csvSeries: [
-          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-        ],
-        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (Stacks: ${params.volthaStacks}, OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
-      ])
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: 'RobotLogs/**/log.html',
-        otherFiles: '',
-        outputFileName: 'RobotLogs/**/output.xml',
-        outputPath: '.',
-        passThreshold: 100,
-        reportFileName: 'RobotLogs/**/report.html',
-        onlyCritical: true,
-        unstableThreshold: 0]);
-      // get all the logs from kubernetes PODs
-      sh returnStdout: false, script: '''
-
-        # store information on running charts
-        helm ls --all-namespaces > $LOG_FOLDER/helm-list.txt || true
-
-        # store information on the running pods
-        kubectl get pods --all-namespaces -o wide > $LOG_FOLDER/pods.txt || true
-        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
-        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
-
-        # copy the ONOS logs directly from the container to avoid the color codes
-        printf '%s\n' $(kubectl get pods -n infra -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp -n infra #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
-
-      '''
-      // dump all the BBSim(s) ONU information
-      script {
-        for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
-          stack_ns="voltha"+i
-          sh """
-          mkdir -p \$LOG_FOLDER/${stack_ns}
-          BBSIM_IDS=\$(kubectl -n ${stack_ns} get pods | grep bbsim | grep -v server | awk '{print \$1}')
-          IDS=(\$BBSIM_IDS)
-
-          for bbsim in "\${IDS[@]}"
-          do
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl onu list > \$LOG_FOLDER/${stack_ns}/\$bbsim-device-list.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl service list > \$LOG_FOLDER/${stack_ns}/\$bbsim-service-list.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt resources GEM_PORT > \$LOG_FOLDER/${stack_ns}/\$bbsim-flows-gem-ports.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt resources ALLOC_ID > \$LOG_FOLDER/${stack_ns}/\$bbsim-flows-alloc-ids.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt pons > \$LOG_FOLDER/${stack_ns}/\$bbsim-pon-resources.txt || true
-          done
-          """
-        }
-      }
-      // get ONOS debug infos
-      sh '''
-
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt || true
-
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 netcfg > $LOG_FOLDER/onos-netcfg.txt || true
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg get > $LOG_FOLDER/onos-cfg.txt || true
-
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt || true
-
-        if [ ${withFlows} = true ] ; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt || true
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-port-status > $LOG_FOLDER/onos-volt-port-status.txt || true
-        fi
-
-        if [ ${provisionSubscribers} = true ]; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt || true
-        fi
-
-        if [ ${withEapol} = true ] ; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt || true
-        fi
-
-        if [ ${withDhcp} = true ] ; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt || true
-        fi
-      '''
-      // collect etcd metrics
-      sh '''
-        mkdir -p $WORKSPACE/etcd-metrics
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
-      '''
-      // get VOLTHA debug infos
-      script {
-        for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
-          stack_ns="voltha"+i
-          voltcfg="~/.volt/config-voltha"+i
-          try {
-            sh """
-
-            # _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555& > /dev/null 2>&1
-            _TAG="voltha-port-forward" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1; done"&
-
-            voltctl -m 32MB device list -o json > $LOG_FOLDER/${stack_ns}/device-list.json || true
-            python -m json.tool $LOG_FOLDER/${stack_ns}/device-list.json > $LOG_FOLDER/${stack_ns}/voltha-devices-list.json || true
-            rm $LOG_FOLDER/${stack_ns}/device-list.json || true
-            voltctl -m 32MB device list > $LOG_FOLDER/${stack_ns}/voltha-devices-list.txt || true
-
-            DEVICE_LIST=
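-            # for each OLT device and each logical device, dump flows and port lists into per-device files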
-            printf '%s\n' \$(voltctl -m 32MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB device flows # > $LOG_FOLDER/${stack_ns}/voltha-device-flows-#.txt" || true
-            printf '%s\n' \$(voltctl -m 32MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/${stack_ns}/voltha-device-ports-#.txt" || true
-
-            printf '%s\n' \$(voltctl -m 32MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB logicaldevice flows # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-flows-#.txt" || true
-            printf '%s\n' \$(voltctl -m 32MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB logicaldevice port list # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-ports-#.txt" || true
-
-            # remove VOLTHA port-forward
-            ps aux | grep port-forw | grep voltha-api | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
-            """
-          } catch(e) {
-            println e
-            sh '''
-            echo "Can't get device list from voltctl"
-            '''
-          }
-        }
-      }
-      // get cpu usage by container
-      sh '''
-      if [ ${withMonitoring} = true ] ; then
-        cd $WORKSPACE/voltha-system-tests
-        source ./vst_venv/bin/activate
-        sleep 60 # we have to wait for prometheus to collect all the information
-        python scripts/sizing.py -o $WORKSPACE/plots || true
-      fi
-      '''
-      archiveArtifacts artifacts: 'kind-voltha/install-*.log,execution-time-*.txt,logs/**/*.txt,logs/**/*.tar.gz,logs/**/*.tgz,RobotLogs/**/*,plots/*,etcd-metrics/*'
-    }
-  }
-}
-
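-// Deploys the requested number of VOLTHA stacks (voltha1..volthaN), each in its own namespace, all sharing the common "infra" namespace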
-def deploy_voltha_stacks(numberOfStacks) {
-  for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
-    timeout(time: 5, unit: 'MINUTES') {
-      stage("Deploy VOLTHA stack " + i) {
-
-        def localCharts = false
-        if (volthaHelmChartsChange != "" || release != "master") {
-          localCharts = true
-        }
-
-        def volthaHelmFlags =
-                "--set global.log_level=${logLevel} " +
-                "--set enablePerf=true,onu=${onus},pon=${pons} " +
-                "--set securityContext.enabled=false " +
-                params.extraHelmFlags
-
-        volthaStackDeploy([
-          bbsimReplica: olts.toInteger(),
-          infraNamespace: "infra",
-          volthaNamespace: "voltha${i}",
-          stackName: "voltha${i}",
-          stackId: i,
-          workflow: workflow,
-          extraHelmFlags: volthaHelmFlags,
-          localCharts: localCharts,
-          onosReplica: onosReplicas,
-        ])
-      }
-    }
-  }
-}
-
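-// Runs the scale test suite once per deployed stack, re-creating the voltha-api port-forward to the stack under test each time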
-def test_voltha_stacks(numberOfStacks) {
-  for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
-    stage("Test VOLTHA stack " + i) {
-      timeout(time: 15, unit: 'MINUTES') {
-        sh """
-
-        # we restart the voltha-api port-forward for each stack, so there is no need for a separate voltconfig file per stack
-        voltctl -s 127.0.0.1:55555 config > $HOME/.volt/config
-        export VOLTCONFIG=$HOME/.volt/config
-
-        # _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555& > /dev/null 2>&1
-        _TAG="voltha-port-forward" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1; done"&
-
-        # wait a bit to make sure the port-forwarding has started
-        sleep 5
-
-
-          ROBOT_PARAMS="-v stackId:${i} \
-            -v olt:${olts} \
-            -v pon:${pons} \
-            -v onu:${onus} \
-            -v workflow:${workflow} \
-            -v withEapol:${withEapol} \
-            -v withDhcp:${withDhcp} \
-            -v withIgmp:${withIgmp} \
-            --noncritical non-critical \
-            -e igmp \
-            -e onu-upgrade \
-            -e teardown "
-
-          if [ ${withEapol} = false ] ; then
-            ROBOT_PARAMS+="-e authentication "
-          fi
-
-          if [ ${withDhcp} = false ] ; then
-            ROBOT_PARAMS+="-e dhcp "
-          fi
-
-          if [ ${provisionSubscribers} = false ] ; then
-            # if we're not considering subscribers then we don't care about authentication and dhcp
-            ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
-          fi
-
-          if [ ${withFlows} = false ] ; then
-            ROBOT_PARAMS+="-i setup -i activation "
-          fi
-
-          cd $WORKSPACE/voltha-system-tests
-          source ./vst_venv/bin/activate
-          robot -d $WORKSPACE/RobotLogs/voltha${i} \
-          \$ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-
-          # collect results
-          python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/voltha${i}/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time-voltha${i}.txt || true
-          cat $WORKSPACE/execution-time-voltha${i}.txt
-        """
-        sh """
-          # remove VOLTHA port-forward
-          ps aux | grep port-forw | grep voltha-api | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 2>&1 > /dev/null || true
-        """
-      }
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.12/voltha-scale-test.groovy b/jjb/pipeline/voltha/voltha-2.12/voltha-scale-test.groovy
deleted file mode 100644
index 88d6070..0000000
--- a/jjb/pipeline/voltha/voltha-2.12/voltha-scale-test.groovy
+++ /dev/null
@@ -1,933 +0,0 @@
-// Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA and performs a scale test
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// this function generates the correct parameters for ofAgent
-// to connect to multiple ONOS instances
-def ofAgentConnections(numOfOnos, releaseName, namespace) {
-    def params = " "
-    numOfOnos.times {
-        params += "--set voltha.services.controller[${it}].address=${releaseName}-onos-classic-${it}.${releaseName}-onos-classic-hs.${namespace}.svc:6653 "
-    }
-    return params
-}
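-// e.g. ofAgentConnections(2, "voltha-infra", "default") returns:
-//   "--set voltha.services.controller[0].address=voltha-infra-onos-classic-0.voltha-infra-onos-classic-hs.default.svc:6653 " +
-//   "--set voltha.services.controller[1].address=voltha-infra-onos-classic-1.voltha-infra-onos-classic-hs.default.svc:6653 "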
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-      timeout(time: 60, unit: 'MINUTES')
-  }
-  environment {
-    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
-    KUBECONFIG="$HOME/.kube/config"
-    VOLTCONFIG="$HOME/.volt/config"
-    SSHPASS="karaf"
-    VOLTHA_LOG_LEVEL="${logLevel}"
-    NUM_OF_BBSIM="${olts}"
-    NUM_OF_OPENONU="${openonuAdapterReplicas}"
-    NUM_OF_ONOS="${onosReplicas}"
-    NUM_OF_ATOMIX="${atomixReplicas}"
-    EXTRA_HELM_FLAGS=" "
-    LOG_FOLDER="$WORKSPACE/logs"
-    GERRIT_PROJECT="${GERRIT_PROJECT}"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-  }
-
-  stages {
-    stage ('Cleanup') {
-      steps {
-        script {
-          try {
-            timeout(time: 5, unit: 'MINUTES') {
-              sh returnStdout: false, script: '''
-              cd $WORKSPACE
-              rm -rf $WORKSPACE/*
-              '''
-              // removing the voltha-infra chart first
-              // if we don't, ONOS might get stuck because of all the events generated when BBSim goes down
-              sh returnStdout: false, script: '''
-              set +x
-              helm del voltha-infra || true
-              echo -ne "\nWaiting for ONOS to be removed..."
-              onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
-              while [[ $onos != 0 ]]; do
-                onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
-                sleep 5
-                echo -ne "."
-              done
-              '''
-            }
-          } catch(org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-            // if we hit a timeout in the Cleanup phase, ONOS most likely got stuck somewhere, thus force-remove the pods
-            sh '''
-              kubectl get pods | grep Terminating | awk '{print $1}' | xargs kubectl delete pod --force --grace-period=0
-            '''
-          }
-          timeout(time: 10, unit: 'MINUTES') {
-            script {
-              helmTeardown(["default", "voltha1", "infra"])
-            }
-            sh returnStdout: false, script: '''
-              helm repo add onf https://charts.opencord.org
-              helm repo update
-
-              # remove all persistent volume claims
-              kubectl delete pvc --all-namespaces --all
-              PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-              while [[ \$PVCS != 0 ]]; do
-                sleep 5
-                PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-              done
-
-              # remove orphaned port-forward from different namespaces
-              ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${release}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Build patch') {
-      when {
-        expression {
-          return params.GERRIT_PROJECT
-        }
-      }
-      steps {
-        sh """
-        git clone https://\$GERRIT_HOST/\$GERRIT_PROJECT
-        cd \$GERRIT_PROJECT
-        git fetch https://\$GERRIT_HOST/\$GERRIT_PROJECT \$GERRIT_REFSPEC && git checkout FETCH_HEAD
-
-        DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-build
-        DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-push
-        """
-      }
-    }
-    stage('Deploy common infrastructure') {
-      steps {
-        sh '''
-        if [ ${withMonitoring} = true ] ; then
-          helm install nem-monitoring onf/nem-monitoring \
-          -f $HOME/voltha-scale/grafana.yaml \
-          --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
-          --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
-        fi
-        '''
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        timeout(time: 10, unit: 'MINUTES') {
-          installVoltctl("${release}")
-          script {
-            startComponentsLogs([
-              appsToLog: [
-                'app.kubernetes.io/name=etcd',
-                'app.kubernetes.io/name=kafka',
-                'app=voltha-infra-atomix',
-                'app=onos-classic',
-                'app=adapter-open-onu',
-                'app=adapter-open-olt',
-                'app=rw-core',
-                'app=ofagent',
-                'app=bbsim',
-                'app=radius',
-                'app=bbsim-sadis-server',
-                'app=onos-config-loader',
-              ]
-            ])
-            def returned_flags = sh (returnStdout: true, script: """
-
-              export EXTRA_HELM_FLAGS+=' '
-
-              # BBSim custom image handling
-              if [ '${bbsimImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'bbsim' ]; then
-                IFS=: read -r bbsimRepo bbsimTag <<< '${bbsimImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set images.bbsim.repository=\$bbsimRepo,images.bbsim.tag=\$bbsimTag "
-              fi
-
-              # VOLTHA custom image handling
-              if [ '${rwCoreImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-go' ]; then
-                IFS=: read -r rwCoreRepo rwCoreTag <<< '${rwCoreImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=\$rwCoreRepo,voltha.images.rw_core.tag=\$rwCoreTag "
-              fi
-
-              # ofAgent custom image handling
-              if [ '${ofAgentImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'of-agent' ]; then
-                IFS=: read -r ofAgentRepo ofAgentTag <<< '${ofAgentImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=\$ofAgentRepo,voltha.images.ofagent.tag=\$ofAgentTag "
-              fi
-
-              # OpenOLT custom image handling
-              if [ '${openoltAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openolt-adapter' ]; then
-                IFS=: read -r openoltAdapterRepo openoltAdapterTag <<< '${openoltAdapterImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha-adapter-openolt.images.adapter_open_olt.repository=\$openoltAdapterRepo,voltha-adapter-openolt.images.adapter_open_olt.tag=\$openoltAdapterTag "
-              fi
-
-              # OpenONU custom image handling
-              if [ '${openonuAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter' ]; then
-                IFS=: read -r openonuAdapterRepo openonuAdapterTag <<< '${openonuAdapterImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu.repository=\$openonuAdapterRepo,voltha-adapter-openonu.images.adapter_open_onu.tag=\$openonuAdapterTag "
-              fi
-
-              # OpenONU GO custom image handling
-              if [ '${openonuAdapterGoImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter-go' ]; then
-                IFS=: read -r openonuAdapterGoRepo openonuAdapterGoTag <<< '${openonuAdapterGoImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu_go.repository=\$openonuAdapterGoRepo,voltha-adapter-openonu.images.adapter_open_onu_go.tag=\$openonuAdapterGoTag "
-              fi
-
-              # ONOS custom image handling
-              if [ '${onosImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-onos' ]; then
-                IFS=: read -r onosRepo onosTag <<< '${onosImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=\$onosRepo,onos-classic.image.tag=\$onosTag "
-              fi
-
-              # set BBSim parameters
-              EXTRA_HELM_FLAGS+='--set enablePerf=true,pon=${pons},onu=${onus},uni=${unis} '
-
-              # disable the securityContext; this is a development cluster
-              EXTRA_HELM_FLAGS+='--set securityContext.enabled=false '
-
-              # No persistent-volume-claims in Atomix
-              EXTRA_HELM_FLAGS+="--set onos-classic.atomix.persistence.enabled=false "
-
-              # Use custom built images
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-go' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=${dockerRegistry}/voltha/voltha-rw-core,voltha.images.rw_core.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-openolt-adapter' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha-openolt-adapter.images.adapter_open_olt.repository=${dockerRegistry}/voltha/voltha-openolt-adapter,voltha-openolt-adapter.images.adapter_open_olt.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter.images.adapter_open_onu.repository=${dockerRegistry}/voltha/voltha-openonu-adapter,voltha-openonu-adapter.images.adapter_open_onu.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter-go' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter-go.images.adapter_open_onu_go.repository=${dockerRegistry}/voltha/voltha-openonu-adapter-go,voltha-openonu-adapter-go.images.adapter_open_onu_go.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'ofagent-go' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=${dockerRegistry}/voltha/voltha-ofagent-go,ofagent-go.images.ofagent.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-onos' ]; then
-                EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=${dockerRegistry}/voltha/voltha-onos,onos-classic.image.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'bbsim' ]; then
-                EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
-              fi
-              echo \$EXTRA_HELM_FLAGS
-
-            """).trim()
-
-            def extraHelmFlags = returned_flags
-            // The added space before params.extraHelmFlags is required due to the .trim() above
-            def infraHelmFlags =
-              "--set global.log_level=${logLevel} " +
-              "--set radius.enabled=${withEapol} " +
-              "--set onos-classic.onosSshPort=30115 " +
-              "--set onos-classic.onosApiPort=30120 " +
-              extraHelmFlags + " " + params.extraHelmFlags
-
-            println "Passing the following parameters to the VOLTHA infra deploy: ${infraHelmFlags}."
-
-            def localCharts = false
-            if (volthaHelmChartsChange != "") {
-              localCharts = true
-            }
-
-            volthaInfraDeploy([
-              workflow: workflow,
-              infraNamespace: "default",
-              extraHelmFlags: infraHelmFlags,
-              localCharts: localCharts,
-              onosReplica: onosReplicas,
-              atomixReplica: atomixReplicas,
-              kafkaReplica: kafkaReplicas,
-              etcdReplica: etcdReplicas,
-            ])
-
-            stackHelmFlags = " --set onu=${onus},pon=${pons},uni=${unis} --set global.log_level=${logLevel.toLowerCase()} "
-            stackHelmFlags += " --set voltha.ingress.enabled=true --set voltha.ingress.enableVirtualHosts=true --set voltha.fullHostnameOverride=voltha.scale1.dev "
-            stackHelmFlags += extraHelmFlags + " " + params.extraHelmFlags
-
-            volthaStackDeploy([
-              bbsimReplica: olts.toInteger(),
-              infraNamespace: "default",
-              volthaNamespace: "default",
-              stackName: "voltha1", // TODO support custom charts
-              workflow: workflow,
-              extraHelmFlags: stackHelmFlags,
-              localCharts: localCharts,
-              onosReplica: onosReplicas,
-            ])
-            sh """
-              set +x
-
-              echo -ne "\nWaiting for VOLTHA and ONOS to start..."
-              voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
-              onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
-              while [[ \$voltha != 0 || \$onos != 0 ]]; do
-                sleep 5
-                echo -ne "."
-                voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
-                onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
-              done
-              echo -ne "\nVOLTHA and ONOS pods ready\n"
-              kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l
-              kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l
-            """
-            start_port_forward(olts)
-          }
-        }
-      }
-    }
-    stage('Configuration') {
-      steps {
-        script {
-          setOnosLogLevels([
-              onosNamespace: "default",
-              apps: [
-                'org.opencord.dhcpl2relay',
-                'org.opencord.olt',
-                'org.opencord.aaa',
-                'org.opencord.maclearner',
-                'org.onosproject.net.flowobjective.impl.FlowObjectiveManager',
-                'org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager'
-              ],
-              logLevel: logLevel
-          ])
-          def tech_prof_directory = "XGS-PON"
-          sh returnStdout: false, script: """
-          # Configure link discovery (LLDP)
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
-
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000
-
-          # BBSim logs at debug level don't slow down the system much and are very helpful while troubleshooting
-          BBSIM_IDS=\$(kubectl get pods | grep bbsim | grep -v server | awk '{print \$1}')
-          IDS=(\$BBSIM_IDS)
-
-          for bbsim in "\${IDS[@]}"
-          do
-            kubectl exec -t \$bbsim -- bbsimctl log debug false
-          done
-
-          # Set Flows/Ports/Meters/Groups poll frequency
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.group.impl.OpenFlowGroupProvider groupPollInterval ${onosGroupInterval}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.FlowObjectiveManager numThreads ${flowObjWorkerThreads}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager objectiveTimeoutMs 300000
-
-          if [ ${withFlows} = false ]; then
-            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 app deactivate org.opencord.olt
-          fi
-
-          if [ '${workflow}' = 'tt' ]; then
-            etcd_container=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$2}')
-            kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
-            put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64')
-            kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
-            put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65')
-            kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast.json
-            put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66')
-          fi
-
-          if [ ${withPcap} = true ] ; then
-            # Start the tcp-dump in ofagent
-            export OF_AGENT=\$(kubectl get pods -l app=ofagent -o name)
-            kubectl exec \$OF_AGENT -- apk update
-            kubectl exec \$OF_AGENT -- apk add tcpdump
-            _TAG=ofagent-tcpdump kubectl exec \$OF_AGENT -- tcpdump -nei eth0 -w out.pcap&
-
-            # Start the tcp-dump in radius
-            export RADIUS=\$(kubectl get pods -l app=radius -o name)
-            kubectl exec \$RADIUS -- apt-get update
-            kubectl exec \$RADIUS -- apt-get install -y tcpdump
-            _TAG=radius-tcpdump kubectl exec \$RADIUS -- tcpdump -w out.pcap&
-
-            # Start the tcp-dump in ONOS
-            for i in \$(seq 0 \$ONOSES); do
-              INSTANCE="onos-onos-classic-\$i"
-              kubectl exec \$INSTANCE -- apt-get update
-              kubectl exec \$INSTANCE -- apt-get install -y tcpdump
-              kubectl exec \$INSTANCE -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
-              _TAG=\$INSTANCE kubectl exec \$INSTANCE -- /usr/bin/tcpdump -nei eth0 port 1812 -w out.pcap&
-            done
-          fi
-          """
-        }
-      }
-    }
-    stage('Load MIB Template') {
-      when {
-        expression {
-          return params.withMibTemplate
-        }
-      }
-      steps {
-        sh """
-        # load MIB template
-        wget ${mibTemplateUrl} -O mibTemplate.json
-        cat mibTemplate.json | kubectl exec -it \$(kubectl get pods |grep etcd-0 | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/v0.0.1/BBSM_IMG_00001
-        """
-      }
-    }
-    stage('Run Test') {
-      steps {
-        sh '''
-          mkdir -p $WORKSPACE/RobotLogs
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-        sh '''
-          if [ ${withProfiling} = true ] ; then
-            mkdir -p $LOG_FOLDER/pprof
-            echo $PATH
-            # create a helper shell script that periodically collects pprof profiles from the VOLTHA components
-            cat << EOF > $WORKSPACE/pprof.sh
-timestamp() {
-  date +"%T"
-}
-
-i=0
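-# each iteration: heap and goroutine snapshots plus a 10s CPU profile from rw-core (:6060), openolt (:6061) and ofagent (:6062), rendered to PNG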
-while [[ true ]]; do
-  ((i++))
-  ts=$(timestamp)
-  go tool pprof -png http://127.0.0.1:6060/debug/pprof/heap > $LOG_FOLDER/pprof/rw-core-heap-\\$i-\\$ts.png
-  go tool pprof -png http://127.0.0.1:6060/debug/pprof/goroutine > $LOG_FOLDER/pprof/rw-core-goroutine-\\$i-\\$ts.png
-  curl -o $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof http://127.0.0.1:6060/debug/pprof/profile?seconds=10
-  go tool pprof -png $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.png
-
-  go tool pprof -png http://127.0.0.1:6061/debug/pprof/heap > $LOG_FOLDER/pprof/openolt-heap-\\$i-\\$ts.png
-  go tool pprof -png http://127.0.0.1:6061/debug/pprof/goroutine > $LOG_FOLDER/pprof/openolt-goroutine-\\$i-\\$ts.png
-  curl -o $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof http://127.0.0.1:6061/debug/pprof/profile?seconds=10
-  go tool pprof -png $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.png
-
-  go tool pprof -png http://127.0.0.1:6062/debug/pprof/heap > $LOG_FOLDER/pprof/ofagent-heap-\\$i-\\$ts.png
-  go tool pprof -png http://127.0.0.1:6062/debug/pprof/goroutine > $LOG_FOLDER/pprof/ofagent-goroutine-\\$i-\\$ts.png
-  curl -o $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof http://127.0.0.1:6062/debug/pprof/profile?seconds=10
-  go tool pprof -png $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.png
-
-  sleep 10
-done
-EOF
-
-            _TAG="pprof"
-            _TAG=$_TAG bash $WORKSPACE/pprof.sh &
-          fi
-        '''
-        timeout(time: "${testTimeout.toInteger() + 5}", unit: 'MINUTES') {
-          sh '''
-            ROBOT_PARAMS="--exitonfailure \
-              -v olt:${olts} \
-              -v pon:${pons} \
-              -v onu:${onus} \
-              -v ONOS_SSH_PORT:30115 \
-              -v ONOS_REST_PORT:30120 \
-              -v workflow:${workflow} \
-              -v withEapol:${withEapol} \
-              -v withDhcp:${withDhcp} \
-              -v withIgmp:${withIgmp} \
-              -v timeout:${testTimeout}m \
-              -v withMaclearning:${withMaclearning} \
-              --noncritical non-critical \
-              -e onu-upgrade -e igmp -e teardown "
-
-            if [ ${withEapol} = false ] ; then
-              ROBOT_PARAMS+="-e authentication "
-            fi
-
-            if [ ${withDhcp} = false ] ; then
-              ROBOT_PARAMS+="-e dhcp "
-            fi
-
-            if [ ${provisionSubscribers} = false ] ; then
-              # if we're not considering subscribers then we don't care about authentication and dhcp
-              ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
-            fi
-
-            if [ ${withFlows} = false ] ; then
-              ROBOT_PARAMS+="-i setup -i activation "
-            fi
-
-            if [ ${withOnuUpgrade} = true ] ; then
-              ROBOT_PARAMS+="-e flow-before "
-            fi
-
-            cd $WORKSPACE/voltha-system-tests
-            source ./vst_venv/bin/activate
-            robot -d $WORKSPACE/RobotLogs \
-            $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-
-            python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
-            cat $WORKSPACE/execution-time.txt
-          '''
-        }
-      }
-    }
-    stage('Run ONU Upgrade Tests') {
-      environment {
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/OnuUpgradeTests"
-      }
-      when {
-        expression {
-          return params.withOnuUpgrade
-        }
-      }
-      options {
-          timeout(time: "${testTimeout.toInteger() + 1}", unit: 'MINUTES')
-      }
-      steps {
-        sh '''
-          set +e
-          mkdir -p $ROBOT_LOGS_DIR
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-        script {
-          Exception caughtException = null
-
-          catchError(buildResult: 'SUCCESS', stageResult: 'ABORTED') {
-            try {
-              sh '''
-                ROBOT_PARAMS="--exitonfailure \
-                  -v olt:${olts} \
-                  -v pon:${pons} \
-                  -v onu:${onus} \
-                  -v workflow:${workflow} \
-                  -v withEapol:${withEapol} \
-                  -v withDhcp:${withDhcp} \
-                  -v withIgmp:${withIgmp} \
-                  -v timeout:${testTimeout}m \
-                  -v withMaclearning:${withMaclearning} \
-                  -v image_version:BBSM_IMG_00002 \
-                  -v image_url:http://bbsim0:50074/images/software-image.img \
-                  -v image_vendor:BBSM \
-                  -v image_activate_on_success:false \
-                  -v image_commit_on_success:false \
-                  -v image_crc:0 \
-                  -v ONOS_SSH_PORT:30115 \
-                  -v ONOS_REST_PORT:30120 \
-                  --noncritical non-critical \
-                  -i onu-upgrade \
-                  -e setup -e activation -e flow-before \
-                  -e authentication -e provision -e flow-after \
-                  -e dhcp -e igmp -e teardown "
-                cd $WORKSPACE/voltha-system-tests
-                source ./vst_venv/bin/activate
-                robot -d $ROBOT_LOGS_DIR \
-                $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-              '''
-            } catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-              // if the error is a timeout don't mark the build as failed
-              println "ONU Upgrade test timed out"
-            } catch (Throwable e) {
-              caughtException = e
-            }
-          }
-
-          if (caughtException) {
-            error caughtException.message
-          }
-        }
-      }
-    }
-    stage('Run Igmp Tests') {
-      environment {
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/IgmpTests"
-      }
-      when {
-        expression {
-          return params.withIgmp
-        }
-      }
-      options {
-          timeout(time: "${testTimeout.toInteger() + 1}", unit: 'MINUTES')
-      }
-      steps {
-        sh returnStdout: false, script: """
-          # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.onosproject.store.group.impl
-        """
-        sh '''
-          set +e
-          mkdir -p $ROBOT_LOGS_DIR
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-        script {
-          Exception caughtException = null
-
-          catchError(buildResult: 'SUCCESS', stageResult: 'ABORTED') {
-            try {
-              sh '''
-                ROBOT_PARAMS="--exitonfailure \
-                  -v olt:${olts} \
-                  -v pon:${pons} \
-                  -v onu:${onus} \
-                  -v workflow:${workflow} \
-                  -v withEapol:${withEapol} \
-                  -v withDhcp:${withDhcp} \
-                  -v withIgmp:${withIgmp} \
-                  -v timeout:${testTimeout}m \
-                  -v withMaclearning:${withMaclearning} \
-                  -v ONOS_SSH_PORT:30115 \
-                  -v ONOS_REST_PORT:30120 \
-                  --noncritical non-critical \
-                  -i igmp \
-                  -e setup -e activation -e flow-before \
-                  -e authentication -e provision -e flow-after \
-                  -e dhcp -e onu-upgrade -e teardown "
-                cd $WORKSPACE/voltha-system-tests
-                source ./vst_venv/bin/activate
-                robot -d $ROBOT_LOGS_DIR \
-                $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-              '''
-            } catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-              // if the error is a timeout don't mark the build as failed
-              println "IGMP test timed out"
-            } catch (Throwable e) {
-              caughtException = e
-            }
-          }
-
-          if (caughtException) {
-            error caughtException.message
-          }
-        }
-      }
-    }
-    stage("Device removal") {
-      options {
-          timeout(time: "${testTimeout.toInteger() + 5}", unit: 'MINUTES')
-      }
-      steps {
-        sh '''
-          set +e
-          mkdir -p $ROBOT_LOGS_DIR
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-        script {
-          Exception caughtException = null
-
-          catchError(buildResult: 'SUCCESS', stageResult: 'ABORTED') {
-            try {
-              sh '''
-                ROBOT_PARAMS="--exitonfailure \
-                  -v olt:${olts} \
-                  -v pon:${pons} \
-                  -v onu:${onus} \
-                  -v ONOS_SSH_PORT:30115 \
-                  -v ONOS_REST_PORT:30120 \
-                  -v workflow:${workflow} \
-                  -v withEapol:${withEapol} \
-                  -v withDhcp:${withDhcp} \
-                  -v withIgmp:${withIgmp} \
-                  -v timeout:${testTimeout}m \
-                  -v withMaclearning:${withMaclearning} \
-                  --noncritical non-critical \
-                  -i teardown"
-
-                  cd $WORKSPACE/voltha-system-tests
-                  source ./vst_venv/bin/activate
-                  robot -d $WORKSPACE/RobotLogs \
-                  $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-                '''
-            } catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-              // if the error is a timeout don't mark the build as failed
-              println "Cleanup test timed out"
-            } catch (Throwable e) {
-              caughtException = e
-            }
-          }
-        }
-      }
-    }
-  }
-  post {
-    always {
-      stopComponentsLogs()
-      // collect result, done in the "post" step so it's executed even in the
-      // event of a timeout in the tests
-      sh '''
-        if [ ${withPcap} = true ] ; then
-          # stop ofAgent tcpdump
-          P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
-          if [ -n "\$P_ID" ]; then
-            kill -9 \$P_ID
-          fi
-
-          # stop radius tcpdump
-          P_ID="\$(ps e -ww -A | grep "_TAG=radius-tcpdump" | grep -v grep | awk '{print \$1}')"
-          if [ -n "\$P_ID" ]; then
-            kill -9 \$P_ID
-          fi
-
-          # stop onos tcpdump
-          LIMIT=$(($NUM_OF_ONOS - 1))
-          for i in $(seq 0 $LIMIT); do
-            INSTANCE="onos-onos-classic-$i"
-            P_ID="\$(ps e -ww -A | grep "_TAG=$INSTANCE" | grep -v grep | awk '{print \$1}')"
-            if [ -n "\$P_ID" ]; then
-              kill -9 \$P_ID
-            fi
-          done
-
-          # copy the file
-          export OF_AGENT=$(kubectl get pods -l app=ofagent | awk 'NR==2{print $1}') || true
-          kubectl cp $OF_AGENT:out.pcap $LOG_FOLDER/ofagent.pcap || true
-
-          export RADIUS=$(kubectl get pods -l app=radius | awk 'NR==2{print $1}') || true
-          kubectl cp $RADIUS:out.pcap $LOG_FOLDER/radius.pcap || true
-
-          LIMIT=$(($NUM_OF_ONOS - 1))
-          for i in $(seq 0 $LIMIT); do
-            INSTANCE="onos-onos-classic-$i"
-            kubectl cp $INSTANCE:out.pcap $LOG_FOLDER/$INSTANCE.pcap || true
-          done
-        fi
-      '''
-      sh '''
-        if [ ${withProfiling} = true ] ; then
-          _TAG="pprof"
-          P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
-          if [ -n "$P_IDS" ]; then
-            echo $P_IDS
-            for P_ID in $P_IDS; do
-              kill -9 $P_ID
-            done
-          fi
-        fi
-      '''
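-      // publish the timing series produced by collect-result.py (plots/*.txt) as build trend plots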
-      plot([
-        csvFileName: 'scale-test.csv',
-        csvSeries: [
-          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-        ],
-        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus}, UNIs: ${unis})", yaxis: 'Time (s)', useDescr: true
-      ])
-      script {
-        try {
-          step([$class: 'RobotPublisher',
-            disableArchiveOutput: false,
-            logFileName: '**/log*.html',
-            otherFiles: '',
-            outputFileName: '**/output*.xml',
-            outputPath: 'RobotLogs',
-            passThreshold: 100,
-            reportFileName: '**/report*.html',
-            onlyCritical: true,
-            unstableThreshold: 0]);
-        } catch (Exception e) {
-            println "Cannot archive Robot Logs: ${e.toString()}"
-        }
-      }
-
-      getPodsInfo("$LOG_FOLDER")
-      // get all the logs from kubernetes PODs
-      sh returnStdout: false, script: '''
-        # copy the ONOS logs directly from the container to avoid the color codes
-        printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
-
-        # get ONOS cfg from the 3 nodes
-        # kubectl exec -t voltha-infra-onos-classic-0 -- sh /root/onos/${karafHome}/bin/client cfg get > ~/voltha-infra-onos-classic-0-cfg.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-1 -- sh /root/onos/${karafHome}/bin/client cfg get > ~/voltha-infra-onos-classic-1-cfg.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-2 -- sh /root/onos/${karafHome}/bin/client cfg get > ~/voltha-infra-onos-classic-2-cfg.txt || true
-
-        # kubectl exec -t voltha-infra-onos-classic-0 -- sh /root/onos/${karafHome}/bin/client obj-next-ids > ~/voltha-infra-onos-classic-0-next-objs.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-1 -- sh /root/onos/${karafHome}/bin/client obj-next-ids > ~/voltha-infra-onos-classic-1-next-objs.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-2 -- sh /root/onos/${karafHome}/bin/client obj-next-ids > ~/voltha-infra-onos-classic-2-next-objs.txt || true
-
-        # get radius logs out of the container
-        kubectl cp $(kubectl get pods -l app=radius --no-headers  | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER/radius.log || true
-      '''
-      // dump all the BBSim(s) ONU information
-      sh '''
-      BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
-      IDS=($BBSIM_IDS)
-
-      for bbsim in "${IDS[@]}"
-      do
-        kubectl exec -t $bbsim -- bbsimctl onu list > $LOG_FOLDER/$bbsim-device-list.txt || true
-        kubectl exec -t $bbsim -- bbsimctl uni list > $LOG_FOLDER/$bbsim-uni-list.txt || true
-        kubectl exec -t $bbsim -- bbsimctl service list > $LOG_FOLDER/$bbsim-service-list.txt || true
-        kubectl exec -t $bbsim -- bbsimctl olt resources GEM_PORT > $LOG_FOLDER/$bbsim-flows-gem-ports.txt || true
-        kubectl exec -t $bbsim -- bbsimctl olt resources ALLOC_ID > $LOG_FOLDER/$bbsim-flows-alloc-ids.txt || true
-        kubectl exec -t $bbsim -- bbsimctl olt pons > $LOG_FOLDER/$bbsim-pon-resources.txt || true
-      done
-      '''
-      script {
-        // first make sure the port-forward is still running,
-        // sometimes Jenkins kills it regardless of the JENKINS_NODE_COOKIE=dontKillMe
-        def running = sh (
-            script: 'ps aux | grep port-forw | grep -E "onos|voltha" | grep -v grep | wc -l',
-            returnStdout: true
-        ).trim()
-        // if any of the voltha-api, onos-rest, onos-ssh port-forwards are not there
-        // kill all and restart
-        if (running != "3") {
-          start_port_forward(olts)
-        }
-      }
-      // get ONOS debug infos
-      sh '''
-
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg get > $LOG_FOLDER/onos-cfg.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 netcfg > $LOG_FOLDER/onos-netcfg.txt
-
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt
-
-        if [ ${withFlows} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-port-status > $LOG_FOLDER/onos-volt-port-status.txt
-        fi
-
-        if [ ${provisionSubscribers} = true ]; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt
-        fi
-
-        if [ ${withEapol} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt
-        fi
-
-        if [ ${withDhcp} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt
-        fi
-
-        if [ ${withIgmp} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-routes > $LOG_FOLDER/onos-mcast-host-routes.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-show > $LOG_FOLDER/onos-mcast-host-show.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 groups > $LOG_FOLDER/onos-groups.txt
-        fi
-
-        if [ ${withMaclearning} = true ] ; then
-           sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mac-learner-get-mapping > $LOG_FOLDER/onos-maclearning-host-mappings.txt
-        fi
-      '''
-      // collect etcd metrics
-      sh '''
-        mkdir -p $WORKSPACE/etcd-metrics
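-        # pull etcd key counts, DB size, commit/fsync latency and RPC counters from the Prometheus-style query API exposed on 10.90.0.101:31301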
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time-sum.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_bucket' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time-bucket.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_wal_fsync_duration_seconds_bucket' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-wal-fsync-time-bucket.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_network_peer_round_trip_time_seconds_bucket' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-network-peer-round-trip-time-seconds.json || true
-        etcd_namespace=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$1}')
-        etcd_container=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$2}')
-        kubectl exec -it -n  \$etcd_namespace \$etcd_container -- etcdctl defrag --cluster || true
-        kubectl exec -it -n  \$etcd_namespace \$etcd_container -- etcdctl endpoint status -w table > $WORKSPACE/etcd-metrics/etcd-status-table.txt || true
-
-      '''
-      // get VOLTHA debug infos
-      script {
-        try {
-          sh '''
-          voltctl -m 32MB device list -o json > $LOG_FOLDER/device-list.json || true
-          python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
-          rm $LOG_FOLDER/device-list.json || true
-          voltctl -m 32MB device list > $LOG_FOLDER/voltha-devices-list.txt || true
-
-          printf '%s\n' $(voltctl -m 32MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
-          printf '%s\n' $(voltctl -m 32MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true
-
-          printf '%s\n' $(voltctl -m 32MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
-          printf '%s\n' $(voltctl -m 32MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 32MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
-          '''
-        } catch(e) {
-          sh '''
-          echo "Can't get device list from voltclt"
-          '''
-        }
-      }
-      // get cpu usage by container
-      sh '''
-      if [ ${withMonitoring} = true ] ; then
-        cd $WORKSPACE/voltha-system-tests
-        make vst_venv
-        source ./vst_venv/bin/activate || true
-        sleep 60 # we have to wait for prometheus to collect all the information
-        python scripts/sizing.py -o $WORKSPACE/plots || true
-      fi
-      '''
-      archiveArtifacts artifacts: 'execution-time.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
-    }
-  }
-}
-
-def start_port_forward(olts) {
-  sh """
-  bbsimRestPortFwd=50071
-  for i in {0..${olts.toInteger() - 1}}; do
-    daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/bbsim\${i} \${bbsimRestPortFwd}:50071
-    ((bbsimRestPortFwd++))
-  done
-  """
-}
diff --git a/jjb/pipeline/voltha/voltha-2.12/voltha-tt-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-2.12/voltha-tt-physical-functional-tests.groovy
deleted file mode 100644
index 1eacba9..0000000
--- a/jjb/pipeline/voltha/voltha-2.12/voltha-tt-physical-functional-tests.groovy
+++ /dev/null
@@ -1,265 +0,0 @@
-// -*- groovy -*-
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-
-  stages {
-    stage('Clone voltha-system-tests') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${branch}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    // This checkout allows us to show changes in Jenkins
-    // we only do this on master as we don't branch all the repos for all the releases
-    // (we should compute the difference by tracking the container version, not the code)
-    stage('Download All the VOLTHA repos') {
-      when {
-        expression {
-          return "${branch}" == 'master';
-        }
-      }
-      steps {
-       checkout(changelog: true,
-         poll: false,
-         scm: [$class: 'RepoScm',
-           manifestRepositoryUrl: "${params.manifestUrl}",
-           manifestBranch: "${params.branch}",
-           currentBranch: true,
-           destinationDir: 'voltha',
-           forceSync: true,
-           resetFirst: true,
-           quiet: true,
-           jobs: 4,
-           showAllChanges: true]
-         )
-      }
-    }
-    stage('Initialize') {
-      steps {
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-          deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-        }
-
-        installVoltctl("${branch}")
-
-        sh returnStdout: false, script: """
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-        if [ "${params.branch}" == "master" ]; then
-           set +e
-
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        fi
-        """
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-        ROBOT_FILE="Voltha_TT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/FunctionalTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i functionalTT -i PowerSwitch -i sanityTT -i sanityTT-MCAST -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-             if ( ${powerCycleOlt} ); then
-                  ROBOT_MISC_ARGS+=" -v power_cycle_olt:True"
-             fi
-        else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i functionalTT -e PowerSwitch -i sanityTT -i sanityTT-MCAST -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        fi
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
-        """
-      }
-    }
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-        ROBOT_FILE="Voltha_TT_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/FailureScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.enableMultiUni} = false ]; then
-          if ( ${powerSwitch} ); then
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          else
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Multi-Tcont Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-        ROBOT_FILE="Voltha_TT_MultiTcontTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/MultiTcontScenarios"
-        ROBOT_TEST_INPUT_FILE="$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-TT-multi-tcont-tests-input.yaml"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.enableMultiUni} = false ]; then
-          if ( ${powerSwitch} ); then
-            export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
-          else
-            export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Multicast Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-        ROBOT_FILE="Voltha_TT_MulticastTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/MulticastTests"
-        ROBOT_TEST_INPUT_FILE="$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-TT-multicast-tests-input.yaml"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.enableMultiUni} = true ]; then
-          if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i multicastTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
-          else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i multicastTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
-        fi
-        """
-      }
-    }
-
-  }
-  post {
-    always {
-      getPodsInfo("$WORKSPACE/pods")
-      sh returnStdout: false, script: '''
-      set +e
-
-      # collect logs collected in the Robot Framework StartLogging keyword
-      cd $WORKSPACE
-      gzip *-combined.log || true
-      rm *-combined.log || true
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-          if (olt.type == null || olt.type == "" || olt.type == "openolt") {
-             sh returnStdout: false, script: """
-             sshpass -p ${olt.pass} scp ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-             sshpass -p ${olt.pass} scp ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-             sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-             sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-             """
-          }
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.8/bbsim-tests.groovy b/jjb/pipeline/voltha/voltha-2.8/bbsim-tests.groovy
deleted file mode 100755
index 2dad953..0000000
--- a/jjb/pipeline/voltha/voltha-2.8/bbsim-tests.groovy
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests for openonu-go
-// uses bbsim to simulate OLT/ONUs
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def clusterName = "kind-ci"
-
-def execute_test(testTarget, workflow, testLogging, teardown, testSpecificHelmFlags = "") {
-    def infraNamespace = "default"
-    def volthaNamespace = "voltha"
-    def logsDir = "$WORKSPACE/${testTarget}"
-
-    stage('IAM') {
-        script {
-            String iam = [
-                'ci-management',
-                'jjb',
-                'pipeline',
-                'voltha',
-                'voltha-2.8',
-                'bbsim-tests.groovy'
-            ].join('/')
-            println("** ${iam}: ENTER")
-            println("** ${iam}: LEAVE")
-        }
-    }
-
-    stage('Cleanup') {
-    if (teardown) {
-      timeout(15) {
-        script {
-          helmTeardown(["default", infraNamespace, volthaNamespace])
-        }
-        timeout(1) {
-          sh returnStdout: false, script: '''
-          # remove orphaned port-forward from different namespaces
-          ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-          '''
-        }
-      }
-    }
-  }
-  stage('Deploy Voltha') {
-    if (teardown) {
-      timeout(10) {
-        script {
-
-          sh """
-          mkdir -p ${logsDir}
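-          # tail all pods in the infra and voltha namespaces with kail while the charts come up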
-          _TAG=kail-startup kail -n ${infraNamespace} -n ${volthaNamespace} > ${logsDir}/onos-voltha-startup-combined.log &
-          """
-
-          // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
-          def localCharts = false
-          if (gerritProject == "voltha-helm-charts" || branch != "master") {
-            localCharts = true
-          }
-
-          // NOTE temporary workaround expose ONOS node ports
-          def localHelmFlags = extraHelmFlags.trim() + " --set global.log_level=${logLevel.toUpperCase()} " +
-          " --set onos-classic.onosSshPort=30115 " +
-          " --set onos-classic.onosApiPort=30120 " +
-          " --set onos-classic.onosOfPort=31653 " +
-          " --set onos-classic.individualOpenFlowNodePorts=true " + testSpecificHelmFlags
-
-          if (gerritProject != "") {
-            localHelmFlags = "${localHelmFlags} " + getVolthaImageFlags("${gerritProject}")
-          }
-
-          volthaDeploy([
-            infraNamespace: infraNamespace,
-            volthaNamespace: volthaNamespace,
-            workflow: workflow.toLowerCase(),
-            extraHelmFlags: localHelmFlags,
-            localCharts: localCharts,
-            bbsimReplica: olts.toInteger(),
-            dockerRegistry: registry,
-            ])
-        }
-
-        // stop logging
-        sh """
-          P_IDS="\$(ps e -ww -A | grep "_TAG=kail-startup" | grep -v grep | awk '{print \$1}')"
-          if [ -n "\$P_IDS" ]; then
-            echo \$P_IDS
-            for P_ID in \$P_IDS; do
-              kill -9 \$P_ID
-            done
-          fi
-          cd ${logsDir}
-          gzip -k onos-voltha-startup-combined.log
-          rm onos-voltha-startup-combined.log
-        """
-      }
-      sh """
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
-      JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-      bbsimDmiPortFwd=50075
-      for i in {0..${olts.toInteger() - 1}}; do
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="bbsim\${i}" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/bbsim\${i} \${bbsimDmiPortFwd}:50075; done"&
-        ((bbsimDmiPortFwd++))
-      done
-      ps aux | grep port-forward
-      """
-    }
-  }
-  stage('Run test ' + testTarget + ' on ' + workflow + ' workFlow') {
-    sh """
-    mkdir -p ${logsDir}
-    export ROBOT_MISC_ARGS="-d ${logsDir} "
-    ROBOT_MISC_ARGS+="-v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120 -v INFRA_NAMESPACE:${infraNamespace} -v container_log_dir:${logsDir} -v logging:${testLogging}"
-    export KVSTOREPREFIX=voltha/voltha_voltha
-
-    make -C $WORKSPACE/voltha-system-tests ${testTarget} || true
-    """
-    getPodsInfo("${logsDir}")
-    sh """
-      set +e
-      # collect logs collected in the Robot Framework StartLogging keyword
-      cd ${logsDir}
-      gzip *-combined.log || true
-      rm *-combined.log || true
-    """
-  }
-}
-
-def collectArtifacts(exitStatus) {
-  getPodsInfo("$WORKSPACE/${exitStatus}")
-  sh """
-  kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/${exitStatus}/voltha.log || true
-  """
-  archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.txt,**/*.html'
-  sh '''
-    sync
-    pkill kail || true
-    which voltctl
-    md5sum $(which voltctl)
-  '''
-  step([$class: 'RobotPublisher',
-    disableArchiveOutput: false,
-    logFileName: "**/*/log*.html",
-    otherFiles: '',
-    outputFileName: "**/*/output*.xml",
-    outputPath: '.',
-    passThreshold: 100,
-    reportFileName: "**/*/report*.html",
-    unstableThreshold: 0,
-    onlyCritical: true]);
-}
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-  environment {
-    KUBECONFIG="$HOME/.kube/kind-${clusterName}"
-    VOLTCONFIG="$HOME/.volt/config"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-    ROBOT_MISC_ARGS="-e PowerSwitch ${params.extraRobotArgs}"
-    DIAGS_PROFILE="VOLTHA_PROFILE"
-  }
-  stages {
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          gerritProject: "${gerritProject}",
-          gerritRefspec: "${gerritRefspec}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Build patch') {
-      // build the patch only if gerritProject is specified
-      when {
-        expression {
-          return !gerritProject.isEmpty()
-        }
-      }
-      steps {
-        // NOTE that the correct patch has already been checked out
-        // during the getVolthaCode step
-        buildVolthaComponent("${gerritProject}")
-      }
-    }
-    stage('Create K8s Cluster') {
-      steps {
-        script {
-          def clusterExists = sh returnStdout: true, script: """
-          kind get clusters | grep ${clusterName} | wc -l
-          """
-          if (clusterExists.trim() == "0") {
-            createKubernetesCluster([branch: "${branch}", nodes: 3, name: clusterName])
-          }
-        }
-      }
-    }
-    stage('Replace voltctl') {
-      // if the project is voltctl override the downloaded one with the built one
-      when {
-        expression {
-          return gerritProject == "voltctl"
-        }
-      }
-      steps{
-        sh """
-        mv `ls $WORKSPACE/voltctl/release/voltctl-*-linux-amd*` $WORKSPACE/bin/voltctl
-        chmod +x $WORKSPACE/bin/voltctl
-        """
-      }
-    }
-    stage('Load image in kind nodes') {
-      when {
-        expression {
-          return !gerritProject.isEmpty()
-        }
-      }
-      steps {
-        loadToKind()
-      }
-    }
-    stage('Parse and execute tests') {
-        steps {
-          script {
-            def tests = readYaml text: testTargets
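-            // testTargets is a YAML list; each entry is expected to carry:
-            //   target, workflow, flags, teardown, logging (see the lookups below)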
-
-            for(int i = 0;i<tests.size();i++) {
-              def test = tests[i]
-              def target = test["target"]
-              def workflow = test["workflow"]
-              def flags = test["flags"]
-              def teardown = test["teardown"].toBoolean()
-              def logging = test["logging"].toBoolean()
-              def testLogging = 'False'
-              if (logging) {
-                  testLogging = 'True'
-              }
-              println "Executing test ${target} on workflow ${workflow} with logging ${testLogging} and extra flags ${flags}"
-              execute_test(target, workflow, testLogging, teardown, flags)
-            }
-          }
-        }
-    }
-  }
-  post {
-    aborted {
-      collectArtifacts("aborted")
-    }
-    failure {
-      collectArtifacts("failed")
-    }
-    always {
-      collectArtifacts("always")
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.8/device-management-mock-tests.groovy b/jjb/pipeline/voltha/voltha-2.8/device-management-mock-tests.groovy
deleted file mode 100644
index 4636824..0000000
--- a/jjb/pipeline/voltha/voltha-2.8/device-management-mock-tests.groovy
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def localCharts = false
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 90, unit: 'MINUTES')
-  }
-  environment {
-    KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-    ROBOT_MISC_ARGS="-d $WORKSPACE/RobotLogs"
-  }
-
-  stages {
-
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          gerritProject: "${gerritProject}",
-          gerritRefspec: "${gerritRefspec}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Build Redfish Importer Image') {
-      steps {
-        sh """
-           make -C $WORKSPACE/device-management/\$1 DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build-importer
-           """
-      }
-    }
-    stage('Build demo_test Image') {
-      steps {
-        sh """
-           make -C $WORKSPACE/device-management/\$1/demo_test DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
-           """
-      }
-    }
-    stage('Build mock-redfish-server  Image') {
-      steps {
-        sh """
-           make -C $WORKSPACE/device-management/\$1/mock-redfish-server DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
-           """
-      }
-    }
-    stage('Create K8s Cluster') {
-      steps {
-        createKubernetesCluster([branch: "${branch}", nodes: 3])
-      }
-    }
-    stage('Load image in kind nodes') {
-      steps {
-        loadToKind()
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        script {
-          if (branch != "master" || volthaHelmChartsChange != "") {
-            // if we're using a release or testing changes in the charts, then use the local clone
-            localCharts = true
-          }
-        }
-        volthaDeploy([
-          workflow: "att",
-          extraHelmFlags: extraHelmFlags,
-          dockerRegistry: "mirror.registry.opennetworking.org",
-          localCharts: localCharts,
-        ])
-        // start logging
-        sh """
-        mkdir -p $WORKSPACE/att
-        _TAG=kail-att kail -n infra -n voltha -n default > $WORKSPACE/att/onos-voltha-combined.log &
-        """
-        // forward ONOS and VOLTHA ports
-        sh """
-        _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101&
-        _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181&
-        _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha svc/voltha-voltha-api 55555:55555&
-        """
-      }
-    }
-
-    stage('Run E2E Tests') {
-      steps {
-        sh '''
-           mkdir -p $WORKSPACE/RobotLogs
-
-           # tell the kubernetes script to use images tagged citest and pullPolicy:Never
-           sed -i 's/master/citest/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
-           sed -i 's/imagePullPolicy: Always/imagePullPolicy: Never/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
-           make -C $WORKSPACE/device-management functional-mock-test || true
-           '''
-      }
-    }
-  }
-
-  post {
-    always {
-      sh '''
-         set +e
-         kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
-         kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
-         kubectl get nodes -o wide
-         kubectl get pods -o wide --all-namespaces
-
-         sync
-         pkill kail || true
-
-         ## Pull out errors from log files
-         extract_errors_go() {
-           echo
-           echo "Error summary for $1:"
-           grep $1 $WORKSPACE/att/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
-           echo
-         }
-
-         extract_errors_python() {
-           echo
-           echo "Error summary for $1:"
-           grep $1 $WORKSPACE/att/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
-           echo
-         }
-
-         extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
-         extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
-         extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
-         extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-
-         gzip $WORKSPACE/att/onos-voltha-combined.log
-         '''
-         step([$class: 'RobotPublisher',
-            disableArchiveOutput: false,
-            logFileName: 'RobotLogs/log*.html',
-            otherFiles: '',
-            outputFileName: 'RobotLogs/output*.xml',
-            outputPath: '.',
-            passThreshold: 80,
-            reportFileName: 'RobotLogs/report*.html',
-            unstableThreshold: 0]);
-         archiveArtifacts artifacts: '**/*.log,**/*.gz'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.8/physical-build.groovy b/jjb/pipeline/voltha/voltha-2.8/physical-build.groovy
deleted file mode 100644
index 80df9d5..0000000
--- a/jjb/pipeline/voltha/voltha-2.8/physical-build.groovy
+++ /dev/null
@@ -1,423 +0,0 @@
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// used to deploy VOLTHA and configure ONOS physical PODs
-
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// -----------------------------------------------------------------------
-// -----------------------------------------------------------------------
-def getIam(String func)
-{
-    // Cannot rely on a stack trace due to jenkins manipulation
-    String src = 'jjb/pipeline/voltha-2.8/physical-build.groovy'
-    String iam = [src, func].join('::')
-    return iam
-}
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-// -----------------------------------------------------------------------
-// -----------------------------------------------------------------------
-def deploy_custom_oltAdapterChart(namespace, name, chart, extraHelmFlags) {
-    String iam = getIam('deploy_custom_oltAdapterChart')
-    println("** ${iam}: ENTER")
-
-    sh """
-    helm install --create-namespace --set defaults.image_pullPolicy=Always --namespace ${namespace} ${extraHelmFlags} ${name} ${chart}
-   """
-
-    println("** ${iam}: LEAVE")
-    return
-}
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 35, unit: 'MINUTES')
-  }
-  environment {
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-  }
-
-  stages{
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage ("Parse deployment configuration file") {
-      steps {
-        sh returnStdout: true, script: "rm -rf ${configBaseDir}"
-        sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-          if ( params.workFlow == "DT" ) {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-          }
-          else if ( params.workFlow == "TT" )
-          {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-          }
-          else
-          {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-          }
-        }
-      }
-    }
-    stage('Clean up') {
-      steps {
-        timeout(15) {
-          script {
-            helmTeardown(["default", infraNamespace, volthaNamespace])
-          }
-          timeout(1) {
-            sh returnStdout: false, script: '''
-            # remove orphaned port-forward from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Install Voltha')  {
-      steps {
-        timeout(20) {
-          installVoltctl("$branch")
-          script {
-            // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
-            def localCharts = false
-            if (volthaHelmChartsChange != "" || branch != "master") {
-              localCharts = true
-            }
-
-            // should the config file be suffixed with the workflow? see "deployment_config"
-            def localHelmFlags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml --set global.log_level=${logLevel} "
-
-            if (workFlow.toLowerCase() == "dt") {
-              localHelmFlags += " --set radius.enabled=false "
-            }
-            if (workFlow.toLowerCase() == "tt") {
-              localHelmFlags += " --set radius.enabled=false --set global.incremental_evto_update=true "
-                if (enableMultiUni.toBoolean()) {
-                    localHelmFlags += " --set voltha-adapter-openonu.adapter_open_onu.uni_port_mask=${uniPortMask} "
-                }
-            }
-
-            // NOTE temporary workaround to expose ONOS node ports (pod-config needs to be updated to contain these values)
-            // and to connect the ofagent to all instances of ONOS
-            localHelmFlags = localHelmFlags + " --set onos-classic.onosSshPort=30115 " +
-            "--set onos-classic.onosApiPort=30120 " +
-            "--set onos-classic.onosOfPort=31653 " +
-            "--set onos-classic.individualOpenFlowNodePorts=true " +
-            "--set voltha.onos_classic.replicas=${params.NumOfOnos}"
-
-            if (bbsimReplicas.toInteger() != 0) {
-              localHelmFlags = localHelmFlags + " --set onu=${onuNumber},pon=${ponNumber} "
-            }
-
-            // adding user specified helm flags at the end so they'll have priority over everything else
-            localHelmFlags = localHelmFlags + " ${extraHelmFlags}"
-
-            // in VOLTHA-2.8 there is no need to wait for the adapters
-            def numberOfAdaptersToWait = 0
-
-            volthaDeploy([
-              workflow: workFlow.toLowerCase(),
-              extraHelmFlags: localHelmFlags,
-              localCharts: localCharts,
-              kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
-              onosReplica: params.NumOfOnos,
-              atomixReplica: params.NumOfAtomix,
-              kafkaReplica: params.NumOfKafka,
-              etcdReplica: params.NumOfEtcd,
-              bbsimReplica: bbsimReplicas.toInteger(),
-              adaptersToWait: numberOfAdaptersToWait,
-              ])
-
-            if(openoltAdapterChart != "onf/voltha-adapter-openolt"){
-              extraHelmFlags = extraHelmFlags + " --set global.log_level=${logLevel}"
-              deploy_custom_oltAdapterChart(volthaNamespace, oltAdapterReleaseName, openoltAdapterChart, extraHelmFlags)
-              waitForAdapters([
-                adaptersToWait: 0 // in VOLTHA-2.8 there is no need to wait for the adapters
-              ])
-            }
-          }
-          sh """
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd ${params.VolthaEtcdPort}:2379; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG="kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-          ps aux | grep port-forward
-          """
-          getPodsInfo("$WORKSPACE")
-        }
-      }
-    }
-    stage('Push Tech-Profile') {
-      steps {
-        script {
-          if ( params.configurePod && params.profile != "Default" ) {
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              def tech_prof_directory = "XGS-PON"
-              if (deployment_config.olts[i].containsKey("board_technology")){
-                tech_prof_directory = deployment_config.olts[i]["board_technology"]
-              }
-              timeout(1) {
-                sh returnStatus: true, script: """
-                export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
-                etcd_container=\$(kubectl get pods -n ${infraNamespace} | grep etcd | awk 'NR==1{print \$1}')
-                if [[ "${workFlow}" == "TT" ]]; then
-                   if [[ "${params.enableMultiUni}" == "true" ]]; then
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-HSIA.json \$etcd_container:/tmp/hsia.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-VoIP.json \$etcd_container:/tmp/voip.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65'
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-MCAST-AdditionalBW-None.json \$etcd_container:/tmp/mcast_additionalBW_none.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_none.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast_additionalBW_na.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_na.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/67'
-                   else
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65'
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-None.json \$etcd_container:/tmp/mcast_additionalBW_none.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_none.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
-                      kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast_additionalBW_na.json
-                      kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_na.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/67'
-                   fi
-                else
-                   kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json \$etcd_container:/tmp/flexpod.json
-                   kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
-                fi
-                """
-              }
-              timeout(1) {
-                sh returnStatus: true, script: """
-                export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
-                etcd_container=\$(kubectl get pods -n ${infraNamespace} | grep etcd | awk 'NR==1{print \$1}')
-                kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'ETCDCTL_API=3 etcdctl get --prefix service/voltha/technology_profiles/${tech_prof_directory}/64'
-                """
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Push MIB templates') {
-      steps {
-        sh """
-        export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
-        etcd_container=\$(kubectl get pods -n ${infraNamespace} | grep etcd | awk 'NR==1{print \$1}')
-        kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/MIB_Alpha.json \$etcd_container:/tmp/MIB_Alpha.json
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/go_templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
-        kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/MIB_Scom.json \$etcd_container:/tmp/MIB_Scom.json
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Scom.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/go_templates/SCOM/Glasfaser-Modem/090140.1.0.304'
-        kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Scom.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/templates/SCOM/Glasfaser-Modem/090140.1.0.304'
-        """
-      }
-    }
-    stage('Push Sadis-config') {
-      steps {
-        timeout(1) {
-          sh returnStatus: true, script: """
-          if [[ "${workFlow}" == "DT" ]]; then
-            curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-DT.json
-          elif [[ "${workFlow}" == "TT" ]]; then
-            curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-TT.json
-          else
-            # this is the ATT case; rename the file to *-sadis-ATT.json so that we can avoid special cases and just load the file
-            curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis.json
-          fi
-          """
-        }
-      }
-    }
-    stage('Switch Configurations in ONOS') {
-      steps {
-        script {
-          if ( deployment_config.fabric_switches.size() > 0 ) {
-            timeout(1) {
-              def netcfg = "$WORKSPACE/${configBaseDir}/${configToscaDir}/voltha/${configFileName}-onos-netcfg-switch.json"
-              if (params.inBandManagement){
-                netcfg = "$WORKSPACE/${configBaseDir}/${configToscaDir}/voltha/${configFileName}-onos-netcfg-switch-inband.json"
-              }
-              sh """
-              curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @${netcfg}
-              curl -sSL --user karaf:karaf -X POST http://${deployment_config.nodes[0].ip}:30120/onos/v1/applications/org.onosproject.segmentrouting/active
-              """
-            }
-            timeout(1) {
-              waitUntil {
-                sr_active_out = sh returnStatus: true, script: """
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.dhcpl2relay"
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.aaa"
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.olt"
-                # TRACE in the pipeliner is too chatty, moving to DEBUG
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.opencord.olt.driver"
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager"
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager"
-                curl -sSL --user karaf:karaf -X GET http://${deployment_config.nodes[0].ip}:30120/onos/v1/applications/org.onosproject.segmentrouting | jq '.state' | grep ACTIVE
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled false"
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.net.flow.impl.FlowRuleManager purgeOnDisconnection false"
-                sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.net.meter.impl.MeterManager purgeOnDisconnection false"
-                """
-                return sr_active_out == 0
-              }
-            }
-            timeout(7) {
-              for(int i=0; i < deployment_config.hosts.src.size(); i++) {
-                for(int j=0; j < deployment_config.olts.size(); j++) {
-                  def aggPort = -1
-                  if(deployment_config.olts[j].serial == deployment_config.hosts.src[i].olt){
-                      aggPort = deployment_config.olts[j].aggPort
-                      if(aggPort == -1){
-                        throw new Exception("Upstream port for the olt is not configured, field aggPort is empty")
-                      }
-                      sh """
-                      sleep 30 # NOTE why are we sleeping?
-                      curl -X POST --user karaf:karaf --header 'Content-Type: application/json' --header 'Accept: application/json' -d '{"deviceId": "${deployment_config.fabric_switches[0].device_id}", "vlanId": "${deployment_config.hosts.src[i].s_tag}", "endpoints": [${deployment_config.fabric_switches[0].bngPort},${aggPort}]}' 'http://${deployment_config.nodes[0].ip}:30120/onos/segmentrouting/xconnect'
-                      """
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Reinstall OLT software') {
-      steps {
-        script {
-          if ( params.reinstallOlt ) {
-            for(int i=0; i < deployment_config.olts.size(); i++) {
-              // NOTE what is oltDebVersion23? is that for VOLTHA-2.3? do we still need this differentiation?
-              sh returnStdout: true, script: """
-              ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-              if [ "${params.inBandManagement}" == "true" ]; then
-                sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'kill -9 `pgrep -f "[b]ash /opt/openolt/openolt_dev_mgmt_daemon_process_watchdog"` || true'
-              fi
-              sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion}"
-              sleep 10
-              """
-              timeout(5) {
-                waitUntil {
-                  olt_sw_present = sh returnStdout: true, script: """
-                  if [[ "${deployment_config.olts[i].oltDebVersion}" == *"asfvolt16"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asfvolt16 | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"asgvolt64"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asgvolt64 | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600x-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600x-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600g-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600g-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-3200g-w"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-3200g-w | wc -l'
-                  elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"sda3016ss"* ]]; then
-                    sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep sda3016ss | wc -l'
-                  else
-                    echo Unknown Debian package for openolt
-                  fi
-                  if (${deployment_config.olts[i].fortygig}); then
-                    if [[ "${params.inBandManagement}" == "true" ]]; then
-                      ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'mkdir -p /opt/openolt/'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/watchdog-script/* /opt/openolt/'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/bal_cli_appl/example_user_appl /broadcom'
-                      sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp in-band-startup-script/* /etc/init.d/'
-                    fi
-                  fi
-                  """
-                  return olt_sw_present.toInteger() > 0
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    stage('Restart OLT processes') {
-      steps {
-        script {
-          // rebooting OLTs
-          for(int i=0; i < deployment_config.olts.size(); i++) {
-            timeout(15) {
-              sh returnStdout: true, script: """
-              ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
-              sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; rm -f /var/log/openolt_process_watchdog.log; reboot > /dev/null &' || true
-              """
-            }
-          }
-          sh returnStdout: true, script: """
-          sleep ${params.waitTimerForOltUp}
-          """
-          // Checking that the dev_mgmt_daemon and openolt processes are running
-          for(int i=0; i < deployment_config.olts.size(); i++) {
-            if ( params.oltAdapterReleaseName != "open-olt" ) {
-              timeout(15) {
-                waitUntil {
-                  devprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep dev_mgmt_daemon | wc -l'"
-                  return devprocess.toInteger() > 0
-                }
-              }
-              timeout(15) {
-                waitUntil {
-                  openoltprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep openolt | wc -l'"
-                  return openoltprocess.toInteger() > 0
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  post {
-    aborted {
-      getPodsInfo("$WORKSPACE/failed")
-      sh """
-      kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.log || true
-      """
-      archiveArtifacts artifacts: '**/*.log,**/*.txt'
-    }
-    failure {
-      getPodsInfo("$WORKSPACE/failed")
-      sh """
-      kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.logs || true
-      """
-      archiveArtifacts artifacts: '**/*.log,**/*.txt'
-    }
-    always {
-      archiveArtifacts artifacts: '*.txt'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.8/software-upgrades.groovy b/jjb/pipeline/voltha/voltha-2.8/software-upgrades.groovy
deleted file mode 100755
index c362b4d..0000000
--- a/jjb/pipeline/voltha/voltha-2.8/software-upgrades.groovy
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// voltha-2.x e2e tests
-// uses bbsim to simulate OLT/ONUs
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-def test_software_upgrade(name) {
-  def infraNamespace = "infra"
-  def volthaNamespace = "voltha"
-  def logsDir = "$WORKSPACE/${name}"
-  stage('Deploy Voltha - '+ name) {
-    timeout(10) {
-      // start logging
-      sh """
-      rm -rf ${logsDir} || true
-      mkdir -p ${logsDir}
-      _TAG=kail-${name} kail -n ${infraNamespace} -n ${volthaNamespace} > ${logsDir}/onos-voltha-startup-combined.log &
-      """
-      def extraHelmFlags = extraHelmFlags.trim()
-      if ("${name}" == "onos-app-upgrade" || "${name}" == "onu-software-upgrade" || "${name}" == "voltha-component-upgrade" || "${name}" == "voltha-component-rolling-upgrade") {
-          extraHelmFlags = " --set global.log_level=${logLevel.toUpperCase()},onu=1,pon=1 --set onos-classic.replicas=3,onos-classic.atomix.replicas=3 " + extraHelmFlags
-      }
-      if ("${name}" == "onu-image-dwl-simultaneously") {
-          extraHelmFlags = " --set global.log_level=${logLevel.toUpperCase()},onu=2,pon=2 --set onos-classic.replicas=3,onos-classic.atomix.replicas=3 " + extraHelmFlags
-      }
-
-      extraHelmFlags = " --set onos-classic.onosSshPort=30115 --set onos-classic.onosApiPort=30120 " + extraHelmFlags
-      extraHelmFlags = " --set voltha.onos_classic.replicas=3 " + extraHelmFlags
-      //ONOS custom image handling
-      if ( onosImg.trim() != '' ) {
-         String[] split;
-         onosImg = onosImg.trim()
-         split = onosImg.split(':')
-        extraHelmFlags = extraHelmFlags + " --set onos-classic.image.repository=" + split[0] +",onos-classic.image.tag=" + split[1] + " "
-      }
-      def olts = 1
-      if ("${name}" == "onu-image-dwl-simultaneously") {
-          olts = 2
-      }
-      def localCharts = false
-      if (branch != "master") {
-         localCharts = true
-      }
-      // Currently only testing with ATT workflow
-      // TODO: Support for other workflows
-      volthaDeploy([bbsimReplica: olts.toInteger(), workflow: "att", extraHelmFlags: extraHelmFlags, localCharts: localCharts])
-      // stop logging
-      sh """
-        P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${name}" | grep -v grep | awk '{print \$1}')"
-        if [ -n "\$P_IDS" ]; then
-          echo \$P_IDS
-          for P_ID in \$P_IDS; do
-            kill -9 \$P_ID
-          done
-        fi
-        cd ${logsDir}
-        gzip -k onos-voltha-startup-combined.log
-        rm onos-voltha-startup-combined.log
-      """
-      // forward ONOS and VOLTHA ports
-      sh """
-      _TAG=onos-port-forward bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101; done &"
-      _TAG=onos-port-forward bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181; done &"
-      _TAG=port-forward-voltha-api bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha svc/voltha-voltha-api 55555:55555; done &"
-      """
-      sh """
-      sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set DEBUG org.opencord
-      """
-    }
-  }
-  stage('Test - '+ name) {
-    timeout(60) {
-      sh """
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/${name}"
-        mkdir -p \$ROBOT_LOGS_DIR
-        if [[ ${name} == 'onos-app-upgrade' ]]; then
-          export ONOS_APPS_UNDER_TEST+=''
-          if [ ${aaaVer.trim()} != '' ] && [ ${aaaOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.aaa,${aaaVer.trim()},${aaaOarUrl.trim()}*"
-          fi
-          if [ ${oltVer.trim()} != '' ] && [ ${oltOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.olt,${oltVer.trim()},${oltOarUrl.trim()}*"
-          fi
-          if [ ${dhcpl2relayVer.trim()} != '' ] && [ ${dhcpl2relayOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.dhcpl2relay,${dhcpl2relayVer.trim()},${dhcpl2relayOarUrl.trim()}*"
-          fi
-          if [ ${igmpproxyVer.trim()} != '' ] && [ ${igmpproxyOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.igmpproxy,${igmpproxyVer.trim()},${igmpproxyOarUrl.trim()}*"
-          fi
-          if [ ${sadisVer.trim()} != '' ] && [ ${sadisOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.sadis,${sadisVer.trim()},${sadisOarUrl.trim()}*"
-          fi
-          if [ ${mcastVer.trim()} != '' ] && [ ${mcastOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.mcast,${mcastVer.trim()},${mcastOarUrl.trim()}*"
-          fi
-          if [ ${kafkaVer.trim()} != '' ] && [ ${kafkaOarUrl.trim()} != '' ]; then
-            ONOS_APPS_UNDER_TEST+="org.opencord.kafka,${kafkaVer.trim()},${kafkaOarUrl.trim()}*"
-          fi
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v onos_apps_under_test:\$ONOS_APPS_UNDER_TEST -e PowerSwitch"
-          export TARGET=onos-app-upgrade-test
-        fi
-        if [[ ${name} == 'voltha-component-upgrade' ]]; then
-          export VOLTHA_COMPS_UNDER_TEST+=''
-          if [ ${adapterOpenOltImage.trim()} != '' ]; then
-            VOLTHA_COMPS_UNDER_TEST+="adapter-open-olt,adapter-open-olt,${adapterOpenOltImage.trim()}*"
-          fi
-          if [ ${adapterOpenOnuImage.trim()} != '' ]; then
-            VOLTHA_COMPS_UNDER_TEST+="adapter-open-onu,adapter-open-onu,${adapterOpenOnuImage.trim()}*"
-          fi
-          if [ ${rwCoreImage.trim()} != '' ]; then
-            VOLTHA_COMPS_UNDER_TEST+="rw-core,voltha,${rwCoreImage.trim()}*"
-          fi
-          if [ ${ofAgentImage.trim()} != '' ]; then
-            VOLTHA_COMPS_UNDER_TEST+="ofagent,ofagent,${ofAgentImage.trim()}*"
-          fi
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v voltha_comps_under_test:\$VOLTHA_COMPS_UNDER_TEST -e PowerSwitch"
-          export TARGET=voltha-comp-upgrade-test
-        fi
-        if [[ ${name} == 'onu-software-upgrade' ]]; then
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v image_version:${onuImageVersion.trim()} -v image_url:${onuImageUrl.trim()} -v image_vendor:${onuImageVendor.trim()} -v image_activate_on_success:${onuImageActivateOnSuccess.trim()} -v image_commit_on_success:${onuImageCommitOnSuccess.trim()} -v image_crc:${onuImageCrc.trim()} -e PowerSwitch"
-          export TARGET=onu-upgrade-test
-        fi
-        if [[ ${name} == 'onu-image-dwl-simultaneously' ]]; then
-          export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v image_version:${onuImageVersion.trim()} -v image_url:${onuImageUrl.trim()} -v image_vendor:${onuImageVendor.trim()} -v image_activate_on_success:${onuImageActivateOnSuccess.trim()} -v image_commit_on_success:${onuImageCommitOnSuccess.trim()} -v image_crc:${onuImageCrc.trim()} -e PowerSwitch"
-          export TARGET=onu-upgrade-test-multiolt-kind-att
-        fi
-        testLogging='False'
-        if [ ${logging} = true ]; then
-          testLogging='True'
-        fi
-        export VOLTCONFIG=$HOME/.volt/config-minimal
-        export KUBECONFIG=$HOME/.kube/kind-config-voltha-minimal
-        ROBOT_MISC_ARGS+=" -v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120 -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace} -v container_log_dir:${logsDir} -v logging:\$testLogging"
-        # Run the specified tests
-        make -C $WORKSPACE/voltha-system-tests \$TARGET || true
-      """
-      // remove port-forwarding
-      sh """
-        # remove orphaned port-forward from different namespaces
-        ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
-      """
-      // collect pod details
-      get_pods_info("$WORKSPACE/${name}")
-      sh """
-        set +e
-        # collect the logs gathered by the Robot Framework StartLogging keyword
-        cd ${logsDir}
-        gzip *-combined.log || true
-        rm *-combined.log || true
-      """
-      helmTeardown(['infra', 'voltha'])
-    }
-  }
-}
-def get_pods_info(dest) {
-  // collect pod details; this is here in case of failure
-  sh """
-  mkdir -p ${dest} || true
-  kubectl get pods --all-namespaces -o wide > ${dest}/pods.txt || true
-  kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee ${dest}/pod-images.txt || true
-  kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee ${dest}/pod-imagesId.txt || true
-  kubectl describe pods --all-namespaces -l app.kubernetes.io/part-of=voltha > ${dest}/voltha-pods-describe.txt
-  kubectl describe pods -n infra -l app=onos-classic > ${dest}/onos-pods-describe.txt
-  helm ls --all-namespaces > ${dest}/helm-charts.txt
-  """
-  sh '''
-  # copy the ONOS logs directly from the container to avoid the color codes
-  printf '%s\\n' $(kubectl get pods -n infra -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c 'kubectl -n infra cp #:apache-karaf-4.2.9/data/log/karaf.log ''' + dest + '''/#.log' || true
-  '''
-}
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 120, unit: 'MINUTES')
-  }
-  environment {
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-    KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
-    SSHPASS="karaf"
-  }
-  stages{
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Cleanup') {
-      steps {
-        // remove port-forwarding
-        sh """
-          # remove orphaned port-forward from different namespaces
-          ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
-        """
-        helmTeardown(['infra', 'voltha'])
-      }
-    }
-    stage('Create K8s Cluster') {
-      steps {
-        createKubernetesCluster([branch: "${branch}", nodes: 3])
-      }
-    }
-    stage('Run Test') {
-      steps {
-        test_software_upgrade("onos-app-upgrade")
-        test_software_upgrade("voltha-component-upgrade")
-        test_software_upgrade("onu-software-upgrade")
-        test_software_upgrade("onu-image-dwl-simultaneously")
-      }
-    }
-  }
-  post {
-    aborted {
-      get_pods_info("$WORKSPACE/failed")
-    }
-    failure {
-      get_pods_info("$WORKSPACE/failed")
-    }
-    always {
-      step([$class: 'RobotPublisher',
-         disableArchiveOutput: false,
-         logFileName: 'RobotLogs/*/log*.html',
-         otherFiles: '',
-         outputFileName: 'RobotLogs/*/output*.xml',
-         outputPath: '.',
-         passThreshold: 100,
-         reportFileName: 'RobotLogs/*/report*.html',
-         unstableThreshold: 0,
-         onlyCritical: true]);
-      archiveArtifacts artifacts: '*.log,**/*.log,**/*.gz,*.gz,*.txt,**/*.txt'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.8/tucson-build-and-test.groovy b/jjb/pipeline/voltha/voltha-2.8/tucson-build-and-test.groovy
deleted file mode 100644
index ddf1278..0000000
--- a/jjb/pipeline/voltha/voltha-2.8/tucson-build-and-test.groovy
+++ /dev/null
@@ -1,365 +0,0 @@
-
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// used to deploy VOLTHA and configure ONOS physical PODs
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-def clusterName = "kind-ci"
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: 120, unit: 'MINUTES')
-  }
-  environment {
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-    KUBECONFIG="$HOME/.kube/kind-${clusterName}"
-    VOLTCONFIG="$HOME/.volt/config"
-    LOG_FOLDER="$WORKSPACE/${workflow}/"
-    APPS_TO_LOG="etcd kafka onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server onos-config-loader"
-
-  }
-  stages{
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${branch}",
-          gerritProject: "${gerritProject}",
-          gerritRefspec: "${gerritRefspec}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage ("Parse deployment configuration file") {
-      steps {
-        sh returnStdout: true, script: "rm -rf ${configBaseDir}"
-        sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-
-          if (params.workflow.toUpperCase() == "TT") {
-            error("The Tucson POD does not support TT workflow at the moment")
-          }
-
-          if ( params.workflow.toUpperCase() == "DT" ) {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-          }
-          else if ( params.workflow.toUpperCase() == "TT" ) {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-          }
-          else {
-            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-          }
-        }
-      }
-    }
-    stage('Clean up') {
-      steps {
-        timeout(15) {
-          script {
-            helmTeardown(["default", infraNamespace, volthaNamespace])
-          }
-          timeout(1) {
-            sh returnStdout: false, script: '''
-            # remove orphaned port-forward from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Build patch') {
-      steps {
-        // NOTE that the correct patch has already been checked out
-        // during the getVolthaCode step
-        buildVolthaComponent("${gerritProject}")
-      }
-    }
-    stage('Create K8s Cluster') {
-      steps {
-        script {
-          def clusterExists = sh returnStdout: true, script: """
-          kind get clusters | grep ${clusterName} | wc -l
-          """
-          if (clusterExists.trim() == "0") {
-            createKubernetesCluster([branch: "${branch}", nodes: 3, name: clusterName])
-          }
-        }
-      }
-    }
-    stage('Load image in kind nodes') {
-      steps {
-        loadToKind()
-      }
-    }
-    stage('Install Voltha')  {
-      steps {
-        timeout(20) {
-          script {
-            imageFlags = getVolthaImageFlags(gerritProject)
-            // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
-            def localCharts = false
-            if (volthaHelmChartsChange != "" || gerritProject == "voltha-helm-charts" || branch != "master") {
-              localCharts = true
-            }
-            def flags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml ${imageFlags} "
-            // NOTE temporary workaround to expose ONOS node ports (pod-config needs to be updated to contain these values)
-            flags = flags + "--set onos-classic.onosSshPort=30115 " +
-            "--set onos-classic.onosApiPort=30120 " +
-            "--set onos-classic.onosOfPort=31653 " +
-            "--set onos-classic.individualOpenFlowNodePorts=true " + extraHelmFlags
-            volthaDeploy([
-              workflow: workFlow.toLowerCase(),
-              extraHelmFlags: flags,
-              localCharts: localCharts,
-              kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
-              onosReplica: 3,
-              atomixReplica: 3,
-              kafkaReplica: 3,
-              etcdReplica: 3,
-              ])
-          }
-          // start logging
-          sh """
-          rm -rf $WORKSPACE/${workFlow}/
-          mkdir -p $WORKSPACE/${workFlow}
-          _TAG=kail-${workFlow} kail -n infra -n voltha > $WORKSPACE/${workFlow}/onos-voltha-combined.log &
-          """
-          sh returnStdout: false, script: '''
-          # start logging with kail
-
-          mkdir -p $LOG_FOLDER
-
-          list=($APPS_TO_LOG)
-          for app in "${list[@]}"
-          do
-            echo "Starting logs for: ${app}"
-            _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
-          done
-          '''
-          sh """
-          JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
-          JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
-          ps aux | grep port-forward
-          """
-          getPodsInfo("$WORKSPACE")
-        }
-      }
-    }
-    stage('Deploy Kafka Dump Chart') {
-      steps {
-        script {
-          sh returnStdout: false, script: """
-              helm repo add cord https://charts.opencord.org
-              helm repo update
-              if helm version -c --short|grep v2 -q; then
-                helm install -n voltha-kafka-dump cord/voltha-kafka-dump
-              else
-                helm install voltha-kafka-dump cord/voltha-kafka-dump
-              fi
-          """
-        }
-      }
-    }
-    stage('Push Tech-Profile') {
-      when {
-        expression { params.profile != "Default" }
-      }
-      steps {
-        sh returnStdout: false, script: """
-        etcd_container=\$(kubectl get pods -n voltha | grep voltha-etcd-cluster | awk 'NR==1{print \$1}')
-        kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json voltha/\$etcd_container:/tmp/flexpod.json
-        kubectl exec -it \$etcd_container -n voltha -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/XGS-PON/64'
-        """
-      }
-    }
-
-    stage('Push Sadis-config') {
-      steps {
-        sh returnStdout: false, script: """
-        ssh-keygen -R [${deployment_config.nodes[0].ip}]:30115
-        ssh-keyscan -p 30115 -H ${deployment_config.nodes[0].ip} >> ~/.ssh/known_hosts
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.dhcpl2relay"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.aaa"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.olt"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager"
-        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager"
-
-        if [[ "${workFlow.toUpperCase()}" == "DT" ]]; then
-          curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-DT.json
-        elif [[ "${workFlow.toUpperCase()}" == "TT" ]]; then
-          curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-TT.json
-        else
-          # this is the ATT case; rename the file to *-sadis-ATT.json so that we can avoid special cases and just load the file
-          curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis.json
-        fi
-        """
-      }
-    }
-    stage('Reinstall OLT software') {
-      when {
-        expression { params.reinstallOlt }
-      }
-      steps {
-        script {
-          deployment_config.olts.each { olt ->
-            sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --remove asfvolt16 && dpkg --purge asfvolt16'"
-            waitUntil {
-              olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
-              return olt_sw_present.toInteger() == 0
-            }
-            if ( params.branch == 'voltha-2.3' ) {
-              oltDebVersion = oltDebVersionVoltha23
-            } else {
-              oltDebVersion = oltDebVersionMaster
-            }
-            sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --install ${oltDebVersion}'"
-            waitUntil {
-              olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
-              return olt_sw_present.toInteger() == 1
-            }
-            if ( olt.fortygig ) {
-              // If the OLT is connected to a 40G switch interface, set the NNI port to be downgraded
-              sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'echo port ce128 sp=40000 >> /broadcom/qax.soc ; /opt/bcm68620/svk_init.sh'"
-            }
-          }
-        }
-      }
-    }
-
-    stage('Restart OLT processes') {
-      steps {
-        script {
-          deployment_config.olts.each { olt ->
-            sh returnStdout: false, script: """
-            ssh-keyscan -H ${olt.ip} >> ~/.ssh/known_hosts
-            sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; reboot'
-            sleep 120
-            """
-            waitUntil {
-              onu_discovered = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'grep \"onu discover indication\" /var/log/openolt.log | wc -l'"
-              return onu_discovered.toInteger() > 0
-            }
-          }
-        }
-      }
-    }
-    stage('Run E2E Tests') {
-      steps {
-        script {
-          // different workflows need different make targets and different robot files
-          if ( params.workflow.toUpperCase() == "DT" ) {
-            robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-            robotFile = "Voltha_DT_PODTests.robot"
-            makeTarget = "voltha-dt-test"
-            robotFunctionalKeyword = "-i functionalDt"
-            robotDataplaneKeyword = "-i dataplaneDt"
-          }
-          else if ( params.workflow.toUpperCase() == "TT" ) {
-            // TODO the TT tests have different tags, address once/if TT is supported on the Tucson POD
-            robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-            robotFile = "Voltha_TT_PODTests.robot"
-            makeTarget = "voltha-tt-test"
-            robotFunctionalKeyword = "-i functionalTt"
-            robotDataplaneKeyword = "-i dataplaneTt"
-          }
-          else {
-            robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-            robotFile = "Voltha_PODTests.robot"
-            makeTarget = "voltha-test"
-            robotFunctionalKeyword = "-i functional"
-            robotDataplaneKeyword = "-i dataplane"
-          }
-        }
-        sh returnStdout: false, script: """
-        mkdir -p $WORKSPACE/RobotLogs
-
-        export ROBOT_CONFIG_FILE="$WORKSPACE/${robotConfigFile}"
-        export ROBOT_MISC_ARGS="${params.extraRobotArgs} --removekeywords wuks -d $WORKSPACE/RobotLogs -v container_log_dir:$WORKSPACE "
-        export ROBOT_FILE="${robotFile}"
-
-        # If the Gerrit comment contains a line with "functional tests" then run the full
-        # functional test suite.  This covers tests tagged either 'sanity' or 'functional'.
-        # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
-        REGEX="functional tests"
-        if [[ "${gerritComment}" =~ \$REGEX ]]; then
-          ROBOT_MISC_ARGS+="${robotFunctionalKeyword} "
-        fi
-        # Likewise for dataplane tests
-        REGEX="dataplane tests"
-        if [[ "${gerritComment}" =~ \$REGEX ]]; then
-          ROBOT_MISC_ARGS+="${robotDataplaneKeyword}"
-        fi
-
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests ${makeTarget} || true
-        """
-      }
-    }
-  }
-  post {
-    always {
-      // stop logging
-      sh """
-        P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${workFlow}" | grep -v grep | awk '{print \$1}')"
-        if [ -n "\$P_IDS" ]; then
-          echo \$P_IDS
-          for P_ID in \$P_IDS; do
-            kill -9 \$P_ID
-          done
-        fi
-        gzip $WORKSPACE/${workFlow}/onos-voltha-combined.log || true
-      """
-      sh '''
-      # stop the kail processes
-      list=($APPS_TO_LOG)
-      for app in "${list[@]}"
-      do
-        echo "Stopping logs for: ${app}"
-        _TAG="kail-$app"
-        P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
-        if [ -n "$P_IDS" ]; then
-          echo $P_IDS
-          for P_ID in $P_IDS; do
-            kill -9 $P_ID
-          done
-        fi
-      done
-      '''
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: 'RobotLogs/log*.html',
-        otherFiles: '',
-        outputFileName: 'RobotLogs/output*.xml',
-        outputPath: '.',
-        passThreshold: 100,
-        reportFileName: 'RobotLogs/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true]);
-      archiveArtifacts artifacts: '**/*.txt,**/*.gz,*.gz,**/*.log'
-    }
-  }
-}
-
-// refs/changes/06/24206/5
diff --git a/jjb/pipeline/voltha/voltha-2.8/voltha-dt-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-dt-physical-functional-tests.groovy
deleted file mode 100644
index 0a34a35..0000000
--- a/jjb/pipeline/voltha/voltha-2.8/voltha-dt-physical-functional-tests.groovy
+++ /dev/null
@@ -1,262 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-
-  stages {
-    stage('Clone voltha-system-tests') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${branch}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    stage ('Initialize') {
-      steps {
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-           deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        }
-        installVoltctl("${branch}")
-
-	sh(returnStdout: false, script: """
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-        if [ "${params.branch}" == "master" ]; then
-           set +e
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        fi
-        """)
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FunctionalTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanityDt -i functionalDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanityDt -i functionalDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        fi
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        """
-      }
-    }
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FailureScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        fi
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        """
-      }
-    }
-
-    stage('Dataplane Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/DataplaneTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        export ROBOT_MISC_ARGS="--removekeywords wuks -i dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        """
-      }
-    }
-    stage('HA Tests') {
-       environment {
-       ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-       ROBOT_FILE="Voltha_ONOSHATests.robot"
-       ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ONOSHAScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-
-    stage('Multiple OLT Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_MultiOLT_Tests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/MultipleOLTScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        fi
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        """
-      }
-    }
-
-
-    stage('Error Scenario Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_ErrorScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/ErrorScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-  }
-  post {
-    always {
-      sh returnStdout: false, script: '''
-      set +e
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
-      kubectl get nodes -o wide
-      kubectl get pods -n voltha -o wide
-      kubectl get pods -o wide
-
-      # store information on running charts
-      helm ls > $WORKSPACE/helm-list.txt || true
-
-      # store information on the running pods
-      kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-imagesId.txt || true
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-            if (olt.type == null || olt.type == "" || olt.type == "openolt") {
-              sh returnStdout: false, script: """
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
-              sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
-              sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
-              """
-            }
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      archiveArtifacts artifacts: '**/*.log,**/*.tgz,*.txt'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.8/voltha-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-physical-functional-tests.groovy
deleted file mode 100644
index b8c5fd8..0000000
--- a/jjb/pipeline/voltha/voltha-2.8/voltha-physical-functional-tests.groovy
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-  stages {
-    stage('Clone voltha-system-tests') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${branch}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-      }
-    }
-    stage('Download All the VOLTHA repos') {
-      when {
-        expression {
-          return "${branch}" == 'master';
-        }
-      }
-      steps {
-       checkout(changelog: true,
-         poll: false,
-         scm: [$class: 'RepoScm',
-           manifestRepositoryUrl: "${params.manifestUrl}",
-           manifestBranch: "${params.branch}",
-           currentBranch: true,
-           destinationDir: 'voltha',
-           forceSync: true,
-           resetFirst: true,
-           quiet: true,
-           jobs: 4,
-           showAllChanges: true]
-         )
-      }
-    }
-    stage ('Initialize') {
-      steps {
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-          deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        }
-        installVoltctl("${branch}")
-
-	sh(returnStdout: false, script: """
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        if [ "${params.branch}" == "master" ]; then
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-           set +e
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        fi
-        """)
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FunctionalTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        fi
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FailureScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        fi
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-
-    stage('Dataplane Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/DataplaneTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        export ROBOT_MISC_ARGS="--removekeywords wuks -i dataplane -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-    stage('HA Tests') {
-       environment {
-       ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-       ROBOT_FILE="Voltha_ONOSHATests.robot"
-       ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ONOSHAScenarios"
-      }
-      steps {
-       sh """
-       mkdir -p $ROBOT_LOGS_DIR
-       export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-       ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-       make -C $WORKSPACE/voltha-system-tests voltha-test || true
-       """
-      }
-    }
-
-    stage('Error Scenario Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
-        ROBOT_FILE="Voltha_ErrorScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ErrorScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-test || true
-        """
-      }
-    }
-  }
-  post {
-    always {
-      getPodsInfo("$WORKSPACE/pods")
-      sh returnStdout: false, script: '''
-      set +e
-
-      # collect logs collected in the Robot Framework StartLogging keyword
-      cd $WORKSPACE
-      gzip *-combined.log || true
-      rm *-combined.log || true
-
-      # store information on the running pods
-      kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
-      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-imagesId.txt || true
-
-
-      # collect ETCD cluster logs
-      mkdir -p $WORKSPACE/etcd
-      printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-          if (olt.type == null || olt.type == "" || olt.type == "openolt") {
-            sh returnStdout: false, script: """
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
-            sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
-            sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
-            """
-          }
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
-    }
-  }
-}
-
-// [EOF]
diff --git a/jjb/pipeline/voltha/voltha-2.8/voltha-physical-soak-dt-tests.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-physical-soak-dt-tests.groovy
deleted file mode 100644
index d795813..0000000
--- a/jjb/pipeline/voltha/voltha-2.8/voltha-physical-soak-dt-tests.groovy
+++ /dev/null
@@ -1,246 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-def volthaNamespace = "voltha"
-def infraNamespace = "infra"
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-
-
-  stages {
-    stage('Clone voltha-system-tests') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${branch}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    // This checkout allows us to show changes in Jenkins
-    // we only do this on master as we don't branch all the repos for all the releases
-    // (we should compute the difference by tracking the container version, not the code)
-    stage('Download All the VOLTHA repos') {
-      when {
-        expression {
-          return "${branch}" == 'master';
-        }
-      }
-      steps {
-       checkout(changelog: true,
-         poll: false,
-         scm: [$class: 'RepoScm',
-           manifestRepositoryUrl: "${params.manifestUrl}",
-           manifestBranch: "${params.branch}",
-           currentBranch: true,
-           destinationDir: 'voltha',
-           forceSync: true,
-           resetFirst: true,
-           quiet: true,
-           jobs: 4,
-           showAllChanges: true]
-         )
-      }
-    }
-    stage ('Initialize') {
-      steps {
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-           deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        }
-        installVoltctl("${branch}")
-
-        sh(returnStdout: false, script: """
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        if [ "${params.branch}" == "master" ]; then
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-           set +e
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        fi
-        """)
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FunctionalTests"
-      }
-      steps {
-        sh """
-        JENKINS_NODE_COOKIE="dontKillMe" _TAG="prometheus" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n cattle-prometheus svc/access-prometheus 31301:80; done"&
-        ps aux | grep port-forward
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ "${params.testType}" == "Functional" ]; then
-            if ( ${powerSwitch} ); then
-                 export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-            else
-                 export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-            fi
-            ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-            make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FailureScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ "${params.testType}" == "Failure" ]; then
-           export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i soak -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-           ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-           make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Dataplane Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
-        ROBOT_FILE="Voltha_DT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/DataplaneTests"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ "${params.testType}" == "Dataplane" ]; then
-           export ROBOT_MISC_ARGS="--removekeywords wuks -i soakDataplane -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
-           ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-           make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
-        fi
-        """
-      }
-    }
-
-  }
-  post {
-    always {
-      getPodsInfo("$WORKSPACE/pods")
-      sh returnStdout: false, script: '''
-      set +e
-
-      # collect logs collected in the Robot Framework StartLogging keyword
-      cd $WORKSPACE
-      gzip *-combined.log || true
-      rm *-combined.log || true
-
-      # collect ETCD cluster logs
-      mkdir -p $WORKSPACE/etcd
-      printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-          sh returnStdout: false, script: """
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
-          sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
-          sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
-          """
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      // get cpu usage by container
-      sh """
-      mkdir -p $WORKSPACE/plots || true
-      cd $WORKSPACE/voltha-system-tests
-      source ./vst_venv/bin/activate || true
-      sleep 60 # we have to wait for prometheus to collect all the information
-      python scripts/sizing.py -o $WORKSPACE/plots -a 0.0.0.0:31301 -n ${volthaNamespace} -s 3600 || true
-      """
-      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt,plots/*'
-    }
-  }
-}
-
-// [EOF]
diff --git a/jjb/pipeline/voltha/voltha-2.8/voltha-scale-multi-stack.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-scale-multi-stack.groovy
deleted file mode 100644
index 50fe24c..0000000
--- a/jjb/pipeline/voltha/voltha-2.8/voltha-scale-multi-stack.groovy
+++ /dev/null
@@ -1,466 +0,0 @@
-// Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA using kind-voltha and performs a scale test
-
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-      timeout(time: 120, unit: 'MINUTES')
-  }
-  environment {
-    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
-    KUBECONFIG="$HOME/.kube/config"
-    SSHPASS="karaf"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-
-    LOG_FOLDER="$WORKSPACE/logs"
-  }
-
-  stages {
-    stage ('Cleanup') {
-      steps {
-        timeout(time: 11, unit: 'MINUTES') {
-          script {
-            def namespaces = ["infra"]
-            // FIXME we may have leftovers from more VOLTHA stacks (eg: run1 had 10 stacks, run2 had 2 stacks)
-            volthaStacks.toInteger().times {
-              namespaces += "voltha${it + 1}"
-            }
-            helmTeardown(namespaces)
-          }
-          sh returnStdout: false, script: '''
-            helm repo add onf https://charts.opencord.org
-            helm repo update
-
-            # remove all persistent volume claims
-            kubectl delete pvc --all-namespaces --all
-            PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-            while [[ \$PVCS != 0 ]]; do
-              sleep 5
-              PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-            done
-
-            # remove orphaned port-forward from different namespaces
-            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-
-            cd $WORKSPACE
-            rm -rf $WORKSPACE/*
-          '''
-        }
-      }
-    }
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${release}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Deploy common infrastructure') {
-      // includes monitoring
-      steps {
-        sh '''
-        if [ ${withMonitoring} = true ] ; then
-          helm install -n infra nem-monitoring cord/nem-monitoring \
-          -f $HOME/voltha-scale/grafana.yaml \
-          --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
-          --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
-        fi
-        '''
-      }
-    }
-    stage('Start logging') {
-      steps {
-        script {
-          startComponentsLogs([
-            appsToLog: [
-              'app.kubernetes.io/name=etcd',
-              'app.kubernetes.io/name=kafka',
-              'app=onos-classic',
-              'app=adapter-open-onu',
-              'app=adapter-open-olt',
-              'app=rw-core',
-              'app=ofagent',
-              'app=bbsim',
-              'app=radius',
-              'app=bbsim-sadis-server',
-              'app=onos-config-loader',
-            ]
-          ])
-        }
-      }
-    }
-    stage('Deploy VOLTHA infrastructure') {
-      steps {
-        timeout(time: 5, unit: 'MINUTES') {
-          script {
-            def localCharts = false
-            if (volthaHelmChartsChange != "" || release != "master") {
-              localCharts = true
-            }
-
-            def infraHelmFlags =
-                "--set global.log_level=${logLevel} " +
-                "--set radius.enabled=${withEapol} " +
-                "--set onos-classic.onosSshPort=30115 " +
-                "--set onos-classic.onosApiPort=30120 " +
-                params.extraHelmFlags
-
-            volthaInfraDeploy([
-              workflow: workflow,
-              infraNamespace: "infra",
-              extraHelmFlags: infraHelmFlags,
-              localCharts: localCharts,
-              onosReplica: onosReplicas,
-              atomixReplica: atomixReplicas,
-              kafkaReplica: kafkaReplicas,
-              etcdReplica: etcdReplicas,
-            ])
-          }
-        }
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        installVoltctl("${release}")
-        deploy_voltha_stacks(params.volthaStacks)
-      }
-    }
-    stage('Configuration') {
-      steps {
-        script {
-          sh returnStdout: false, script: """
-
-          # forward ETCD port
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG=etcd-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/etcd 9999:2379; done 2>&1 " &
-
-          # forward ONOS ports
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8101:8101; done 2>&1 " &
-          JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8181:8181; done 2>&1 " &
-
-          # make sure the the port-forward has started before moving forward
-          sleep 5
-          """
-          sh returnStdout: false, script: """
-          # TODO this needs to be repeated per stack
-          # kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
-
-          #Setting link discovery
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000
-
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.onosproject
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.opencord
-
-          # Set Flows/Ports/Meters poll frequency
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
-
-          #SR is not needed in scale tests and not currently used by operators in production, can be disabled.
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.onosproject.segmentrouting
-
-
-          if [ ${withFlows} = false ]; then
-            sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
-          fi
-          """
-        }
-      }
-    }
-    stage('Setup Test') {
-      steps {
-        sh '''
-          mkdir -p $WORKSPACE/RobotLogs
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-      }
-    }
-    stage('Run Test') {
-      steps {
-        test_voltha_stacks(params.volthaStacks)
-      }
-    }
-  }
-  post {
-    always {
-      stopComponentsLogs([compress: true])
-      // collect result, done in the "post" step so it's executed even in the
-      // event of a timeout in the tests
-      plot([
-        csvFileName: 'scale-test.csv',
-        csvSeries: [
-          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-        ],
-        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (Stacks: ${params.volthaStacks}, OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
-      ])
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: 'RobotLogs/**/log.html',
-        otherFiles: '',
-        outputFileName: 'RobotLogs/**/output.xml',
-        outputPath: '.',
-        passThreshold: 100,
-        reportFileName: 'RobotLogs/**/report.html',
-        onlyCritical: true,
-        unstableThreshold: 0]);
-      // get all the logs from kubernetes PODs
-      sh returnStdout: false, script: '''
-
-        # store information on running charts
-        helm ls --all-namespaces > $LOG_FOLDER/helm-list.txt || true
-
-        # store information on the running pods
-        kubectl get pods --all-namespaces -o wide > $LOG_FOLDER/pods.txt || true
-        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
-        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
-
-        # copy the ONOS logs directly from the container to avoid the color codes
-        printf '%s\n' $(kubectl get pods -n infra -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp -n infra #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
-
-      '''
-      // dump all the BBSim(s) ONU information
-      script {
-        for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
-          stack_ns="voltha"+i
-          sh """
-          mkdir -p \$LOG_FOLDER/${stack_ns}
-          BBSIM_IDS=\$(kubectl -n ${stack_ns} get pods | grep bbsim | grep -v server | awk '{print \$1}')
-          IDS=(\$BBSIM_IDS)
-
-          for bbsim in "\${IDS[@]}"
-          do
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl onu list > \$LOG_FOLDER/${stack_ns}/\$bbsim-device-list.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl service list > \$LOG_FOLDER/${stack_ns}/\$bbsim-service-list.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt resources GEM_PORT > \$LOG_FOLDER/${stack_ns}/\$bbsim-flows-gem-ports.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt resources ALLOC_ID > \$LOG_FOLDER/${stack_ns}/\$bbsim-flows-alloc-ids.txt || true
-            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt pons > \$LOG_FOLDER/${stack_ns}/\$bbsim-pon-resources.txt || true
-          done
-          """
-        }
-      }
-      // get ONOS debug infos
-      sh '''
-
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt || true
-
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 netcfg > $LOG_FOLDER/onos-netcfg.txt || true
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg get > $LOG_FOLDER/onos-cfg.txt || true
-
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt || true
-        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt || true
-
-        if [ ${withFlows} = true ] ; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt || true
-        fi
-
-        if [ ${provisionSubscribers} = true ]; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt || true
-        fi
-
-        if [ ${withEapol} = true ] ; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt || true
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt || true
-        fi
-
-        if [ ${withDhcp} = true ] ; then
-          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt || true
-        fi
-      '''
-      // collect etcd metrics
-      sh '''
-        mkdir -p $WORKSPACE/etcd-metrics
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
-      '''
-      // get VOLTHA debug infos
-      script {
-        for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
-          stack_ns="voltha"+i
-          voltcfg="~/.volt/config-voltha"+i
-          try {
-            sh """
-
-            # _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555& > /dev/null 2>&1
-            _TAG="voltha-port-forward" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1; done"&
-
-            voltctl -m 8MB device list -o json > $LOG_FOLDER/${stack_ns}/device-list.json || true
-            python -m json.tool $LOG_FOLDER/${stack_ns}/device-list.json > $LOG_FOLDER/${stack_ns}/voltha-devices-list.json || true
-            rm $LOG_FOLDER/${stack_ns}/device-list.json || true
-            voltctl -m 8MB device list > $LOG_FOLDER/${stack_ns}/voltha-devices-list.txt || true
-
-            DEVICE_LIST=
-            printf '%s\n' \$(voltctl -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/${stack_ns}/voltha-device-flows-#.txt" || true
-            printf '%s\n' \$(voltctl -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/${stack_ns}/voltha-device-ports-#.txt" || true
-
-            printf '%s\n' \$(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-flows-#.txt" || true
-            printf '%s\n' \$(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice port list # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-ports-#.txt" || true
-
-            # remove VOLTHA port-forward
-            ps aux | grep port-forw | grep voltha-api | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
-            """
-          } catch(e) {
-            println e
-            sh '''
-            echo "Can't get device list from voltctl"
-            '''
-          }
-        }
-      }
-      // get cpu usage by container
-      sh '''
-      if [ ${withMonitoring} = true ] ; then
-        cd $WORKSPACE/voltha-system-tests
-        source ./vst_venv/bin/activate
-        sleep 60 # we have to wait for prometheus to collect all the information
-        python scripts/sizing.py -o $WORKSPACE/plots || true
-      fi
-      '''
-      archiveArtifacts artifacts: 'kind-voltha/install-*.log,execution-time-*.txt,logs/**/*.txt,logs/**/*.tar.gz,logs/**/*.tgz,RobotLogs/**/*,plots/*,etcd-metrics/*'
-    }
-  }
-}
-
-def deploy_voltha_stacks(numberOfStacks) {
-  for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
-    timeout(time: 5, unit: 'MINUTES') {
-      stage("Deploy VOLTHA stack " + i) {
-
-        def localCharts = false
-        if (volthaHelmChartsChange != "" || release != "master") {
-          localCharts = true
-        }
-
-        def volthaHelmFlags =
-                "--set global.log_level=${logLevel} " +
-                "--set enablePerf=true,onu=${onus},pon=${pons} " +
-                "--set securityContext.enabled=false " +
-                params.extraHelmFlags
-
-        volthaStackDeploy([
-          bbsimReplica: olts.toInteger(),
-          infraNamespace: "infra",
-          volthaNamespace: "voltha${i}",
-          stackName: "voltha${i}",
-          stackId: i,
-          workflow: workflow,
-          extraHelmFlags: volthaHelmFlags,
-          localCharts: localCharts,
-          onosReplica: onosReplicas,
-          adaptersToWait: 0 // in 2.8 there's no need to wait for adapters
-        ])
-      }
-    }
-  }
-}
-
-def test_voltha_stacks(numberOfStacks) {
-  for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
-    stage("Test VOLTHA stack " + i) {
-      timeout(time: 15, unit: 'MINUTES') {
-        sh """
-
-        # we are restarting the voltha-api port-forward for each stack, no need to have a different voltconfig file
-        voltctl -s 127.0.0.1:55555 config > $HOME/.volt/config
-        export VOLTCONFIG=$HOME/.volt/config
-
-        # _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555& > /dev/null 2>&1
-        _TAG="voltha-port-forward" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1; done"&
-
-
-          ROBOT_PARAMS="-v stackId:${i} \
-            -v olt:${olts} \
-            -v pon:${pons} \
-            -v onu:${onus} \
-            -v workflow:${workflow} \
-            -v withEapol:${withEapol} \
-            -v withDhcp:${withDhcp} \
-            -v withIgmp:${withIgmp} \
-            --noncritical non-critical \
-            -e igmp \
-            -e onu-upgrade \
-            -e teardown "
-
-          if [ ${withEapol} = false ] ; then
-            ROBOT_PARAMS+="-e authentication "
-          fi
-
-          if [ ${withDhcp} = false ] ; then
-            ROBOT_PARAMS+="-e dhcp "
-          fi
-
-          if [ ${provisionSubscribers} = false ] ; then
-            # if we're not considering subscribers then we don't care about authentication and dhcp
-            ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
-          fi
-
-          if [ ${withFlows} = false ] ; then
-            ROBOT_PARAMS+="-i setup -i activation "
-          fi
-
-          cd $WORKSPACE/voltha-system-tests
-          source ./vst_venv/bin/activate
-          robot -d $WORKSPACE/RobotLogs/voltha${i} \
-          \$ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-
-          # collect results
-          python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/voltha${i}/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time-voltha${i}.txt || true
-          cat $WORKSPACE/execution-time-voltha${i}.txt
-        """
-        sh """
-          # remove VOLTHA port-forward
-          ps aux | grep port-forw | grep voltha-api | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 2>&1 > /dev/null || true
-        """
-      }
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.8/voltha-scale-test.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-scale-test.groovy
deleted file mode 100644
index 5455e4e..0000000
--- a/jjb/pipeline/voltha/voltha-2.8/voltha-scale-test.groovy
+++ /dev/null
@@ -1,749 +0,0 @@
-// Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA and performs a scale test
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// this function generates the correct parameters for ofAgent
-// to connect to multiple ONOS instances
-def ofAgentConnections(numOfOnos, releaseName, namespace) {
-    def params = " "
-    numOfOnos.times {
-        params += "--set voltha.services.controller[${it}].address=${releaseName}-onos-classic-${it}.${releaseName}-onos-classic-hs.${namespace}.svc:6653 "
-    }
-    return params
-}
-
-pipeline {
-
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-      timeout(time: 60, unit: 'MINUTES')
-  }
-  environment {
-    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
-    KUBECONFIG="$HOME/.kube/config"
-    VOLTCONFIG="$HOME/.volt/config"
-    SSHPASS="karaf"
-    VOLTHA_LOG_LEVEL="${logLevel}"
-    NUM_OF_BBSIM="${olts}"
-    NUM_OF_OPENONU="${openonuAdapterReplicas}"
-    NUM_OF_ONOS="${onosReplicas}"
-    NUM_OF_ATOMIX="${atomixReplicas}"
-    EXTRA_HELM_FLAGS=" "
-
-    APPS_TO_LOG="etcd kafka onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server onos-config-loader"
-    LOG_FOLDER="$WORKSPACE/logs"
-
-    GERRIT_PROJECT="${GERRIT_PROJECT}"
-    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
-  }
-
-  stages {
-    stage ('Cleanup') {
-      steps {
-        script {
-          try {
-            timeout(time: 5, unit: 'MINUTES') {
-              sh returnStdout: false, script: '''
-              cd $WORKSPACE
-              rm -rf $WORKSPACE/*
-              '''
-              // remove the voltha-infra chart first;
-              // if we don't, ONOS might get stuck because of all the events generated when BBSim goes down
-              sh returnStdout: false, script: '''
-              set +x
-              helm del voltha-infra || true
-              echo -ne "\nWaiting for ONOS to be removed..."
-              onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
-              while [[ $onos != 0 ]]; do
-                onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
-                sleep 5
-                echo -ne "."
-              done
-              '''
-            }
-          } catch(org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
-            // if we have a timeout in the Cleanup phase, most likely ONOS got stuck somewhere, thus force remove the pods
-            sh '''
-              kubectl get pods | grep Terminating | awk '{print $1}' | xargs kubectl delete pod --force --grace-period=0
-            '''
-          }
-          timeout(time: 10, unit: 'MINUTES') {
-            script {
-              helmTeardown(["default"])
-            }
-            sh returnStdout: false, script: '''
-              helm repo add onf https://charts.opencord.org
-              helm repo update
-
-              # remove all persistent volume claims
-              kubectl delete pvc --all-namespaces --all
-              PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-              while [[ \$PVCS != 0 ]]; do
-                sleep 5
-                PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
-              done
-
-              # remove orphaned port-forward from different namespaces
-              ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
-            '''
-          }
-        }
-      }
-    }
-    stage('Download Code') {
-      steps {
-        getVolthaCode([
-          branch: "${release}",
-          volthaSystemTestsChange: "${volthaSystemTestsChange}",
-          volthaHelmChartsChange: "${volthaHelmChartsChange}",
-        ])
-      }
-    }
-    stage('Build patch') {
-      when {
-        expression {
-          return params.GERRIT_PROJECT
-        }
-      }
-      steps {
-        sh """
-        git clone https://\$GERRIT_HOST/\$GERRIT_PROJECT
-        cd \$GERRIT_PROJECT
-        git fetch https://\$GERRIT_HOST/\$GERRIT_PROJECT \$GERRIT_REFSPEC && git checkout FETCH_HEAD
-
-        DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-build
-        DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-push
-        """
-      }
-    }
-    stage('Deploy common infrastructure') {
-      steps {
-        sh '''
-        if [ ${withMonitoring} = true ] ; then
-          helm install nem-monitoring onf/nem-monitoring \
-          -f $HOME/voltha-scale/grafana.yaml \
-          --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
-          --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
-        fi
-        '''
-      }
-    }
-    stage('Deploy Voltha') {
-      steps {
-        timeout(time: 10, unit: 'MINUTES') {
-          installVoltctl("${release}")
-          script {
-            sh returnStdout: false, script: '''
-            # start logging with kail
-
-            mkdir -p $LOG_FOLDER
-
-            list=($APPS_TO_LOG)
-            for app in "${list[@]}"
-            do
-              echo "Starting logs for: ${app}"
-              _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
-            done
-            '''
-            def returned_flags = sh (returnStdout: true, script: """
-
-              export EXTRA_HELM_FLAGS+=' '
-
-              # BBSim custom image handling
-              if [ '${bbsimImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'bbsim' ]; then
-                IFS=: read -r bbsimRepo bbsimTag <<< '${bbsimImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set images.bbsim.repository=\$bbsimRepo,images.bbsim.tag=\$bbsimTag "
-              fi
-
-              # VOLTHA custom image handling
-              if [ '${rwCoreImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-go' ]; then
-                IFS=: read -r rwCoreRepo rwCoreTag <<< '${rwCoreImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=\$rwCoreRepo,voltha.images.rw_core.tag=\$rwCoreTag "
-              fi
-
-              # ofAgent custom image handling
-              if [ '${ofAgentImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'of-agent' ]; then
-                IFS=: read -r ofAgentRepo ofAgentTag <<< '${ofAgentImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=\$ofAgentRepo,voltha.images.ofagent.tag=\$ofAgentTag "
-              fi
-
-              # OpenOLT custom image handling
-              if [ '${openoltAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openolt-adapter' ]; then
-                IFS=: read -r openoltAdapterRepo openoltAdapterTag <<< '${openoltAdapterImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha-adapter-openolt.images.adapter_open_olt.repository=\$openoltAdapterRepo,voltha-adapter-openolt.images.adapter_open_olt.tag=\$openoltAdapterTag "
-              fi
-
-              # OpenONU custom image handling
-              if [ '${openonuAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter' ]; then
-                IFS=: read -r openonuAdapterRepo openonuAdapterTag <<< '${openonuAdapterImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu.repository=\$openonuAdapterRepo,voltha-adapter-openonu.images.adapter_open_onu.tag=\$openonuAdapterTag "
-              fi
-
-              # OpenONU GO custom image handling
-              if [ '${openonuAdapterGoImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter-go' ]; then
-                IFS=: read -r openonuAdapterGoRepo openonuAdapterGoTag <<< '${openonuAdapterGoImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu_go.repository=\$openonuAdapterGoRepo,voltha-adapter-openonu.images.adapter_open_onu_go.tag=\$openonuAdapterGoTag "
-              fi
-
-              # ONOS custom image handling
-              if [ '${onosImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-onos' ]; then
-                IFS=: read -r onosRepo onosTag <<< '${onosImg.trim()}'
-                EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=\$onosRepo,onos-classic.image.tag=\$onosTag "
-              fi
-
-              # set BBSim parameters
-              EXTRA_HELM_FLAGS+='--set enablePerf=true,pon=${pons},onu=${onus} '
-
-              # disable the securityContext, this is a development cluster
-              EXTRA_HELM_FLAGS+='--set securityContext.enabled=false '
-
-              # No persistent-volume-claims in Atomix
-              EXTRA_HELM_FLAGS+="--set onos-classic.atomix.persistence.enabled=false "
-
-              # Use custom built images
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-go' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=${dockerRegistry}/voltha/voltha-rw-core,voltha.images.rw_core.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-openolt-adapter' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha-openolt-adapter.images.adapter_open_olt.repository=${dockerRegistry}/voltha/voltha-openolt-adapter,voltha-openolt-adapter.images.adapter_open_olt.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter.images.adapter_open_onu.repository=${dockerRegistry}/voltha/voltha-openonu-adapter,voltha-openonu-adapter.images.adapter_open_onu.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter-go' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter-go.images.adapter_open_onu_go.repository=${dockerRegistry}/voltha/voltha-openonu-adapter-go,voltha-openonu-adapter-go.images.adapter_open_onu_go.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'ofagent-go' ]; then
-                EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=${dockerRegistry}/voltha/voltha-ofagent-go,ofagent-go.images.ofagent.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'voltha-onos' ]; then
-                EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=${dockerRegistry}/voltha/voltha-onos,onos-classic.image.tag=voltha-scale "
-              fi
-
-              if [ '\$GERRIT_PROJECT' == 'bbsim' ]; then
-                EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
-              fi
-              echo \$EXTRA_HELM_FLAGS
-
-            """).trim()
-
-            def extraHelmFlags = returned_flags
-            // The added space before params.extraHelmFlags is required due to the .trim() above
-            def infraHelmFlags =
-              "--set global.log_level=${logLevel} " +
-              "--set radius.enabled=${withEapol} " +
-              "--set onos-classic.onosSshPort=30115 " +
-              "--set onos-classic.onosApiPort=30120 " +
-              extraHelmFlags + " " + params.extraHelmFlags
-
-            println "Passing the following parameters to the VOLTHA infra deploy: ${infraHelmFlags}."
-
-            // in a released version we always want to use the local version of the helm-charts
-            def localCharts = true
-
-            volthaInfraDeploy([
-              workflow: workflow,
-              infraNamespace: "default",
-              extraHelmFlags: infraHelmFlags,
-              localCharts: localCharts,
-              onosReplica: onosReplicas,
-              atomixReplica: atomixReplicas,
-              kafkaReplica: kafkaReplicas,
-              etcdReplica: etcdReplicas,
-            ])
-
-            stackHelmFlags = " --set onu=${onus},pon=${pons} --set global.log_level=${logLevel.toLowerCase()} "
-            stackHelmFlags += " --set voltha.ingress.enabled=true --set voltha.ingress.enableVirtualHosts=true --set voltha.fullHostnameOverride=voltha.scale1.dev "
-            stackHelmFlags += extraHelmFlags + " " + params.extraHelmFlags
-
-            volthaStackDeploy([
-              bbsimReplica: olts.toInteger(),
-              infraNamespace: "default",
-              volthaNamespace: "default",
-              stackName: "voltha1", // TODO support custom charts
-              workflow: workflow,
-              extraHelmFlags: stackHelmFlags,
-              localCharts: localCharts,
-              adaptersToWait: 0, // no need to wait for adapters, 2.8 is kafka based
-              onosReplica: onosReplicas,
-            ])
-            sh """
-              set +x
-
-              echo -ne "\nWaiting for VOLTHA and ONOS to start..."
-              voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
-              onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
-              while [[ \$voltha != 0 || \$onos != 0 ]]; do
-                sleep 5
-                echo -ne "."
-                voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
-                onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
-              done
-              echo -ne "\nVOLTHA and ONOS pods ready\n"
-              kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l
-              kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l
-            """
-            start_port_forward(olts)
-          }
-        }
-      }
-    }
-    stage('Configuration') {
-      steps {
-        script {
-          def tech_prof_directory = "XGS-PON"
-          sh returnStdout: false, script: """
-          #Setting link discovery
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
-
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000
-
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set ${logLevel} org.onosproject
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set ${logLevel} org.opencord
-
-          # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.cordmcast
-          # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.onosproject.mcast
-          # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.igmpproxy
-          # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.olt
-          # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager
-
-          kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
-
-          # Set Flows/Ports/Meters/Groups poll frequency
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.group.impl.OpenFlowGroupProvider groupPollInterval ${onosGroupInterval}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.FlowObjectiveManager numThreads ${flowObjWorkerThreads}
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager objectiveTimeoutMs 300000
-
-          if [ ${withFlows} = false ]; then
-            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 app deactivate org.opencord.olt
-          fi
-
-          if [ '${workflow}' = 'tt' ]; then
-            etcd_container=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$2}')
-            kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
-            put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64')
-            kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
-            put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65')
-            kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast.json
-            put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66')
-          fi
-
-          if [ ${withPcap} = true ] ; then
-            # Start the tcp-dump in ofagent
-            export OF_AGENT=\$(kubectl get pods -l app=ofagent -o name)
-            kubectl exec \$OF_AGENT -- apk update
-            kubectl exec \$OF_AGENT -- apk add tcpdump
-            kubectl exec \$OF_AGENT -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
-            _TAG=ofagent-tcpdump kubectl exec \$OF_AGENT -- tcpdump -nei eth0 -w out.pcap&
-
-            # Start the tcp-dump in radius
-            export RADIUS=\$(kubectl get pods -l app=radius -o name)
-            kubectl exec \$RADIUS -- apt-get update
-            kubectl exec \$RADIUS -- apt-get install -y tcpdump
-            _TAG=radius-tcpdump kubectl exec \$RADIUS -- tcpdump -w out.pcap&
-
-            # Start the tcp-dump in ONOS
-            for i in \$(seq 0 \$ONOSES); do
-              INSTANCE="onos-onos-classic-\$i"
-              kubectl exec \$INSTANCE -- apt-get update
-              kubectl exec \$INSTANCE -- apt-get install -y tcpdump
-              kubectl exec \$INSTANCE -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
-              _TAG=\$INSTANCE kubectl exec \$INSTANCE -- /usr/bin/tcpdump -nei eth0 port 1812 -w out.pcap&
-            done
-          fi
-          """
-        }
-      }
-    }
-    stage('Load MIB Template') {
-      when {
-        expression {
-          return params.withMibTemplate
-        }
-      }
-      steps {
-        sh """
-        # load MIB template
-        wget https://raw.githubusercontent.com/opencord/voltha-openonu-adapter-go/voltha-2.8/templates/BBSM-12345123451234512345-00000000000001-v1.json
-        cat BBSM-12345123451234512345-BBSM_IMG_00001-v1.json | kubectl exec -it \$(kubectl get pods |grep etcd-0 | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/12345123451234512345/BBSM_IMG_00001
-        """
-      }
-    }
-    stage('Run Test') {
-      steps {
-        sh '''
-          mkdir -p $WORKSPACE/RobotLogs
-          cd $WORKSPACE/voltha-system-tests
-          make vst_venv
-        '''
-        sh '''
-          if [ ${withProfiling} = true ] ; then
-            mkdir -p $LOG_FOLDER/pprof
-            echo $PATH
-            # Create the bash script that periodically collects pprof profiles
-            cat << EOF > $WORKSPACE/pprof.sh
-timestamp() {
-  date +"%T"
-}
-
-i=0
-while [[ true ]]; do
-  ((i++))
-  ts=$(timestamp)
-  go tool pprof -png http://127.0.0.1:6060/debug/pprof/heap > $LOG_FOLDER/pprof/rw-core-heap-\\$i-\\$ts.png
-  go tool pprof -png http://127.0.0.1:6060/debug/pprof/goroutine > $LOG_FOLDER/pprof/rw-core-goroutine-\\$i-\\$ts.png
-  curl -o $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof http://127.0.0.1:6060/debug/pprof/profile?seconds=10
-  go tool pprof -png $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.png
-
-  go tool pprof -png http://127.0.0.1:6061/debug/pprof/heap > $LOG_FOLDER/pprof/openolt-heap-\\$i-\\$ts.png
-  go tool pprof -png http://127.0.0.1:6061/debug/pprof/goroutine > $LOG_FOLDER/pprof/openolt-goroutine-\\$i-\\$ts.png
-  curl -o $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof http://127.0.0.1:6061/debug/pprof/profile?seconds=10
-  go tool pprof -png $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.png
-
-  go tool pprof -png http://127.0.0.1:6062/debug/pprof/heap > $LOG_FOLDER/pprof/ofagent-heap-\\$i-\\$ts.png
-  go tool pprof -png http://127.0.0.1:6062/debug/pprof/goroutine > $LOG_FOLDER/pprof/ofagent-goroutine-\\$i-\\$ts.png
-  curl -o $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof http://127.0.0.1:6062/debug/pprof/profile?seconds=10
-  go tool pprof -png $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.png
-
-  sleep 10
-done
-EOF
-
-            _TAG="pprof"
-            _TAG=$_TAG bash $WORKSPACE/pprof.sh &
-          fi
-        '''
-        timeout(time: 15, unit: 'MINUTES') {
-          sh '''
-            ROBOT_PARAMS="--exitonfailure \
-              -v olt:${olts} \
-              -v pon:${pons} \
-              -v onu:${onus} \
-              -v ONOS_SSH_PORT:30115 \
-              -v ONOS_REST_PORT:30120 \
-              -v workflow:${workflow} \
-              -v withEapol:${withEapol} \
-              -v withDhcp:${withDhcp} \
-              -v withIgmp:${withIgmp} \
-              --noncritical non-critical \
-              -e igmp -e teardown "
-
-            if [ ${withEapol} = false ] ; then
-              ROBOT_PARAMS+="-e authentication "
-            fi
-
-            if [ ${withDhcp} = false ] ; then
-              ROBOT_PARAMS+="-e dhcp "
-            fi
-
-            if [ ${provisionSubscribers} = false ] ; then
-              # if we're not considering subscribers then we don't care about authentication and dhcp
-              ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
-            fi
-
-            if [ ${withFlows} = false ] ; then
-              ROBOT_PARAMS+="-i setup -i activation "
-            fi
-
-            cd $WORKSPACE/voltha-system-tests
-            source ./vst_venv/bin/activate
-            robot -d $WORKSPACE/RobotLogs \
-            $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-          '''
-        }
-      }
-    }
-  }
-  post {
-    always {
-      // collect result, done in the "post" step so it's executed even in the
-      // event of a timeout in the tests
-      sh '''
-
-        # stop the kail processes
-        list=($APPS_TO_LOG)
-        for app in "${list[@]}"
-        do
-          echo "Stopping logs for: ${app}"
-          _TAG="kail-$app"
-          P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
-          if [ -n "$P_IDS" ]; then
-            echo $P_IDS
-            for P_ID in $P_IDS; do
-              kill -9 $P_ID
-            done
-          fi
-        done
-
-        if [ ${withPcap} = true ] ; then
-          # stop ofAgent tcpdump
-          P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
-          if [ -n "\$P_ID" ]; then
-            kill -9 \$P_ID
-          fi
-
-          # stop radius tcpdump
-          P_ID="\$(ps e -ww -A | grep "_TAG=radius-tcpdump" | grep -v grep | awk '{print \$1}')"
-          if [ -n "\$P_ID" ]; then
-            kill -9 \$P_ID
-          fi
-
-          # stop onos tcpdump
-          LIMIT=$(($NUM_OF_ONOS - 1))
-          for i in $(seq 0 $LIMIT); do
-            INSTANCE="onos-onos-classic-$i"
-            P_ID="\$(ps e -ww -A | grep "_TAG=$INSTANCE" | grep -v grep | awk '{print \$1}')"
-            if [ -n "\$P_ID" ]; then
-              kill -9 \$P_ID
-            fi
-          done
-
-          # copy the file
-          export OF_AGENT=$(kubectl get pods -l app=ofagent | awk 'NR==2{print $1}') || true
-          kubectl cp $OF_AGENT:out.pcap $LOG_FOLDER/ofagent.pcap || true
-
-          export RADIUS=$(kubectl get pods -l app=radius | awk 'NR==2{print $1}') || true
-          kubectl cp $RADIUS:out.pcap $LOG_FOLDER/radius.pcap || true
-
-          LIMIT=$(($NUM_OF_ONOS - 1))
-          for i in $(seq 0 $LIMIT); do
-            INSTANCE="onos-onos-classic-$i"
-            kubectl cp $INSTANCE:out.pcap $LOG_FOLDER/$INSTANCE.pcap || true
-          done
-        fi
-
-        cd voltha-system-tests
-        source ./vst_venv/bin/activate || true
-        python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
-        cat $WORKSPACE/execution-time.txt
-      '''
-      sh '''
-        if [ ${withProfiling} = true ] ; then
-          _TAG="pprof"
-          P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
-          if [ -n "$P_IDS" ]; then
-            echo $P_IDS
-            for P_ID in $P_IDS; do
-              kill -9 $P_ID
-            done
-          fi
-        fi
-      '''
-      plot([
-        csvFileName: 'scale-test.csv',
-        csvSeries: [
-          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-          [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
-        ],
-        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
-      ])
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        onlyCritical: true,
-        unstableThreshold: 0]);
-      // get all the logs from kubernetes PODs
-      sh returnStdout: false, script: '''
-
-        # store information on running charts
-        helm ls > $LOG_FOLDER/helm-list.txt || true
-
-        # store information on the running pods
-        kubectl get pods -o wide > $LOG_FOLDER/pods.txt || true
-        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
-        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
-
-        # copy the ONOS logs directly from the container to avoid the color codes
-        printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
-
-        # get ONOS cfg from the 3 nodes
-        # kubectl exec -t voltha-infra-onos-classic-0 -- sh /root/onos/apache-karaf-4.2.9/bin/client cfg get > ~/voltha-infra-onos-classic-0-cfg.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-1 -- sh /root/onos/apache-karaf-4.2.9/bin/client cfg get > ~/voltha-infra-onos-classic-1-cfg.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-2 -- sh /root/onos/apache-karaf-4.2.9/bin/client cfg get > ~/voltha-infra-onos-classic-2-cfg.txt || true
-
-        # kubectl exec -t voltha-infra-onos-classic-0 -- sh /root/onos/apache-karaf-4.2.9/bin/client obj-next-ids > ~/voltha-infra-onos-classic-0-next-objs.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-1 -- sh /root/onos/apache-karaf-4.2.9/bin/client obj-next-ids > ~/voltha-infra-onos-classic-1-next-objs.txt || true
-        # kubectl exec -t voltha-infra-onos-classic-2 -- sh /root/onos/apache-karaf-4.2.9/bin/client obj-next-ids > ~/voltha-infra-onos-classic-2-next-objs.txt || true
-
-        # get radius logs out of the container
-        kubectl cp $(kubectl get pods -l app=radius --no-headers  | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER/radius.log || true
-      '''
-      // dump all the BBSim(s) ONU information
-      sh '''
-      BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
-      IDS=($BBSIM_IDS)
-
-      for bbsim in "${IDS[@]}"
-      do
-        kubectl exec -t $bbsim -- bbsimctl onu list > $LOG_FOLDER/$bbsim-device-list.txt || true
-        kubectl exec -t $bbsim -- bbsimctl service list > $LOG_FOLDER/$bbsim-service-list.txt || true
-        kubectl exec -t $bbsim -- bbsimctl olt resources GEM_PORT > $LOG_FOLDER/$bbsim-flows-gem-ports.txt || true
-        kubectl exec -t $bbsim -- bbsimctl olt resources ALLOC_ID > $LOG_FOLDER/$bbsim-flows-alloc-ids.txt || true
-        kubectl exec -t $bbsim -- bbsimctl olt pons > $LOG_FOLDER/$bbsim-pon-resources.txt || true
-      done
-      '''
-      script {
-        // first make sure the port-forward is still running,
-        // sometimes Jenkins kills it regardless of the JENKINS_NODE_COOKIE=dontKillMe
-        def running = sh (
-            script: 'ps aux | grep port-forw | grep -E "onos|voltha" | grep -v grep | wc -l',
-            returnStdout: true
-        ).trim()
-        // if any of the voltha-api, onos-rest, onos-ssh port-forwards are not there
-        // kill all and restart
-        if (running != "3") {
-          start_port_forward(olts)
-        }
-      }
-      // get ONOS debug infos
-      sh '''
-
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg get > $LOG_FOLDER/onos-cfg.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 netcfg > $LOG_FOLDER/onos-netcfg.txt
-
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt
-        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt
-
-        if [ ${withFlows} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt
-        fi
-
-        if [ ${provisionSubscribers} = true ]; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt
-        fi
-
-        if [ ${withEapol} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt
-        fi
-
-        if [ ${withDhcp} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt
-        fi
-
-        if [ ${withIgmp} = true ] ; then
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-routes > $LOG_FOLDER/onos-mcast-host-routes.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-show > $LOG_FOLDER/onos-mcast-host-show.txt
-          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 groups > $LOG_FOLDER/onos-groups.txt
-        fi
-      '''
-      // collect etcd metrics
-      sh '''
-        mkdir -p $WORKSPACE/etcd-metrics
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time-sum.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_bucket' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-backend-write-time-bucket.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_wal_fsync_duration_seconds_bucket' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-wal-fsync-time-bucket.json || true
-        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_network_peer_round_trip_time_seconds_bucket' | jq '.data'  > $WORKSPACE/etcd-metrics/etcd-network-peer-round-trip-time-seconds.json || true
-        etcd_namespace=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$1}')
-        etcd_container=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$2}')
-        kubectl exec -it -n  \$etcd_namespace \$etcd_container -- etcdctl defrag --cluster || true
-        kubectl exec -it -n  \$etcd_namespace \$etcd_container -- etcdctl endpoint status -w table > $WORKSPACE/etcd-metrics/etcd-status-table.txt || true
-
-      '''
-      // get VOLTHA debug infos
-      script {
-        try {
-          sh '''
-          voltctl -m 8MB device list -o json > $LOG_FOLDER/device-list.json || true
-          python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
-          rm $LOG_FOLDER/device-list.json || true
-          voltctl -m 8MB device list > $LOG_FOLDER/voltha-devices-list.txt || true
-
-          printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
-              printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true
-
-          printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
-          printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
-          '''
-        } catch(e) {
-          sh '''
-          echo "Can't get device list from voltclt"
-          '''
-        }
-      }
-      // get cpu usage by container
-      sh '''
-      if [ ${withMonitoring} = true ] ; then
-        cd $WORKSPACE/voltha-system-tests
-        source ./vst_venv/bin/activate || true
-        sleep 60 # we have to wait for prometheus to collect all the information
-        python scripts/sizing.py -o $WORKSPACE/plots || true
-      fi
-      '''
-      archiveArtifacts artifacts: 'execution-time.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
-    }
-  }
-}
-
-def start_port_forward(olts) {
-  sh """
-  bbsimRestPortFwd=50071
-  for i in {0..${olts.toInteger() - 1}}; do
-    daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/bbsim\${i} \${bbsimRestPortFwd}:50071
-    ((bbsimRestPortFwd++))
-  done
-  """
-}
diff --git a/jjb/pipeline/voltha/voltha-2.8/voltha-tt-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-tt-physical-functional-tests.groovy
deleted file mode 100644
index 40b61fa..0000000
--- a/jjb/pipeline/voltha/voltha-2.8/voltha-tt-physical-functional-tests.groovy
+++ /dev/null
@@ -1,233 +0,0 @@
-#!/usr/bin/env groovy
-// -----------------------------------------------------------------------
-// Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// -----------------------------------------------------------------------
-
-library identifier: 'cord-jenkins-libraries@master',
-    retriever: modernSCM([
-      $class: 'GitSCMSource',
-      remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-node {
-  // Need this so that deployment_config has global scope when it's read later
-  deployment_config = null
-}
-
-def volthaNamespace = "voltha"
-def infraNamespace = "infra"
-
-pipeline {
-  /* no label, executor is determined by JJB */
-  agent {
-    label "${params.buildNode}"
-  }
-  options {
-    timeout(time: "${timeout}", unit: 'MINUTES')
-  }
-
-  environment {
-    KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
-    VOLTCONFIG="$HOME/.volt/config-minimal"
-    PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-  }
-
-  stages {
-    stage('Clone voltha-system-tests') {
-      steps {
-        step([$class: 'WsCleanup'])
-        checkout([
-          $class: 'GitSCM',
-          userRemoteConfigs: [[
-            url: "https://gerrit.opencord.org/voltha-system-tests",
-            refspec: "${volthaSystemTestsChange}"
-          ]],
-          branches: [[ name: "${branch}", ]],
-          extensions: [
-            [$class: 'WipeWorkspace'],
-            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
-            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
-          ],
-        ])
-        script {
-          sh(script:"""
-            if [ '${volthaSystemTestsChange}' != '' ] ; then
-              cd $WORKSPACE/voltha-system-tests;
-              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
-            fi
-            """)
-        }
-      }
-    }
-    // This checkout allows us to show changes in Jenkins
-    // we only do this on master as we don't branch all the repos for all the releases
-    // (we should compute the difference by tracking the container version, not the code)
-    stage('Download All the VOLTHA repos') {
-      when {
-        expression {
-          return "${branch}" == 'master';
-        }
-      }
-      steps {
-       checkout(changelog: true,
-         poll: false,
-         scm: [$class: 'RepoScm',
-           manifestRepositoryUrl: "${params.manifestUrl}",
-           manifestBranch: "${params.branch}",
-           currentBranch: true,
-           destinationDir: 'voltha',
-           forceSync: true,
-           resetFirst: true,
-           quiet: true,
-           jobs: 4,
-           showAllChanges: true]
-         )
-      }
-    }
-    stage ('Initialize') {
-      steps {
-        sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
-        script {
-           deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-        }
-        installVoltctl("${branch}")
-        sh returnStdout: false, script: """
-        mkdir -p "$WORKSPACE/bin"
-
-        # install kail
-        make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
-
-        # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
-        # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
-        # We should change this. In the meantime here is a workaround.
-        if [ "${params.branch}" == "master" ]; then
-           set +e
-
-        # Remove noise from voltha-core logs
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
-           voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        # Remove noise from openolt logs
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
-           voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
-        fi
-        """
-      }
-    }
-
-    stage('Functional Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-        ROBOT_FILE="Voltha_TT_PODTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/FunctionalTests"
-      }
-      steps {
-        sh """
-        mkdir -p "$ROBOT_LOGS_DIR"
-        if ( ${powerSwitch} ); then
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i functionalTT -i PowerSwitch -i sanityTT -i sanityTT-MCAST -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        else
-             export ROBOT_MISC_ARGS="--removekeywords wuks -i functionalTT -e PowerSwitch -i sanityTT -i sanityTT-MCAST -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-        fi
-        ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-        make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
-        """
-      }
-    }
-
-    stage('Failure/Recovery Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-        ROBOT_FILE="Voltha_TT_FailureScenarios.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/FailureScenarios"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.enableMultiUni} = false ]; then
-          if ( ${powerSwitch} ); then
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          else
-               export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
-        fi
-        """
-      }
-    }
-
-    stage('Multi-Tcont Tests') {
-      environment {
-        ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
-        ROBOT_FILE="Voltha_TT_MultiTcontTests.robot"
-        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/MultiTcontScenarios"
-        ROBOT_TEST_INPUT_FILE="$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-TT-multi-tcont-tests-input.yaml"
-      }
-      steps {
-        sh """
-        mkdir -p $ROBOT_LOGS_DIR
-        if [ ${params.enableMultiUni} = false ]; then
-          if ( ${powerSwitch} ); then
-            export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
-          else
-            export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
-          fi
-          ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
-          make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
-        fi
-        """
-      }
-    }
-
-  }
-  post {
-    always {
-      getPodsInfo("$WORKSPACE/pods")
-      sh returnStdout: false, script: '''
-      set +e
-
-      # collect logs collected in the Robot Framework StartLogging keyword
-      cd $WORKSPACE
-      gzip *-combined.log || true
-      rm *-combined.log || true
-      '''
-      script {
-        deployment_config.olts.each { olt ->
-          if (olt.type == null || olt.type == "" || olt.type == "openolt") {
-             sh returnStdout: false, script: """
-             sshpass -p ${olt.pass} scp ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
-             sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log  # Remove escape sequences
-             sshpass -p ${olt.pass} scp ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
-             sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log  # Remove escape sequences
-             """
-          }
-        }
-      }
-      step([$class: 'RobotPublisher',
-        disableArchiveOutput: false,
-        logFileName: '**/log*.html',
-        otherFiles: '',
-        outputFileName: '**/output*.xml',
-        outputPath: 'RobotLogs',
-        passThreshold: 100,
-        reportFileName: '**/report*.html',
-        unstableThreshold: 0,
-        onlyCritical: true
-        ]);
-      archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
-    }
-  }
-}
diff --git a/jjb/pipeline/voltha/master/voltha-dt-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-dt-physical-functional-tests.groovy
similarity index 100%
rename from jjb/pipeline/voltha/master/voltha-dt-physical-functional-tests.groovy
rename to jjb/pipeline/voltha/voltha-dt-physical-functional-tests.groovy
diff --git a/jjb/pipeline/voltha/master/voltha-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-physical-functional-tests.groovy
similarity index 100%
rename from jjb/pipeline/voltha/master/voltha-physical-functional-tests.groovy
rename to jjb/pipeline/voltha/voltha-physical-functional-tests.groovy
diff --git a/jjb/pipeline/voltha/master/voltha-physical-soak-dt-tests.groovy b/jjb/pipeline/voltha/voltha-physical-soak-dt-tests.groovy
similarity index 100%
rename from jjb/pipeline/voltha/master/voltha-physical-soak-dt-tests.groovy
rename to jjb/pipeline/voltha/voltha-physical-soak-dt-tests.groovy
diff --git a/jjb/pipeline/voltha/master/voltha-scale-lwc-test.groovy b/jjb/pipeline/voltha/voltha-scale-lwc-test.groovy
similarity index 100%
rename from jjb/pipeline/voltha/master/voltha-scale-lwc-test.groovy
rename to jjb/pipeline/voltha/voltha-scale-lwc-test.groovy
diff --git a/jjb/pipeline/voltha/master/voltha-scale-multi-stack.groovy b/jjb/pipeline/voltha/voltha-scale-multi-stack.groovy
similarity index 100%
rename from jjb/pipeline/voltha/master/voltha-scale-multi-stack.groovy
rename to jjb/pipeline/voltha/voltha-scale-multi-stack.groovy
diff --git a/jjb/pipeline/voltha/master/voltha-scale-test.groovy b/jjb/pipeline/voltha/voltha-scale-test.groovy
similarity index 100%
rename from jjb/pipeline/voltha/master/voltha-scale-test.groovy
rename to jjb/pipeline/voltha/voltha-scale-test.groovy
diff --git a/jjb/pipeline/voltha/master/voltha-tt-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-tt-physical-functional-tests.groovy
similarity index 100%
rename from jjb/pipeline/voltha/master/voltha-tt-physical-functional-tests.groovy
rename to jjb/pipeline/voltha/voltha-tt-physical-functional-tests.groovy
diff --git a/jjb/software-upgrades.yaml b/jjb/software-upgrades.yaml
index f23b929..f8df3f2 100644
--- a/jjb/software-upgrades.yaml
+++ b/jjb/software-upgrades.yaml
@@ -10,7 +10,7 @@
     jobs:
       - 'software-upgrades-test-master':
           name: 'periodic-software-upgrade-test-bbsim'
-          pipeline-script: 'voltha/master/software-upgrades.groovy'
+          pipeline-script: 'voltha/software-upgrades.groovy'
           pipeline-branch: 'master'
           build-node: 'ubuntu18.04-basebuild-8c-15g'
           code-branch: 'master'
diff --git a/jjb/verify/device-management.yaml b/jjb/verify/device-management.yaml
index e6322b5..258e9a7 100644
--- a/jjb/verify/device-management.yaml
+++ b/jjb/verify/device-management.yaml
@@ -21,7 +21,7 @@
           junit-allow-empty-results: true
           build-node: 'ubuntu18.04-basebuild-2c-4g'
       - 'device-management-patch-test':
-          pipeline-script: 'voltha/master/device-management-mock-tests.groovy'
+          pipeline-script: 'voltha/device-management-mock-tests.groovy'
 
 - job-group:
     name: 'publish-device-management-jobs'
diff --git a/jjb/voltha-e2e.yaml b/jjb/voltha-e2e.yaml
index 22add45..03a0b68 100755
--- a/jjb/voltha-e2e.yaml
+++ b/jjb/voltha-e2e.yaml
@@ -2092,7 +2092,7 @@
 - job-template:
     id: 'voltha-periodic-test'
     name: '{name}'
-    pipeline-script: 'voltha/master/bbsim-tests.groovy'
+    pipeline-script: 'voltha/bbsim-tests.groovy'
     pipeline-branch: 'master'
     build-node: 'ubuntu18.04-basebuild-8c-15g'
     robot-args: ''
@@ -2252,7 +2252,7 @@
     id: 'voltha-patch-test'
     name: 'verify_{project}_sanity-test{name-extension}'
     build-node: 'ubuntu18.04-basebuild-4c-8g'
-    pipeline-script: 'voltha/master/bbsim-tests.groovy'
+    pipeline-script: 'voltha/bbsim-tests.groovy'
     pipeline-branch: 'master'
     override-branch: '$GERRIT_BRANCH'
     sandbox: true
@@ -2582,7 +2582,7 @@
                   Created by Andy Bavier, andy@opennetworking.org <br />
                   Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
     sandbox: true
-    pipeline-script: 'voltha/master/tucson-build-and-test.groovy'
+    pipeline-script: 'voltha/tucson-build-and-test.groovy'
     pipeline-branch: 'master'
     default-test-args: '-i sanityORDeleteOLT -i PowerSwitch -X'
     build-node: 'tucson-pod'
@@ -2647,7 +2647,7 @@
     config-pod: 'tucson-pod'
     oltDebVersionMaster: 'openolt_asfvolt16-3.7.4-3b190f027136e845c5850a5b1a97897ce2b74ebf-40G-NNI.deb'
     oltDebVersionVoltha23: 'openolt_asfvolt16-3.4.9-e2a9597f3d690fe3a0ea0df244571dfc9e8c2833-40G-NNI.deb'
-    pipeline-script: 'voltha/master/tucson-build-and-test.groovy'
+    pipeline-script: 'voltha/tucson-build-and-test.groovy'
     pipeline-branch: 'master'
     trigger-string: 'hardware test'
     default-test-args: '-i sanityORDeleteOLT -i PowerSwitch -X'
diff --git a/jjb/voltha-e2e/master.yaml b/jjb/voltha-e2e/master.yaml
index 0710265..d29f779 100644
--- a/jjb/voltha-e2e/master.yaml
+++ b/jjb/voltha-e2e/master.yaml
@@ -85,7 +85,7 @@
           # 20230828 - while master branch did not (?)  Script does
           # 20230828 - show in the jenkins UI but w/o updates. 
           # ---------------------------------------------------------
-          pipeline-script: 'voltha/master/bbsim-tests.groovy'
+          pipeline-script: 'voltha/bbsim-tests.groovy'
           pipeline-branch: 'master'
           time-trigger: "H H/23 * * *"         # Build daily at 11pm
 #          time-trigger: "H/30 * * * *"        # Build every 30 min
@@ -154,7 +154,7 @@
       - 'voltha-periodic-test':
           name: 'periodic-voltha-dt-test-bbsim-master'
           build-node: 'ubuntu18.04-basebuild-4c-8g'
-          pipeline-script: 'voltha/master/bbsim-tests.groovy'
+          pipeline-script: 'voltha/bbsim-tests.groovy'
           pipeline-branch: 'master'
           code-branch: 'master'
           time-trigger: "@daily"
diff --git a/jjb/voltha-scale.yaml b/jjb/voltha-scale.yaml
index e970d1b..6b41b15 100644
--- a/jjb/voltha-scale.yaml
+++ b/jjb/voltha-scale.yaml
@@ -30,7 +30,7 @@
       - 'voltha-scale-measurements':
           name: 'voltha-scale-measurements-master-experimental-multi-stack'
           build-node: 'voltha-scale-2'
-          pipeline-script: 'voltha/master/voltha-scale-multi-stack.groovy'
+          pipeline-script: 'voltha/voltha-scale-multi-stack.groovy'
           pipeline-branch: 'master'
           # trigger on Feb 29th (a.k.a only trigger it manually)
           time-trigger: "H 0 29 2 *"
@@ -223,7 +223,7 @@
       - 'voltha-scale-measurements':
           name: 'voltha-scale-measurements-master-10-stacks-2-16-32-att-subscribers'
           build-node: 'voltha-scale-2'
-          pipeline-script: 'voltha/master/voltha-scale-multi-stack.groovy'
+          pipeline-script: 'voltha/voltha-scale-multi-stack.groovy'
           pipeline-branch: 'master'
           time-trigger: "H H/4 * * *"
           disable-job: true
@@ -247,7 +247,7 @@
       - 'voltha-scale-measurements':
           name: 'voltha-scale-measurements-master-10-stacks-2-16-32-dt-subscribers'
           build-node: 'voltha-scale-2'
-          pipeline-script: 'voltha/master/voltha-scale-multi-stack.groovy'
+          pipeline-script: 'voltha/voltha-scale-multi-stack.groovy'
           pipeline-branch: 'master'
           time-trigger: "H H/4 * * *"
           disable-job: true
@@ -271,7 +271,7 @@
       - 'voltha-scale-measurements':
           name: 'voltha-scale-measurements-master-10-stacks-2-16-32-tt-subscribers'
           build-node: 'voltha-scale-2'
-          pipeline-script: 'voltha/master/voltha-scale-multi-stack.groovy'
+          pipeline-script: 'voltha/voltha-scale-multi-stack.groovy'
           pipeline-branch: 'master'
           time-trigger: "H H/4 * * *"
           disable-job: true
@@ -910,7 +910,7 @@
       # LWC pipeline
       - 'voltha-scale-measurements':
           name: 'voltha-scale-measurements-lwc-dt-512'
-          pipeline-script: 'voltha/master/voltha-scale-lwc-test.groovy'
+          pipeline-script: 'voltha/voltha-scale-lwc-test.groovy'
           pipeline-branch: 'master'
           build-node: 'berlin-community-pod-2'
           time-trigger: "H H * * *"
@@ -930,7 +930,7 @@
 
       - 'voltha-scale-measurements':
           name: 'voltha-scale-measurements-lwc-dt-256'
-          pipeline-script: 'voltha/master/voltha-scale-lwc-test.groovy'
+          pipeline-script: 'voltha/voltha-scale-lwc-test.groovy'
           pipeline-branch: 'master'
           build-node: 'berlin-community-pod-2'
           time-trigger: "H H * * *"
@@ -1214,7 +1214,7 @@
           jenkins-ssh-credential: '{jenkins-ssh-credential}'
 
     # default values
-    pipeline-script: 'voltha/master/voltha-scale-test.groovy'
+    pipeline-script: 'voltha/voltha-scale-test.groovy'
     pipeline-branch: 'master'
     release: master
 
diff --git a/jjb/voltha-test/voltha-nightly-jobs.yaml b/jjb/voltha-test/voltha-nightly-jobs.yaml
index 1a5d03a..21a702a 100644
--- a/jjb/voltha-test/voltha-nightly-jobs.yaml
+++ b/jjb/voltha-test/voltha-nightly-jobs.yaml
@@ -226,7 +226,7 @@
 
     <<: *voltha-pipe-job-boiler-plate
     # default values
-    pipeline-script: 'voltha/master/physical-build.groovy'
+    pipeline-script: 'voltha/physical-build.groovy'
     VolthaEtcdPort: 2379
     num-of-openonu: 1
     num-of-onos: 1
@@ -277,7 +277,7 @@
 
     <<: *voltha-pipe-job-boiler-plate
     # default values
-    pipeline-script: 'voltha/master/physical-build.groovy'
+    pipeline-script: 'voltha/physical-build.groovy'
     VolthaEtcdPort: 2379
     num-of-openonu: 1
     num-of-onos: 3
@@ -390,7 +390,7 @@
                   Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
 
     <<: *voltha-pipe-job-boiler-plate
-    pipeline-script: 'voltha/master/voltha-physical-functional-tests.groovy'
+    pipeline-script: 'voltha/voltha-physical-functional-tests.groovy'
     pipeline-branch: 'master'
     manifest-url: 'https://gerrit.opencord.org/voltha-test-manifest.git'
     manifest-branch: 'master'
@@ -695,7 +695,7 @@
                   Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
 
     <<: *voltha-pipe-job-boiler-plate
-    pipeline-script: 'voltha/master/voltha-physical-functional-tests.groovy'
+    pipeline-script: 'voltha/voltha-physical-functional-tests.groovy'
     pipeline-branch: 'master'
     manifest-url: 'https://gerrit.opencord.org/voltha-test-manifest.git'
     manifest-branch: 'master'
@@ -841,7 +841,7 @@
                   Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
 
     <<: *voltha-pipe-job-boiler-plate
-    pipeline-script: 'voltha/master/voltha-physical-functional-tests.groovy'
+    pipeline-script: 'voltha/voltha-physical-functional-tests.groovy'
     pipeline-branch: 'master'
     manifest-url: 'https://gerrit.opencord.org/voltha-test-manifest.git'
     manifest-branch: 'master'
@@ -987,7 +987,7 @@
 
     <<: *voltha-pipe-job-boiler-plate
     # default values
-    pipeline-script: 'voltha/master/dmi-build-and-test.groovy'
+    pipeline-script: 'voltha/dmi-build-and-test.groovy'
     installVolthaInfra: true
     installVolthaStack: true
     VolthaEtcdPort: 2379
diff --git a/jjb/voltha-test/voltha.yaml b/jjb/voltha-test/voltha.yaml
index 335e7b6..55ffd2e 100644
--- a/jjb/voltha-test/voltha.yaml
+++ b/jjb/voltha-test/voltha.yaml
@@ -54,7 +54,7 @@
           test-repo: 'voltha-system-tests'
           profile: '1T8GEM'
           power-switch: true
-          pipeline-script: 'voltha/master/voltha-dt-physical-functional-tests.groovy'
+          pipeline-script: 'voltha/voltha-dt-physical-functional-tests.groovy'
 
       # Berlin pod with gpon olt/onu - master 1T8GEM tech profile and timer based job
       - 'build_voltha_pod_release_timer':
@@ -83,7 +83,7 @@
           test-repo: 'voltha-system-tests'
           profile: '1T8GEM'
           power-switch: true
-          pipeline-script: 'voltha/master/voltha-dt-physical-functional-tests.groovy'
+          pipeline-script: 'voltha/voltha-dt-physical-functional-tests.groovy'
 
       # Berlin pod with gpon olt/onu - master 1T8GEM tech profile and timer based job
       - 'build_voltha_pod_release_timer':
@@ -240,7 +240,7 @@
           test-repo: 'voltha-system-tests'
           profile: '1T4GEM-FTTB'
           power-switch: true
-          pipeline-script: 'voltha/master/voltha-dt-physical-functional-tests.groovy'
+          pipeline-script: 'voltha/voltha-dt-physical-functional-tests.groovy'
 
       # Berlin pod with gpon olt/onu - 2.11 1T4GEM tech profile and timer based job
       - 'build_voltha_pod_release_timer':
@@ -325,7 +325,7 @@
           reinstall-olt: true
           num-of-onos: '3'
           num-of-atomix: '3'
-          pipeline-script: 'voltha/master/physical-build.groovy'
+          pipeline-script: 'voltha/physical-build.groovy'
           VolthaEtcdPort: 9999
           waitTimerForOltUp: 360
           time: '1'
@@ -343,7 +343,7 @@
           test-repo: 'voltha-system-tests'
           profile: '1T8GEM'
           power-switch: true
-          pipeline-script: 'voltha/master/voltha-dt-physical-functional-tests.groovy'
+          pipeline-script: 'voltha/voltha-dt-physical-functional-tests.groovy'
 
       # Berlin pod with zyxel xgspon olt/onu - master 1T8GEM tech profile and timer based job
       - 'build_voltha_pod_release_timer':
@@ -358,7 +358,7 @@
           reinstall-olt: true
           num-of-onos: '3'
           num-of-atomix: '3'
-          pipeline-script: 'voltha/master/physical-build.groovy'
+          pipeline-script: 'voltha/physical-build.groovy'
           VolthaEtcdPort: 9999
           waitTimerForOltUp: 360
           profile: '1T8GEM'
@@ -377,7 +377,7 @@
           test-repo: 'voltha-system-tests'
           profile: '1T8GEM'
           power-switch: true
-          pipeline-script: 'voltha/master/voltha-dt-physical-functional-tests.groovy'
+          pipeline-script: 'voltha/voltha-dt-physical-functional-tests.groovy'
 
       # TT workflow on Zyxel XGSPON OLT at Berlin pod - build job
       - 'build_voltha_pod_release_timer':
@@ -407,7 +407,7 @@
           work-flow: 'TT'
           power-switch: true
           power-cycle-olt: true
-          pipeline-script: 'voltha/master/voltha-tt-physical-functional-tests.groovy'
+          pipeline-script: 'voltha/voltha-tt-physical-functional-tests.groovy'
           test-repo: 'voltha-system-tests'
           profile: 'TP'
           timeout: 360
@@ -440,7 +440,7 @@
           work-flow: 'TT'
           power-switch: true
           power-cycle-olt: false
-          pipeline-script: 'voltha/master/voltha-tt-physical-functional-tests.groovy'
+          pipeline-script: 'voltha/voltha-tt-physical-functional-tests.groovy'
           test-repo: 'voltha-system-tests'
           profile: 'TP'
           timeout: 360