VOL-4926 - godownloader no longer available.
jjb/pipeline/voltha/voltha-2.9
jjb/pipeline/voltha/voltha-2.10
jjb/pipeline/voltha/voltha-2.11
-------------------------------
o Remove prototype versioned pipelines: 2.9, 2.10 & 2.11 are no longer needed.
jjb/pipeline/voltha/master/bbsim-tests.groovy
---------------------------------------------
o Prototype: attempt to install kail via make; voltha-system-tests may still need to be checked out first.
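  For reference, a minimal sketch of the intended Initialize step (a prototype
  sketch only: it assumes the voltha-system-tests Makefile provides a "kail"
  target that honors KAIL_PATH, and that the command words are joined with
  spaces before being passed to sh):

      stage('Initialize') {
          // Assumes a 'kail' make target exists in voltha-system-tests.
          // Build the install command from its parts and run it,
          // tolerating failure while this remains a prototype.
          String cmd = [
              'make',
              '-C', "$WORKSPACE/voltha-system-tests",
              "KAIL_PATH=\"$WORKSPACE/bin\"",
              'kail',
          ].join(' ')
          println("** Running: ${cmd}")
          sh("${cmd} || true")
      }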
Change-Id: Ia566077fa511c7dfb6494dd076d58c04a791c10e
diff --git a/jjb/pipeline/voltha/master/bbsim-tests.groovy b/jjb/pipeline/voltha/master/bbsim-tests.groovy
old mode 100755
new mode 100644
index c24a586..ffa516c
--- a/jjb/pipeline/voltha/master/bbsim-tests.groovy
+++ b/jjb/pipeline/voltha/master/bbsim-tests.groovy
@@ -1,4 +1,4 @@
-// Copyright 2021-2022 Open Networking Foundation (ONF) and the ONF Contributors
+// Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -24,9 +24,9 @@
def clusterName = "kind-ci"
def execute_test(testTarget, workflow, testLogging, teardown, testSpecificHelmFlags = "") {
- def infraNamespace = "default"
- def volthaNamespace = "voltha"
- def logsDir = "$WORKSPACE/${testTarget}"
+ def infraNamespace = "default"
+ def volthaNamespace = "voltha"
+ def logsDir = "$WORKSPACE/${testTarget}"
stage('IAM')
{
@@ -54,22 +54,36 @@
}
stage('Cleanup') {
- if (teardown) {
- timeout(15) {
- script {
- helmTeardown(["default", infraNamespace, volthaNamespace])
- }
- timeout(1) {
- sh returnStdout: false, script: '''
+ if (teardown) {
+ timeout(15) {
+ script {
+ helmTeardown(["default", infraNamespace, volthaNamespace])
+ }
+ timeout(1) {
+ sh returnStdout: false, script: '''
# remove orphaned port-forward from different namespaces
ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
'''
- }
- }
+ }
+ }
+ }
}
- }
- stage('Deploy common infrastructure') {
- sh '''
+
+ stage ('Initialize')
+ {
+ // VOL-4926 - Is voltha-system-tests available ?
+ String cmd = [
+ 'make',
+ '-C', "$WORKSPACE/voltha-system-tests",
+ "KAIL_PATH=\"$WORKSPACE/bin\"",
+ 'kail',
+        ].join(' ')
+ println(" ** Running: ${cmd}:\n")
+ sh("${cmd} || true")
+ }
+
+ stage('Deploy common infrastructure') {
+ sh '''
helm repo add onf https://charts.opencord.org
helm repo update
if [ ${withMonitoring} = true ] ; then
@@ -78,8 +92,9 @@
--set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
fi
'''
- }
- stage('Deploy Voltha') {
+ }
+
+ stage('Deploy Voltha') {
if (teardown) {
timeout(10) {
script {
@@ -163,11 +178,12 @@
}
}
}
+
stage('Run test ' + testTarget + ' on ' + workflow + ' workFlow') {
sh """
if [ ${withMonitoring} = true ] ; then
- mkdir -p $WORKSPACE/voltha-pods-mem-consumption-${workflow}
- cd $WORKSPACE/voltha-system-tests
+ mkdir -p "$WORKSPACE/voltha-pods-mem-consumption-${workflow}"
+ cd "$WORKSPACE/voltha-system-tests"
make vst_venv
source ./vst_venv/bin/activate || true
# Collect initial memory consumption
@@ -180,7 +196,7 @@
ROBOT_MISC_ARGS+="-v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120 -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace} -v container_log_dir:${logsDir} -v logging:${testLogging}"
export KVSTOREPREFIX=voltha/voltha_voltha
- make -C $WORKSPACE/voltha-system-tests ${testTarget} || true
+ make -C "$WORKSPACE/voltha-system-tests" ${testTarget} || true
"""
getPodsInfo("${logsDir}")
sh """
@@ -192,7 +208,7 @@
"""
sh """
if [ ${withMonitoring} = true ] ; then
- cd $WORKSPACE/voltha-system-tests
+ cd "$WORKSPACE/voltha-system-tests"
source ./vst_venv/bin/activate || true
# Collect memory consumption of voltha pods once all the tests are complete
python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption-${workflow} -a 0.0.0.0:31301 -n ${volthaNamespace} || true
diff --git a/jjb/pipeline/voltha/voltha-2.10/bbsim-tests.groovy b/jjb/pipeline/voltha/voltha-2.10/bbsim-tests.groovy
deleted file mode 100755
index 98b0b23..0000000
--- a/jjb/pipeline/voltha/voltha-2.10/bbsim-tests.groovy
+++ /dev/null
@@ -1,332 +0,0 @@
-// Copyright 2021-2022 Open Networking Foundation (ONF) and the ONF Contributors//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests for openonu-go
-// uses bbsim to simulate OLT/ONUs
-
-library identifier: 'cord-jenkins-libraries@master',
- retriever: modernSCM([
- $class: 'GitSCMSource',
- remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def clusterName = "kind-ci"
-
-def execute_test(testTarget, workflow, testLogging, teardown, testSpecificHelmFlags = "") {
- def infraNamespace = "default"
- def volthaNamespace = "voltha"
- def logsDir = "$WORKSPACE/${testTarget}"
-
- stage('IAM')
- {
- script
- {
- String iam = [
- 'ci-management',
- 'jjb',
- 'pipeline',
- 'voltha',
- 'voltha-2.10',
- 'bbsim-tests.groovy'
- ].join('/')
- println("** ${iam}: ENTER")
- println("** ${iam}: LEAVE")
- }
- }
-
- stage('Cleanup') {
- if (teardown) {
- timeout(15) {
- script {
- helmTeardown(["default", infraNamespace, volthaNamespace])
- }
- timeout(1) {
- sh returnStdout: false, script: '''
- # remove orphaned port-forward from different namespaces
- ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
- '''
- }
- }
- }
- }
- stage('Deploy common infrastructure') {
- sh '''
- helm repo add onf https://charts.opencord.org
- helm repo update
- if [ ${withMonitoring} = true ] ; then
- helm install nem-monitoring onf/nem-monitoring \
- --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
- --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
- fi
- '''
- }
- stage('Deploy Voltha') {
- if (teardown) {
- timeout(10) {
- script {
-
- sh """
- mkdir -p ${logsDir}
- _TAG=kail-startup kail -n ${infraNamespace} -n ${volthaNamespace} > ${logsDir}/onos-voltha-startup-combined.log &
- """
-
- // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
- def localCharts = false
- if (volthaHelmChartsChange != "" || gerritProject == "voltha-helm-charts") {
- localCharts = true
- }
-
- // NOTE temporary workaround expose ONOS node ports
- def localHelmFlags = extraHelmFlags.trim() + " --set global.log_level=${logLevel.toUpperCase()} " +
- " --set onos-classic.onosSshPort=30115 " +
- " --set onos-classic.onosApiPort=30120 " +
- " --set onos-classic.onosOfPort=31653 " +
- " --set onos-classic.individualOpenFlowNodePorts=true " + testSpecificHelmFlags
-
- if (gerritProject != "") {
- localHelmFlags = "${localHelmFlags} " + getVolthaImageFlags("${gerritProject}")
- }
-
- volthaDeploy([
- infraNamespace: infraNamespace,
- volthaNamespace: volthaNamespace,
- workflow: workflow.toLowerCase(),
- withMacLearning: enableMacLearning.toBoolean(),
- extraHelmFlags: localHelmFlags,
- localCharts: localCharts,
- bbsimReplica: olts.toInteger(),
- dockerRegistry: registry,
- ])
- }
-
- // stop logging
- sh """
- P_IDS="\$(ps e -ww -A | grep "_TAG=kail-startup" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_IDS" ]; then
- echo \$P_IDS
- for P_ID in \$P_IDS; do
- kill -9 \$P_ID
- done
- fi
- cd ${logsDir}
- gzip -k onos-voltha-startup-combined.log
- rm onos-voltha-startup-combined.log
- """
- }
- sh """
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
- bbsimDmiPortFwd=50075
- for i in {0..${olts.toInteger() - 1}}; do
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="bbsim\${i}" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/bbsim\${i} \${bbsimDmiPortFwd}:50075; done"&
- ((bbsimDmiPortFwd++))
- done
- if [ ${withMonitoring} = true ] ; then
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="nem-monitoring-prometheus-server" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n default svc/nem-monitoring-prometheus-server 31301:80; done"&
- fi
- ps aux | grep port-forward
- """
- // setting ONOS log level
- script {
- setOnosLogLevels([
- onosNamespace: infraNamespace,
- apps: [
- 'org.opencord.dhcpl2relay',
- 'org.opencord.olt',
- 'org.opencord.aaa',
- 'org.opencord.maclearner',
- 'org.onosproject.net.flowobjective.impl.FlowObjectiveManager',
- 'org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager'
- ],
- logLevel: logLevel
- ])
- }
- }
- }
- stage('Run test ' + testTarget + ' on ' + workflow + ' workFlow') {
- sh """
- if [ ${withMonitoring} = true ] ; then
- mkdir -p $WORKSPACE/voltha-pods-mem-consumption-${workflow}
- cd $WORKSPACE/voltha-system-tests
- make vst_venv
- source ./vst_venv/bin/activate || true
- # Collect initial memory consumption
- python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption-${workflow} -a 0.0.0.0:31301 -n ${volthaNamespace} || true
- fi
- """
- sh """
- mkdir -p ${logsDir}
- export ROBOT_MISC_ARGS="-d ${logsDir} ${params.extraRobotArgs} "
- ROBOT_MISC_ARGS+="-v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120 -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace} -v container_log_dir:${logsDir} -v logging:${testLogging}"
- export KVSTOREPREFIX=voltha/voltha_voltha
-
- make -C $WORKSPACE/voltha-system-tests ${testTarget} || true
- """
- getPodsInfo("${logsDir}")
- sh """
- set +e
- # collect logs collected in the Robot Framework StartLogging keyword
- cd ${logsDir}
- gzip *-combined.log || true
- rm *-combined.log || true
- """
- sh """
- if [ ${withMonitoring} = true ] ; then
- cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate || true
- # Collect memory consumption of voltha pods once all the tests are complete
- python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption-${workflow} -a 0.0.0.0:31301 -n ${volthaNamespace} || true
- fi
- """
- }
-}
-
-def collectArtifacts(exitStatus) {
- getPodsInfo("$WORKSPACE/${exitStatus}")
- sh """
- kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/${exitStatus}/voltha.log || true
- """
- archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.txt,**/*.html,**/voltha-pods-mem-consumption-att/*,**/voltha-pods-mem-consumption-dt/*,**/voltha-pods-mem-consumption-tt/*'
- sh '''
- sync
- pkill kail || true
- which voltctl
- md5sum $(which voltctl)
- '''
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: "**/*/log*.html",
- otherFiles: '',
- outputFileName: "**/*/output*.xml",
- outputPath: '.',
- passThreshold: 100,
- reportFileName: "**/*/report*.html",
- unstableThreshold: 0,
- onlyCritical: true]);
-}
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: "${timeout}", unit: 'MINUTES')
- }
- environment {
- KUBECONFIG="$HOME/.kube/kind-${clusterName}"
- VOLTCONFIG="$HOME/.volt/config"
- PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- DIAGS_PROFILE="VOLTHA_PROFILE"
- SSHPASS="karaf"
- }
- stages {
- stage('Download Code') {
- steps {
- getVolthaCode([
- branch: "${branch}",
- gerritProject: "${gerritProject}",
- gerritRefspec: "${gerritRefspec}",
- volthaSystemTestsChange: "${volthaSystemTestsChange}",
- volthaHelmChartsChange: "${volthaHelmChartsChange}",
- ])
- }
- }
- stage('Build patch') {
- // build the patch only if gerritProject is specified
- when {
- expression {
- return !gerritProject.isEmpty()
- }
- }
- steps {
- // NOTE that the correct patch has already been checked out
- // during the getVolthaCode step
- buildVolthaComponent("${gerritProject}")
- }
- }
- stage('Create K8s Cluster') {
- steps {
- script {
- def clusterExists = sh returnStdout: true, script: """
- kind get clusters | grep ${clusterName} | wc -l
- """
- if (clusterExists.trim() == "0") {
- createKubernetesCluster([nodes: 3, name: clusterName])
- }
- }
- }
- }
- stage('Replace voltctl') {
- // if the project is voltctl override the downloaded one with the built one
- when {
- expression {
- return gerritProject == "voltctl"
- }
- }
- steps{
- sh """
- mv `ls $WORKSPACE/voltctl/release/voltctl-*-linux-amd*` $WORKSPACE/bin/voltctl
- chmod +x $WORKSPACE/bin/voltctl
- """
- }
- }
- stage('Load image in kind nodes') {
- when {
- expression {
- return !gerritProject.isEmpty()
- }
- }
- steps {
- loadToKind()
- }
- }
- stage('Parse and execute tests') {
- steps {
- script {
- def tests = readYaml text: testTargets
-
- for(int i = 0;i<tests.size();i++) {
- def test = tests[i]
- def target = test["target"]
- def workflow = test["workflow"]
- def flags = test["flags"]
- def teardown = test["teardown"].toBoolean()
- def logging = test["logging"].toBoolean()
- def testLogging = 'False'
- if (logging) {
- testLogging = 'True'
- }
- println "Executing test ${target} on workflow ${workflow} with logging ${testLogging} and extra flags ${flags}"
- execute_test(target, workflow, testLogging, teardown, flags)
- }
- }
- }
- }
- }
- post {
- aborted {
- collectArtifacts("aborted")
- }
- failure {
- collectArtifacts("failed")
- }
- always {
- collectArtifacts("always")
- }
- }
-}
-
-// EOF
diff --git a/jjb/pipeline/voltha/voltha-2.11/README.md b/jjb/pipeline/voltha/voltha-2.11/README.md
deleted file mode 100644
index fd9d0a0..0000000
--- a/jjb/pipeline/voltha/voltha-2.11/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Voltha 2.11 release tests.
-
-This directory will remain empty until the release.
-Modify test sources in the master/ directory until then.
-
-Refresh all scripts in this directory at release time.
\ No newline at end of file
diff --git a/jjb/pipeline/voltha/voltha-2.11/bbsim-tests.groovy b/jjb/pipeline/voltha/voltha-2.11/bbsim-tests.groovy
deleted file mode 100755
index b35a8b1..0000000
--- a/jjb/pipeline/voltha/voltha-2.11/bbsim-tests.groovy
+++ /dev/null
@@ -1,332 +0,0 @@
-// -*- groovy -*-
-// Copyright 2021-2022 Open Networking Foundation (ONF) and the ONF Contributors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests for openonu-go
-// uses bbsim to simulate OLT/ONUs
-
-library identifier: 'cord-jenkins-libraries@master',
- retriever: modernSCM([
- $class: 'GitSCMSource',
- remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def clusterName = "kind-ci"
-
-def execute_test(testTarget, workflow, testLogging, teardown, testSpecificHelmFlags = "") {
- def infraNamespace = "default"
- def volthaNamespace = "voltha"
- def logsDir = "$WORKSPACE/${testTarget}"
-
- stage('IAM')
- {
- script
- {
- String iam = [
- 'ci-management',
- 'jjb',
- 'pipeline',
- 'voltha',
- 'voltha-11',
- 'bbsim-tests.groovy'
- ].join('/')
- println("** ${iam}: ENTER")
- println("** ${iam}: LEAVE")
- }
- }
-
- stage('Cleanup') {
- if (teardown) {
- timeout(15) {
- script {
- helmTeardown(["default", infraNamespace, volthaNamespace])
- }
- timeout(1) {
- sh returnStdout: false, script: '''
- # remove orphaned port-forward from different namespaces
- ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
- '''
- }
- }
- }
- }
- stage('Deploy common infrastructure') {
- sh '''
- helm repo add onf https://charts.opencord.org
- helm repo update
- if [ ${withMonitoring} = true ] ; then
- helm install nem-monitoring onf/nem-monitoring \
- --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
- --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
- fi
- '''
- }
- stage('Deploy Voltha') {
- if (teardown) {
- timeout(10) {
- script {
-
- sh """
- mkdir -p ${logsDir}
- _TAG=kail-startup kail -n ${infraNamespace} -n ${volthaNamespace} > ${logsDir}/onos-voltha-startup-combined.log &
- """
-
- // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
- def localCharts = false
- if (volthaHelmChartsChange != "" || gerritProject == "voltha-helm-charts") {
- localCharts = true
- }
-
- // NOTE temporary workaround expose ONOS node ports
- def localHelmFlags = extraHelmFlags.trim() + " --set global.log_level=${logLevel.toUpperCase()} " +
- " --set onos-classic.onosSshPort=30115 " +
- " --set onos-classic.onosApiPort=30120 " +
- " --set onos-classic.onosOfPort=31653 " +
- " --set onos-classic.individualOpenFlowNodePorts=true " + testSpecificHelmFlags
-
- if (gerritProject != "") {
- localHelmFlags = "${localHelmFlags} " + getVolthaImageFlags("${gerritProject}")
- }
-
- volthaDeploy([
- infraNamespace: infraNamespace,
- volthaNamespace: volthaNamespace,
- workflow: workflow.toLowerCase(),
- withMacLearning: enableMacLearning.toBoolean(),
- extraHelmFlags: localHelmFlags,
- localCharts: localCharts,
- bbsimReplica: olts.toInteger(),
- dockerRegistry: registry,
- ])
- }
-
- // stop logging
- sh """
- P_IDS="\$(ps e -ww -A | grep "_TAG=kail-startup" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_IDS" ]; then
- echo \$P_IDS
- for P_ID in \$P_IDS; do
- kill -9 \$P_ID
- done
- fi
- cd ${logsDir}
- gzip -k onos-voltha-startup-combined.log
- rm onos-voltha-startup-combined.log
- """
- }
- sh """
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
- bbsimDmiPortFwd=50075
- for i in {0..${olts.toInteger() - 1}}; do
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="bbsim\${i}" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/bbsim\${i} \${bbsimDmiPortFwd}:50075; done"&
- ((bbsimDmiPortFwd++))
- done
- if [ ${withMonitoring} = true ] ; then
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="nem-monitoring-prometheus-server" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n default svc/nem-monitoring-prometheus-server 31301:80; done"&
- fi
- ps aux | grep port-forward
- """
- // setting ONOS log level
- script {
- setOnosLogLevels([
- onosNamespace: infraNamespace,
- apps: [
- 'org.opencord.dhcpl2relay',
- 'org.opencord.olt',
- 'org.opencord.aaa',
- 'org.opencord.maclearner',
- 'org.onosproject.net.flowobjective.impl.FlowObjectiveManager',
- 'org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager'
- ],
- logLevel: logLevel
- ])
- }
- }
- }
- stage('Run test ' + testTarget + ' on ' + workflow + ' workFlow') {
- sh """
- if [ ${withMonitoring} = true ] ; then
- mkdir -p $WORKSPACE/voltha-pods-mem-consumption-${workflow}
- cd $WORKSPACE/voltha-system-tests
- make vst_venv
- source ./vst_venv/bin/activate || true
- # Collect initial memory consumption
- python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption-${workflow} -a 0.0.0.0:31301 -n ${volthaNamespace} || true
- fi
- """
- sh """
- mkdir -p ${logsDir}
- export ROBOT_MISC_ARGS="-d ${logsDir} ${params.extraRobotArgs} "
- ROBOT_MISC_ARGS+="-v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120 -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace} -v container_log_dir:${logsDir} -v logging:${testLogging}"
- export KVSTOREPREFIX=voltha/voltha_voltha
-
- make -C $WORKSPACE/voltha-system-tests ${testTarget} || true
- """
- getPodsInfo("${logsDir}")
- sh """
- set +e
- # collect logs collected in the Robot Framework StartLogging keyword
- cd ${logsDir}
- gzip *-combined.log || true
- rm *-combined.log || true
- """
- sh """
- if [ ${withMonitoring} = true ] ; then
- cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate || true
- # Collect memory consumption of voltha pods once all the tests are complete
- python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption-${workflow} -a 0.0.0.0:31301 -n ${volthaNamespace} || true
- fi
- """
- }
-}
-
-def collectArtifacts(exitStatus) {
- getPodsInfo("$WORKSPACE/${exitStatus}")
- sh """
- kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/${exitStatus}/voltha.log || true
- """
- archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.txt,**/*.html,**/voltha-pods-mem-consumption-att/*,**/voltha-pods-mem-consumption-dt/*,**/voltha-pods-mem-consumption-tt/*'
- sh '''
- sync
- pkill kail || true
- which voltctl
- md5sum $(which voltctl)
- '''
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: "**/*/log*.html",
- otherFiles: '',
- outputFileName: "**/*/output*.xml",
- outputPath: '.',
- passThreshold: 100,
- reportFileName: "**/*/report*.html",
- unstableThreshold: 0,
- onlyCritical: true]);
-}
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: "${timeout}", unit: 'MINUTES')
- }
- environment {
- KUBECONFIG="$HOME/.kube/kind-${clusterName}"
- VOLTCONFIG="$HOME/.volt/config"
- PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- DIAGS_PROFILE="VOLTHA_PROFILE"
- SSHPASS="karaf"
- }
- stages {
- stage('Download Code') {
- steps {
- getVolthaCode([
- branch: "${branch}",
- gerritProject: "${gerritProject}",
- gerritRefspec: "${gerritRefspec}",
- volthaSystemTestsChange: "${volthaSystemTestsChange}",
- volthaHelmChartsChange: "${volthaHelmChartsChange}",
- ])
- }
- }
- stage('Build patch') {
- // build the patch only if gerritProject is specified
- when {
- expression {
- return !gerritProject.isEmpty()
- }
- }
- steps {
- // NOTE that the correct patch has already been checked out
- // during the getVolthaCode step
- buildVolthaComponent("${gerritProject}")
- }
- }
- stage('Create K8s Cluster') {
- steps {
- script {
- def clusterExists = sh returnStdout: true, script: """
- kind get clusters | grep ${clusterName} | wc -l
- """
- if (clusterExists.trim() == "0") {
- createKubernetesCluster([nodes: 3, name: clusterName])
- }
- }
- }
- }
- stage('Replace voltctl') {
- // if the project is voltctl override the downloaded one with the built one
- when {
- expression {
- return gerritProject == "voltctl"
- }
- }
- steps{
- sh """
- mv `ls $WORKSPACE/voltctl/release/voltctl-*-linux-amd*` $WORKSPACE/bin/voltctl
- chmod +x $WORKSPACE/bin/voltctl
- """
- }
- }
- stage('Load image in kind nodes') {
- when {
- expression {
- return !gerritProject.isEmpty()
- }
- }
- steps {
- loadToKind()
- }
- }
- stage('Parse and execute tests') {
- steps {
- script {
- def tests = readYaml text: testTargets
-
- for(int i = 0;i<tests.size();i++) {
- def test = tests[i]
- def target = test["target"]
- def workflow = test["workflow"]
- def flags = test["flags"]
- def teardown = test["teardown"].toBoolean()
- def logging = test["logging"].toBoolean()
- def testLogging = 'False'
- if (logging) {
- testLogging = 'True'
- }
- println "Executing test ${target} on workflow ${workflow} with logging ${testLogging} and extra flags ${flags}"
- execute_test(target, workflow, testLogging, teardown, flags)
- }
- }
- }
- }
- }
- post {
- aborted {
- collectArtifacts("aborted")
- }
- failure {
- collectArtifacts("failed")
- }
- always {
- collectArtifacts("always")
- }
- }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.9/bbsim-tests.groovy b/jjb/pipeline/voltha/voltha-2.9/bbsim-tests.groovy
deleted file mode 100755
index 4e1895f..0000000
--- a/jjb/pipeline/voltha/voltha-2.9/bbsim-tests.groovy
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright 2021-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests for openonu-go
-// uses bbsim to simulate OLT/ONUs
-
-library identifier: 'cord-jenkins-libraries@master',
- retriever: modernSCM([
- $class: 'GitSCMSource',
- remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def clusterName = "kind-ci"
-
-def execute_test(testTarget, workflow, testLogging, teardown, testSpecificHelmFlags = "") {
- def infraNamespace = "default"
- def volthaNamespace = "voltha"
- def logsDir = "$WORKSPACE/${testTarget}"
-
- stage('IAM')
- {
- script
- {
- String iam = [
- 'ci-management',
- 'jjb',
- 'pipeline',
- 'voltha',
- 'voltha-2.9',
- 'bbsim-tests.groovy'
- ].join('/')
- println("** ${iam}: ENTER")
- println("** ${iam}: LEAVE")
- }
- }
-
- stage('Cleanup') {
- if (teardown) {
- timeout(15) {
- script {
- helmTeardown(["default", infraNamespace, volthaNamespace])
- }
- timeout(1) {
- sh returnStdout: false, script: '''
- # remove orphaned port-forward from different namespaces
- ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
- '''
- }
- }
- }
- }
- stage('Deploy Voltha') {
- if (teardown) {
- timeout(10) {
- script {
-
- sh """
- mkdir -p ${logsDir}
- _TAG=kail-startup kail -n ${infraNamespace} -n ${volthaNamespace} > ${logsDir}/onos-voltha-startup-combined.log &
- """
-
- // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
- def localCharts = false
- if (volthaHelmChartsChange != "" || gerritProject == "voltha-helm-charts" || branch != "master") {
- localCharts = true
- }
-
- // NOTE temporary workaround expose ONOS node ports
- def localHelmFlags = extraHelmFlags.trim() + " --set global.log_level=${logLevel.toUpperCase()} " +
- " --set onos-classic.onosSshPort=30115 " +
- " --set onos-classic.onosApiPort=30120 " +
- " --set onos-classic.onosOfPort=31653 " +
- " --set onos-classic.individualOpenFlowNodePorts=true " + testSpecificHelmFlags
-
- if (gerritProject != "") {
- localHelmFlags = "${localHelmFlags} " + getVolthaImageFlags("${gerritProject}")
- }
-
- volthaDeploy([
- infraNamespace: infraNamespace,
- volthaNamespace: volthaNamespace,
- workflow: workflow.toLowerCase(),
- extraHelmFlags: localHelmFlags,
- localCharts: localCharts,
- bbsimReplica: olts.toInteger(),
- dockerRegistry: registry,
- ])
- }
-
- // stop logging
- sh """
- P_IDS="\$(ps e -ww -A | grep "_TAG=kail-startup" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_IDS" ]; then
- echo \$P_IDS
- for P_ID in \$P_IDS; do
- kill -9 \$P_ID
- done
- fi
- cd ${logsDir}
- gzip -k onos-voltha-startup-combined.log
- rm onos-voltha-startup-combined.log
- """
- }
- sh """
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
- bbsimDmiPortFwd=50075
- for i in {0..${olts.toInteger() - 1}}; do
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="bbsim\${i}" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/bbsim\${i} \${bbsimDmiPortFwd}:50075; done"&
- ((bbsimDmiPortFwd++))
- done
- ps aux | grep port-forward
- """
- // setting ONOS log level
- script {
- setOnosLogLevels([
- onosNamespace: infraNamespace,
- apps: [
- 'org.opencord.dhcpl2relay',
- 'org.opencord.olt',
- 'org.opencord.aaa',
- 'org.onosproject.net.flowobjective.impl.FlowObjectiveManager',
- 'org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager'
- ],
- logLevel: logLevel
- ])
- }
- }
- }
- stage('Run test ' + testTarget + ' on ' + workflow + ' workFlow') {
- sh """
- mkdir -p ${logsDir}
- export ROBOT_MISC_ARGS="-d ${logsDir} ${params.extraRobotArgs} "
- ROBOT_MISC_ARGS+="-v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120 -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace} -v container_log_dir:${logsDir} -v logging:${testLogging}"
- export KVSTOREPREFIX=voltha/voltha_voltha
-
- make -C $WORKSPACE/voltha-system-tests ${testTarget} || true
- """
- getPodsInfo("${logsDir}")
- sh """
- set +e
- # collect logs collected in the Robot Framework StartLogging keyword
- cd ${logsDir}
- gzip *-combined.log || true
- rm *-combined.log || true
- """
- }
-}
-
-def collectArtifacts(exitStatus) {
- getPodsInfo("$WORKSPACE/${exitStatus}")
- sh """
- kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/${exitStatus}/voltha.log || true
- """
- archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.txt,**/*.html'
- sh '''
- sync
- pkill kail || true
- which voltctl
- md5sum $(which voltctl)
- '''
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: "**/*/log*.html",
- otherFiles: '',
- outputFileName: "**/*/output*.xml",
- outputPath: '.',
- passThreshold: 100,
- reportFileName: "**/*/report*.html",
- unstableThreshold: 0,
- onlyCritical: true]);
-}
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: "${timeout}", unit: 'MINUTES')
- }
- environment {
- KUBECONFIG="$HOME/.kube/kind-${clusterName}"
- VOLTCONFIG="$HOME/.volt/config"
- PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- DIAGS_PROFILE="VOLTHA_PROFILE"
- SSHPASS="karaf"
- }
- stages {
- stage('Download Code') {
- steps {
- getVolthaCode([
- branch: "${branch}",
- gerritProject: "${gerritProject}",
- gerritRefspec: "${gerritRefspec}",
- volthaSystemTestsChange: "${volthaSystemTestsChange}",
- volthaHelmChartsChange: "${volthaHelmChartsChange}",
- ])
- }
- }
- stage('Build patch') {
- // build the patch only if gerritProject is specified
- when {
- expression {
- return !gerritProject.isEmpty()
- }
- }
- steps {
- // NOTE that the correct patch has already been checked out
- // during the getVolthaCode step
- buildVolthaComponent("${gerritProject}")
- }
- }
- stage('Create K8s Cluster') {
- steps {
- script {
- def clusterExists = sh returnStdout: true, script: """
- kind get clusters | grep ${clusterName} | wc -l
- """
- if (clusterExists.trim() == "0") {
- createKubernetesCluster([nodes: 3, name: clusterName])
- }
- }
- }
- }
- stage('Replace voltctl') {
- // if the project is voltctl override the downloaded one with the built one
- when {
- expression {
- return gerritProject == "voltctl"
- }
- }
- steps{
- sh """
- mv `ls $WORKSPACE/voltctl/release/voltctl-*-linux-amd*` $WORKSPACE/bin/voltctl
- chmod +x $WORKSPACE/bin/voltctl
- """
- }
- }
- stage('Load image in kind nodes') {
- when {
- expression {
- return !gerritProject.isEmpty()
- }
- }
- steps {
- loadToKind()
- }
- }
- stage('Parse and execute tests') {
- steps {
- script {
- def tests = readYaml text: testTargets
-
- for(int i = 0;i<tests.size();i++) {
- def test = tests[i]
- def target = test["target"]
- def workflow = test["workflow"]
- def flags = test["flags"]
- def teardown = test["teardown"].toBoolean()
- def logging = test["logging"].toBoolean()
- def testLogging = 'False'
- if (logging) {
- testLogging = 'True'
- }
- println "Executing test ${target} on workflow ${workflow} with logging ${testLogging} and extra flags ${flags}"
- execute_test(target, workflow, testLogging, teardown, flags)
- }
- }
- }
- }
- }
- post {
- aborted {
- collectArtifacts("aborted")
- }
- failure {
- collectArtifacts("failed")
- }
- always {
- collectArtifacts("always")
- }
- }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.9/physical-build.groovy b/jjb/pipeline/voltha/voltha-2.9/physical-build.groovy
deleted file mode 100644
index 5d098b4..0000000
--- a/jjb/pipeline/voltha/voltha-2.9/physical-build.groovy
+++ /dev/null
@@ -1,414 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// used to deploy VOLTHA and configure ONOS physical PODs
-
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
- retriever: modernSCM([
- $class: 'GitSCMSource',
- remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-def deploy_custom_oltAdapterChart(namespace, name, chart, extraHelmFlags) {
- sh """
- helm install --create-namespace --set defaults.image_pullPolicy=Always --namespace ${namespace} ${extraHelmFlags} ${name} ${chart}
- """
-}
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 35, unit: 'MINUTES')
- }
- environment {
- PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
- KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
- }
-
- stages{
- stage('Download Code') {
- steps {
- getVolthaCode([
- branch: "${branch}",
- volthaSystemTestsChange: "${volthaSystemTestsChange}",
- volthaHelmChartsChange: "${volthaHelmChartsChange}",
- ])
- }
- }
- stage ("Parse deployment configuration file") {
- steps {
- sh returnStdout: true, script: "rm -rf ${configBaseDir}"
- sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
- script {
- if ( params.workFlow == "DT" ) {
- deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
- }
- else if ( params.workFlow == "TT" )
- {
- deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
- }
- else
- {
- deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
- }
- }
- }
- }
- stage('Clean up') {
- steps {
- timeout(15) {
- script {
- helmTeardown(["default", infraNamespace, volthaNamespace])
- }
- timeout(1) {
- sh returnStdout: false, script: '''
- # remove orphaned port-forward from different namespaces
- ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
- '''
- }
- }
- }
- }
- stage('Install Voltha') {
- steps {
- timeout(20) {
- installVoltctl("${branch}")
- script {
- // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
- def localCharts = false
- if (volthaHelmChartsChange != "" || branch != "master") {
- localCharts = true
- }
-
- // should the config file be suffixed with the workflow? see "deployment_config"
- def localHelmFlags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml --set global.log_level=${logLevel} "
-
- if (workFlow.toLowerCase() == "dt") {
- localHelmFlags += " --set radius.enabled=false "
- }
- if (workFlow.toLowerCase() == "tt") {
- localHelmFlags += " --set radius.enabled=false --set global.incremental_evto_update=true "
- if (enableMultiUni.toBoolean()) {
- localHelmFlags += " --set voltha-adapter-openonu.adapter_open_onu.uni_port_mask=${uniPortMask} "
- }
- }
-
- // NOTE temporary workaround expose ONOS node ports (pod-config needs to be updated to contain these values)
- // and to connect the ofagent to all instances of ONOS
- localHelmFlags = localHelmFlags + " --set onos-classic.onosSshPort=30115 " +
- "--set onos-classic.onosApiPort=30120 " +
- "--set onos-classic.onosOfPort=31653 " +
- "--set onos-classic.individualOpenFlowNodePorts=true " +
- "--set voltha.onos_classic.replicas=${params.NumOfOnos}"
-
- if (bbsimReplicas.toInteger() != 0) {
- localHelmFlags = localHelmFlags + " --set onu=${onuNumber},pon=${ponNumber} "
- }
-
- // adding user specified helm flags at the end so they'll have priority over everything else
- localHelmFlags = localHelmFlags + " ${extraHelmFlags}"
-
- def numberOfAdaptersToWait = 2
-
- if(openoltAdapterChart != "onf/voltha-adapter-openolt") {
- localHelmFlags = localHelmFlags + " --set voltha-adapter-openolt.enabled=false"
- // We skip waiting for adapters in the volthaDeploy step because it's already waiting for
- // both of them after the deployment of the custom olt adapter. See line 156.
- numberOfAdaptersToWait = 0
- }
-
- volthaDeploy([
- workflow: workFlow.toLowerCase(),
- extraHelmFlags: localHelmFlags,
- localCharts: localCharts,
- kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
- onosReplica: params.NumOfOnos,
- atomixReplica: params.NumOfAtomix,
- kafkaReplica: params.NumOfKafka,
- etcdReplica: params.NumOfEtcd,
- bbsimReplica: bbsimReplicas.toInteger(),
- adaptersToWait: numberOfAdaptersToWait,
- ])
-
- if(openoltAdapterChart != "onf/voltha-adapter-openolt"){
- extraHelmFlags = extraHelmFlags + " --set global.log_level=${logLevel}"
- deploy_custom_oltAdapterChart(volthaNamespace, oltAdapterReleaseName, openoltAdapterChart, extraHelmFlags)
- waitForAdapters([
- adaptersToWait: 2
- ])
- }
- }
- sh """
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd ${params.VolthaEtcdPort}:2379; done"&
- JENKINS_NODE_COOKIE="dontKillMe" _TAG="kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
- ps aux | grep port-forward
- """
- getPodsInfo("$WORKSPACE")
- }
- }
- }
- stage('Push Tech-Profile') {
- steps {
- script {
- if ( params.configurePod && params.profile != "Default" ) {
- for(int i=0; i < deployment_config.olts.size(); i++) {
- def tech_prof_directory = "XGS-PON"
- if (deployment_config.olts[i].containsKey("board_technology")){
- tech_prof_directory = deployment_config.olts[i]["board_technology"]
- }
- timeout(1) {
- sh returnStatus: true, script: """
- export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
- etcd_container=\$(kubectl get pods -n ${infraNamespace} -l app.kubernetes.io/name=etcd --no-headers | awk 'NR==1{print \$1}')
- if [[ "${workFlow}" == "TT" ]]; then
- if [[ "${params.enableMultiUni}" == "true" ]]; then
- kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-HSIA.json \$etcd_container:/tmp/hsia.json
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
- kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-VoIP.json \$etcd_container:/tmp/voip.json
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65'
- kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-MCAST-AdditionalBW-None.json \$etcd_container:/tmp/mcast_additionalBW_none.json
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_none.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
- kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast_additionalBW_na.json
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_na.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/67'
- else
- kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
- kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65'
- kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-None.json \$etcd_container:/tmp/mcast_additionalBW_none.json
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_none.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
- kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast_additionalBW_na.json
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_na.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/67'
- fi
- else
- kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json \$etcd_container:/tmp/flexpod.json
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
- fi
- """
- }
- timeout(1) {
- sh returnStatus: true, script: """
- export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
- etcd_container=\$(kubectl get pods -n ${infraNamespace} -l app.kubernetes.io/name=etcd --no-headers | awk 'NR==1{print \$1}')
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'ETCDCTL_API=3 etcdctl get --prefix service/voltha/technology_profiles/${tech_prof_directory}/64'
- """
- }
- }
- }
- }
- }
- }
- stage('Push MIB templates') {
- steps {
- sh """
- export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
- etcd_container=\$(kubectl get pods -n ${infraNamespace} -l app.kubernetes.io/name=etcd --no-headers | awk 'NR==1{print \$1}')
- kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/MIB_Alpha.json \$etcd_container:/tmp/MIB_Alpha.json
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/go_templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
- kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/MIB_Scom.json \$etcd_container:/tmp/MIB_Scom.json
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Scom.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/go_templates/SCOM/Glasfaser-Modem/090140.1.0.304'
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Scom.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/templates/SCOM/Glasfaser-Modem/090140.1.0.304'
- """
- }
- }
- stage('Push Sadis-config') {
- steps {
- timeout(1) {
- sh returnStatus: true, script: """
- if [[ "${workFlow}" == "DT" ]]; then
- curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-DT.json
- elif [[ "${workFlow}" == "TT" ]]; then
- curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-TT.json
- else
- # this is the ATT case, rename the file in *-sadis-ATT.json so that we can avoid special cases and just load the file
- curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis.json
- fi
- """
- }
- }
- }
- stage('Switch Configurations in ONOS') {
- steps {
- script {
- if ( deployment_config.fabric_switches.size() > 0 ) {
- timeout(1) {
- def netcfg = "$WORKSPACE/${configBaseDir}/${configToscaDir}/voltha/${configFileName}-onos-netcfg-switch.json"
- if (params.inBandManagement){
- netcfg = "$WORKSPACE/${configBaseDir}/${configToscaDir}/voltha/${configFileName}-onos-netcfg-switch-inband.json"
- }
- sh """
- curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @${netcfg}
- curl -sSL --user karaf:karaf -X POST http://${deployment_config.nodes[0].ip}:30120/onos/v1/applications/org.onosproject.segmentrouting/active
- """
- }
- timeout(1) {
- setOnosLogLevels([
- onosNamespace: infraNamespace,
- apps: [
- 'org.opencord.dhcpl2relay',
- 'org.opencord.olt',
- 'org.opencord.aaa',
- 'org.onosproject.net.flowobjective.impl.FlowObjectiveManager',
- 'org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager'
- ]
- ])
- waitUntil {
- sr_active_out = sh returnStatus: true, script: """
- curl -sSL --user karaf:karaf -X GET http://${deployment_config.nodes[0].ip}:30120/onos/v1/applications/org.onosproject.segmentrouting | jq '.state' | grep ACTIVE
- sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled false"
- sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.net.flow.impl.FlowRuleManager purgeOnDisconnection false"
- sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.net.meter.impl.MeterManager purgeOnDisconnection false"
- """
- return sr_active_out == 0
- }
- }
- timeout(5) {
- for(int i=0; i < deployment_config.hosts.src.size(); i++) {
- for(int j=0; j < deployment_config.olts.size(); j++) {
- def aggPort = -1
- if(deployment_config.olts[j].serial == deployment_config.hosts.src[i].olt){
- aggPort = deployment_config.olts[j].aggPort
- if(aggPort == -1){
- throw new Exception("Upstream port for the olt is not configured, field aggPort is empty")
- }
- sh """
- sleep 10 # NOTE why are we sleeping?
- curl -X POST --user karaf:karaf --header 'Content-Type: application/json' --header 'Accept: application/json' -d '{"deviceId": "${deployment_config.fabric_switches[0].device_id}", "vlanId": "${deployment_config.hosts.src[i].s_tag}", "endpoints": [${deployment_config.fabric_switches[0].bngPort},${aggPort}]}' 'http://${deployment_config.nodes[0].ip}:30120/onos/segmentrouting/xconnect'
- """
- }
- }
- }
- }
- }
- }
- }
- }
- stage('Reinstall OLT software') {
- steps {
- script {
- if ( params.reinstallOlt ) {
- for(int i=0; i < deployment_config.olts.size(); i++) {
- // NOTE what is oltDebVersion23? is that for VOLTHA-2.3? do we still need this differentiation?
- sh returnStdout: true, script: """
- ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
- if [ "${params.inBandManagement}" == "true" ]; then
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'kill -9 `pgrep -f "[b]ash /opt/openolt/openolt_dev_mgmt_daemon_process_watchdog"` || true'
- fi
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion}"
- sleep 10
- """
- timeout(5) {
- waitUntil {
- olt_sw_present = sh returnStdout: true, script: """
- if [[ "${deployment_config.olts[i].oltDebVersion}" == *"asfvolt16"* ]]; then
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asfvolt16 | wc -l'
- elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"asgvolt64"* ]]; then
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asgvolt64 | wc -l'
- elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600x-w"* ]]; then
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600x-w | wc -l'
- elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-1600g-w"* ]]; then
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-1600g-w | wc -l'
- elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"rlt-3200g-w"* ]]; then
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep rlt-3200g-w | wc -l'
- elif [[ "${deployment_config.olts[i].oltDebVersion}" == *"sda3016ss"* ]]; then
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep sda3016ss | wc -l'
- else
- echo Unknown Debian package for openolt
- fi
- if (${deployment_config.olts[i].fortygig}); then
- if [[ "${params.inBandManagement}" == "true" ]]; then
- ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'mkdir -p /opt/openolt/'
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/watchdog-script/* /opt/openolt/'
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/bal_cli_appl/example_user_appl /broadcom'
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp in-band-startup-script/* /etc/init.d/'
- fi
- fi
- """
- return olt_sw_present.toInteger() > 0
- }
- }
- }
- }
- }
- }
- }
- stage('Restart OLT processes') {
- steps {
- script {
- //rebooting OLTs
- for(int i=0; i < deployment_config.olts.size(); i++) {
- timeout(15) {
- sh returnStdout: true, script: """
- ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; rm -f /var/log/openolt_process_watchdog.log; reboot > /dev/null &' || true
- """
- }
- }
- sh returnStdout: true, script: """
- sleep ${params.waitTimerForOltUp}
- """
- //Checking dev_management_deamon and openoltprocesses
- for(int i=0; i < deployment_config.olts.size(); i++) {
- if ( params.oltAdapterReleaseName != "open-olt" ) {
- timeout(15) {
- waitUntil {
- devprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep dev_mgmt_daemon | wc -l'"
- return devprocess.toInteger() > 0
- }
- }
- timeout(15) {
- waitUntil {
- openoltprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep openolt | wc -l'"
- return openoltprocess.toInteger() > 0
- }
- }
- }
- }
- }
- }
- }
- }
-
- post {
- aborted {
- getPodsInfo("$WORKSPACE/failed")
- sh """
- kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.log || true
- """
- archiveArtifacts artifacts: '**/*.log,**/*.txt'
- }
- failure {
- getPodsInfo("$WORKSPACE/failed")
- sh """
- kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.logs || true
- """
- archiveArtifacts artifacts: '**/*.log,**/*.txt'
- }
- always {
- archiveArtifacts artifacts: '*.txt'
- }
- }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.9/voltha-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-2.9/voltha-physical-functional-tests.groovy
deleted file mode 100644
index 762214b..0000000
--- a/jjb/pipeline/voltha/voltha-2.9/voltha-physical-functional-tests.groovy
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-library identifier: 'cord-jenkins-libraries@master',
- retriever: modernSCM([
- $class: 'GitSCMSource',
- remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-def infraNamespace = "infra"
-def volthaNamespace = "voltha"
-
-node {
- // Need this so that deployment_config has global scope when it's read later
- deployment_config = null
-}
-
-pipeline {
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: "${timeout}", unit: 'MINUTES')
- }
-
- environment {
- KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
- VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- }
- stages {
- stage('Clone voltha-system-tests') {
- steps {
- step([$class: 'WsCleanup'])
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${branch}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- script {
- sh(script:"""
- if [ '${volthaSystemTestsChange}' != '' ] ; then
- cd $WORKSPACE/voltha-system-tests;
- git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
- fi
- """)
- }
- }
- }
- stage('Download All the VOLTHA repos') {
- when {
- expression {
- return "${branch}" == 'master';
- }
- }
- steps {
- checkout(changelog: true,
- poll: false,
- scm: [$class: 'RepoScm',
- manifestRepositoryUrl: "${params.manifestUrl}",
- manifestBranch: "${params.branch}",
- currentBranch: true,
- destinationDir: 'voltha',
- forceSync: true,
- resetFirst: true,
- quiet: true,
- jobs: 4,
- showAllChanges: true]
- )
- }
- }
- stage ('Initialize') {
- steps {
- sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
- script {
- deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
- }
- installVoltctl("${branch}")
- sh returnStdout: false, script: """
- mkdir -p $WORKSPACE/bin
- # download kail
- bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
-
- if [ "${params.branch}" == "voltha-2.9" ]; then
- # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
- # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
- # We should change this. In the meantime here is a workaround.
- set +e
-
- # Remove noise from voltha-core logs
- voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
- voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
- # Remove noise from openolt logs
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
- fi
- """
- }
- }
-
- stage('Functional Tests') {
- environment {
- ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
- ROBOT_FILE="Voltha_PODTests.robot"
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FunctionalTests"
- }
- steps {
- sh """
- mkdir -p $ROBOT_LOGS_DIR
- if ( ${powerSwitch} ); then
- export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
- else
- export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
- fi
- ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
- make -C $WORKSPACE/voltha-system-tests voltha-test || true
- """
- }
- }
-
- stage('Failure/Recovery Tests') {
- environment {
- ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
- ROBOT_FILE="Voltha_FailureScenarios.robot"
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FailureScenarios"
- }
- steps {
- sh """
- mkdir -p $ROBOT_LOGS_DIR
- if ( ${powerSwitch} ); then
- export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
- else
- export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
- fi
- ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
- make -C $WORKSPACE/voltha-system-tests voltha-test || true
- """
- }
- }
-
- stage('Dataplane Tests') {
- environment {
- ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
- ROBOT_FILE="Voltha_PODTests.robot"
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/DataplaneTests"
- }
- steps {
- sh """
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="--removekeywords wuks -i dataplane -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
- ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
- make -C $WORKSPACE/voltha-system-tests voltha-test || true
- """
- }
- }
- stage('HA Tests') {
- environment {
- ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
- ROBOT_FILE="Voltha_ONOSHATests.robot"
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ONOSHAScenarios"
- }
- steps {
- sh """
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
- ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
- make -C $WORKSPACE/voltha-system-tests voltha-test || true
- """
- }
- }
-
- stage('Error Scenario Tests') {
- environment {
- ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
- ROBOT_FILE="Voltha_ErrorScenarios.robot"
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ErrorScenarios"
- }
- steps {
- sh """
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
- ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
- make -C $WORKSPACE/voltha-system-tests voltha-test || true
- """
- }
- }
- }
- post {
- always {
- getPodsInfo("$WORKSPACE/pods")
- sh returnStdout: false, script: '''
- set +e
-
- # collect logs produced by the Robot Framework StartLogging keyword
- cd $WORKSPACE
- gzip *-combined.log || true
- rm *-combined.log || true
-
- # store information on the running pods
- kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-imagesId.txt || true
-
-
- # collect ETCD cluster logs
- mkdir -p $WORKSPACE/etcd
- printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
- '''
- script {
- deployment_config.olts.each { olt ->
- if (olt.type == null || olt.type == "" || olt.type == "openolt") {
- sh returnStdout: false, script: """
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log # Remove escape sequences
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log # Remove escape sequences
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
- """
- }
- }
- }
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: '**/log*.html',
- otherFiles: '',
- outputFileName: '**/output*.xml',
- outputPath: 'RobotLogs',
- passThreshold: 100,
- reportFileName: '**/report*.html',
- unstableThreshold: 0,
- onlyCritical: true
- ]);
- archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
- }
- }
-}
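
A note on the always{} log collection in the hunk above: the etcd pods are looked up without a namespace, while the job sets infraNamespace = "infra", so the lookup depends on the kubeconfig's default namespace. Below is a minimal namespace-aware sketch, assuming the app=etcd label from the original command still selects the etcd pods; ETCD_NAMESPACE and OUT_DIR are illustrative names, not part of the original pipeline.

    #!/usr/bin/env bash
    # Sketch only: collect etcd pod logs from an explicit namespace.
    ETCD_NAMESPACE="${ETCD_NAMESPACE:-infra}"   # assumption: matches infraNamespace above
    OUT_DIR="${OUT_DIR:-${WORKSPACE}/etcd}"
    mkdir -p "${OUT_DIR}"

    # "-o name" prints entries as "pod/<name>"; strip the prefix for the log file name.
    for pod in $(kubectl get pods -n "${ETCD_NAMESPACE}" -l app=etcd -o name); do
        name="${pod#pod/}"
        kubectl logs -n "${ETCD_NAMESPACE}" "${name}" --all-containers=true \
            > "${OUT_DIR}/${name}.log" || true
    done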