Merge "Fix Helm Charts project RegExp"
diff --git a/Makefile b/Makefile
index 15a66ee..af7a30a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# Makefile for testing JJB jobs in a virtualenv
-
.PHONY: test clean
+SHELL = bash -e -o pipefail
VENV_DIR ?= venv-jjb
JJB_VERSION ?= 3.2.0
JOBCONFIG_DIR ?= job-configs
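The added SHELL line makes every Makefile recipe run under bash with errexit and pipefail, so a failure anywhere in a piped command aborts the recipe instead of being hidden by the pipe's final command. A minimal bash sketch of the strict mode this enables (illustrative only, not part of the repository):

    #!/usr/bin/env bash
    # Illustration of the flags the Makefile now hands to its recipe shell.
    set -e -o pipefail

    # Without pipefail the pipe's status would be tee's (success) and the
    # failure of the first command would go unnoticed; with pipefail the whole
    # pipe fails and, because of -e, the script stops here.
    false | tee /dev/null
    echo "not reached when the pipe fails"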
diff --git a/jjb/build_openolt_deb.yaml b/jjb/build_openolt_deb.yaml
index e1b8cf6..9cc5408 100644
--- a/jjb/build_openolt_deb.yaml
+++ b/jjb/build_openolt_deb.yaml
@@ -30,12 +30,6 @@
build-days-to-keep: '{build-days-to-keep}'
artifact-num-to-keep: '{artifact-num-to-keep}'
- parameters:
- - string:
- name: notificationEmail
- default: 'shad@opennetworking.org kailash@opennetworking.org'
- description: ''
-
node: 'qct-pod3-node2'
project-type: pipeline
diff --git a/jjb/cord-test/nightly-build-pipeline.yaml b/jjb/cord-test/nightly-build-pipeline.yaml
index be17049..7ff3007 100644
--- a/jjb/cord-test/nightly-build-pipeline.yaml
+++ b/jjb/cord-test/nightly-build-pipeline.yaml
@@ -25,6 +25,10 @@
openoltAdapterChart: onf/voltha-adapter-openolt
+ oltAdapterReleaseName: open-olt
+
+ waitTimerForOltUp: 360
+
<<: *test-pipe-job-boiler-plate
parameters:
@@ -82,11 +86,6 @@
default: '{profile}'
description: 'Profile in which this job installs'
- - string:
- name: notificationEmail
- default: 'kailash@opennetworking.org, you@opennetworking.org, suchitra@opennetworking.org'
- description: ''
-
- bool:
name: installMonitoringAndLogging
default: false
@@ -145,7 +144,17 @@
- string:
name: openoltAdapterChart
default: '{openoltAdapterChart}'
- description: 'OpenOLT chart name (or location on file system)'
+ description: 'OLT adapter chart name (or location on file system)'
+
+ - string:
+ name: oltAdapterReleaseName
+ default: '{oltAdapterReleaseName}'
+ description: 'OLT adapter release name'
+
+ - string:
+ name: waitTimerForOltUp
+ default: '{waitTimerForOltUp}'
+ description: 'Wait timer for the OLT to come up after reboot'
concurrent: true
@@ -171,6 +180,10 @@
openoltAdapterChart: onf/voltha-adapter-openolt
+ oltAdapterReleaseName: open-olt
+
+ waitTimerForOltUp: 360
+
parameters:
- string:
name: buildNode
@@ -252,14 +265,19 @@
description: "Onus per PonPort"
- string:
- name: notificationEmail
- default: 'kailash@opennetworking.org, you@opennetworking.org, suchitra@opennetworking.org'
- description: ''
-
- - string:
name: openoltAdapterChart
default: '{openoltAdapterChart}'
- description: 'OpenOLT chart name (or location on file system)'
+ description: 'OLT adapter chart name (or location on file system)'
+
+ - string:
+ name: oltAdapterReleaseName
+ default: '{oltAdapterReleaseName}'
+ description: 'OLT adapter release name'
+
+ - string:
+ name: waitTimerForOltUp
+ default: '{waitTimerForOltUp}'
+ description: 'Wait timer for the OLT to come up after reboot'
concurrent: true
@@ -334,11 +352,6 @@
name: branch
default: '{branch}'
- - string:
- name: notificationEmail
- default: 'kailash@opennetworking.org, you@opennetworking.org, suchitra@opennetworking.org'
- description: ''
-
concurrent: true
pipeline-scm:
@@ -415,11 +428,6 @@
name: branch
default: '{branch}'
- - string:
- name: notificationEmail
- default: 'kailash@opennetworking.org, you@opennetworking.org, suchitra@opennetworking.org'
- description: ''
-
- bool:
name: configurePod
default: true
@@ -516,11 +524,6 @@
name: branch
default: '{branch}'
- - string:
- name: notificationEmail
- default: 'kailash@opennetworking.org, you@opennetworking.org, suchitra@opennetworking.org'
- description: ''
-
- bool:
name: configurePod
default: true
@@ -612,11 +615,6 @@
name: branch
default: '{branch}'
- - string:
- name: notificationEmail
- default: 'kailash@opennetworking.org, you@opennetworking.org, suchitra@opennetworking.org'
- description: ''
-
concurrent: true
pipeline-scm:
@@ -689,11 +687,6 @@
name: branch
default: '{branch}'
- - string:
- name: notificationEmail
- default: 'kailash@opennetworking.org, you@opennetworking.org, suchitra@opennetworking.org'
- description: ''
-
concurrent: true
pipeline-scm:
@@ -771,11 +764,6 @@
default: '{profile}'
description: 'Profile in which this job installs'
- - string:
- name: notificationEmail
- default: 'luca@opennetworking.org, teo@opennetworking.org, weiyu@opennetworking.org'
- description: ''
-
- bool:
name: configurePod
default: true
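The nightly-build jobs above now expose two extra string parameters, oltAdapterReleaseName (default open-olt) and waitTimerForOltUp (default 360). The pipeline that consumes them is not part of this change; a hedged bash sketch of how a downstream shell step might use them, where the helm command and chart reference are assumptions for illustration rather than something taken from this diff:

    # Sketch only: consuming the new job parameters in a pipeline shell step.
    # The parameter names and defaults come from the YAML above; the helm
    # upgrade command below is assumed, not shown in this change.
    oltAdapterReleaseName="${oltAdapterReleaseName:-open-olt}"
    waitTimerForOltUp="${waitTimerForOltUp:-360}"

    # redeploy the OLT adapter under the configurable release name
    helm upgrade --install "${oltAdapterReleaseName}" onf/voltha-adapter-openolt

    # give the OLT time to come back up after a reboot before testing continues
    sleep "${waitTimerForOltUp}"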
diff --git a/jjb/pipeline/mcord-build-test.groovy b/jjb/pipeline/mcord-build-test.groovy
index a5ebb66..8d2269e 100644
--- a/jjb/pipeline/mcord-build-test.groovy
+++ b/jjb/pipeline/mcord-build-test.groovy
@@ -132,7 +132,6 @@
sh (script: "ssh -oStrictHostKeyChecking=no -i ~/.ssh/cord ubuntu@${node_ip} 'export OS_CLOUD=openstack_helm; openstack server list --all-projects 1>&2'", returnStdout: true)
}
- step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${notificationEmail}", sendToIndividuals: false])
- }
+ }
}
}
diff --git a/jjb/pipeline/omec-container-remote.groovy b/jjb/pipeline/omec-container-remote.groovy
index 0b743eb..51270f2 100644
--- a/jjb/pipeline/omec-container-remote.groovy
+++ b/jjb/pipeline/omec-container-remote.groovy
@@ -40,7 +40,7 @@
always {
// Copy artifacts from the remote job dir (make sure both jobs run on the same node)
sh """
- cp -r ../${params.project}_premerge_${params.pod}/artifacts/* ./
+ cp -r ../${params.project}_premerge_${params.pod}/* ./
"""
archiveArtifacts artifacts: "**/*.*", allowEmptyArchive: true
}
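The copy step above now mirrors the sibling premerge job's whole directory instead of only its artifacts/ subfolder, and it still relies on both jobs running on the same node so the relative path resolves. A defensive variant of that copy, offered only as an illustration (the existence check is not part of the pipeline):

    # Sketch only: the broadened copy with a guard for a missing source directory.
    # project and pod stand in for the Jenkins params used in the pipeline.
    src="../${project}_premerge_${pod}"
    if [ -d "${src}" ]; then
        cp -r "${src}"/* ./
    else
        echo "WARN: ${src} not found; did the premerge job run on this node?" >&2
    fi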
diff --git a/jjb/pipeline/voltha-physical-soak-tests.groovy b/jjb/pipeline/voltha-physical-soak-tests.groovy
deleted file mode 100644
index 080efd1..0000000
--- a/jjb/pipeline/voltha-physical-soak-tests.groovy
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-node {
- // Need this so that deployment_config has global scope when it's read later
- deployment_config = null
-}
-
-pipeline {
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 280, unit: 'MINUTES')
- }
-
- environment {
- KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
- VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- }
-
- stages {
- stage ('Initialize') {
- steps {
- step([$class: 'WsCleanup'])
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/kind-voltha"
- script {
- deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
- }
- // This checkout allows us to show changes in Jenkins
- checkout(changelog: true,
- poll: false,
- scm: [$class: 'RepoScm',
- manifestRepositoryUrl: "${params.manifestUrl}",
- manifestBranch: "${params.branch}",
- currentBranch: true,
- destinationDir: 'voltha',
- forceSync: true,
- resetFirst: true,
- quiet: true,
- jobs: 4,
- showAllChanges: true]
- )
- sh returnStdout: false, script: """
- cd voltha
- git clone -b master ${cordRepoUrl}/cord-tester
- mkdir -p $WORKSPACE/bin
- bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
- cd $WORKSPACE
- if [ "${params.branch}" != "master" ]; then
- cd $WORKSPACE/kind-voltha
- source releases/${params.branch}
- VC_VERSION=1.1.8
- else
- VC_VERSION=\$(curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')
- fi
-
- HOSTOS=\$(uname -s | tr "[:upper:]" "[:lower:]")
- HOSTARCH=\$(uname -m | tr "[:upper:]" "[:lower:]")
- if [ \$HOSTARCH == "x86_64" ]; then
- HOSTARCH="amd64"
- fi
- curl -o $WORKSPACE/bin/voltctl -sSL https://github.com/opencord/voltctl/releases/download/v\${VC_VERSION}/voltctl-\${VC_VERSION}-\${HOSTOS}-\${HOSTARCH}
- chmod 755 $WORKSPACE/bin/voltctl
- voltctl version --clientonly
-
- if [ "${params.branch}" == "master" ]; then
- # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
- # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
- # We should change this. In the meantime here is a workaround.
- set +e
-
- # Remove noise from voltha-core logs
- voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
- voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
- # Remove noise from openolt logs
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
- fi
- """
- }
- }
-
- stage('Functional Tests') {
- environment {
- ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
- ROBOT_FILE="Voltha_PODTests.robot"
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FunctionalTests"
- }
- steps {
- sh """
- cd $WORKSPACE/voltha/kind-voltha/scripts
- ./log-collector.sh > /dev/null &
- ./log-combine.sh > /dev/null &
-
- mkdir -p $ROBOT_LOGS_DIR
- if [ "${params.testType}" == "Functional" ]; then
- if ( ${powerSwitch} ); then
- export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanity -i functional -e DeleteOLT -e DisableONU_AuthCheck -e DisableDeleteONUandOLT -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
- else
- export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
- fi
- make -C $WORKSPACE/voltha/voltha-system-tests voltha-test || true
- fi
- """
- }
- }
-
- stage('Failure/Recovery Tests') {
- environment {
- ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
- ROBOT_FILE="Voltha_FailureScenarios.robot"
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FailureScenarios"
- }
- steps {
- sh """
- mkdir -p $ROBOT_LOGS_DIR
- if [ "${params.testType}" == "Failure" ]; then
- if ( ${powerSwitch} ); then
- export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
- else
- export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functional -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
- fi
- make -C $WORKSPACE/voltha/voltha-system-tests voltha-test || true
- fi
- """
- }
- }
-
- stage('Dataplane Tests') {
- environment {
- ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
- ROBOT_FILE="Voltha_PODTests.robot"
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/DataplaneTests"
- }
- steps {
- sh """
- mkdir -p $ROBOT_LOGS_DIR
- if [ "${params.testType}" == "Dataplane" ]; then
- export ROBOT_MISC_ARGS="--removekeywords wuks -i dataplane -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
- make -C $WORKSPACE/voltha/voltha-system-tests voltha-test || true
- fi
- """
- }
- }
-
- }
- post {
- always {
- sh returnStdout: false, script: '''
- set +e
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -n voltha -o wide
-
- sleep 60 # Wait for log-collector and log-combine to complete
-
- # Clean up "announcer" pod used by the tests if present
- kubectl delete pod announcer || true
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep '"level":"error"' $WORKSPACE/voltha/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep 'ERROR' $WORKSPACE/voltha/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
- extract_errors_python onos >> $WORKSPACE/error-report.log
-
- cd $WORKSPACE/voltha/kind-voltha/scripts/logger/combined/
- tar czf $WORKSPACE/container-logs.tgz *
-
- cd $WORKSPACE
- gzip *-combined.log || true
-
- # collect ETCD cluster logs
- mkdir -p $WORKSPACE/etcd
- printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
- '''
- script {
- deployment_config.olts.each { olt ->
- sh returnStdout: false, script: """
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log # Remove escape sequences
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log # Remove escape sequences
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
- """
- }
- }
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: '**/log*.html',
- otherFiles: '',
- outputFileName: '**/output*.xml',
- outputPath: 'RobotLogs',
- passThreshold: 100,
- reportFileName: '**/report*.html',
- unstableThreshold: 0
- ]);
- archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log'
- }
- unstable {
- step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${notificationEmail}", sendToIndividuals: false])
- }
- }
-}
diff --git a/jjb/pipeline/voltha-scale-matrix.groovy b/jjb/pipeline/voltha-scale-matrix.groovy
deleted file mode 100644
index 7cc810b..0000000
--- a/jjb/pipeline/voltha-scale-matrix.groovy
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2019-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-pipeline {
-
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 120, unit: 'MINUTES')
- }
- environment {
- JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
- KUBECONFIG="$HOME/.kube/config"
- VOLTCONFIG="$HOME/.volt/config"
- SSHPASS="karaf"
- SCHEDULE_ON_CONTROL_NODES="yes"
- FANCY=0
- NAME="minimal"
-
- WITH_SIM_ADAPTERS="no"
- WITH_RADIUS="no"
- WITH_BBSIM="yes"
- LEGACY_BBSIM_INDEX="no"
- DEPLOY_K8S="no"
- CONFIG_SADIS="external"
- VOLTHA_LOG_LEVEL="WARN"
-
- // install everything in the default namespace
- VOLTHA_NS="default"
- ADAPTER_NS="default"
- INFRA_NS="default"
- BBSIM_NS="default"
-
- // workflow
- WITH_EAPOL="no"
- WITH_DHCP="no"
- WITH_IGMP="no"
-
- // infrastructure size
- NUM_OF_OPENONU="${openonuAdapterReplicas}"
- NUM_OF_ONOS="${onosReplicas}"
- NUM_OF_ATOMIX="${atomixReplicas}"
- NUM_OF_KAFKA="${kafkaReplicas}"
- NUM_OF_ETCD="${etcdReplicas}"
- }
-
- stages {
- stage ('Parse parameters') {
- steps {
- script {
- format = "format is 'olt-pon-onu' separated bya comma, eg: '1-16-16, 1-16-32, 2-16-32'"
- source = params.topologies
-
- if (source == null || source == "") {
- throw new Exception("You need to specify some deployment topologies, " + format)
- }
-
- topologies = []
-
- for(topo in source.split(",")) {
- t = topo.split("-")
- topologies.add(['olt': t[0].trim(), 'pon': t[1].trim(), 'onu': t[2].trim()])
- }
-
- if (topologies.size() == 0) {
- throw new Exception("Not enough topologies defined, " + format)
- }
- println "Deploying topologies:"
- println topologies
- }
- }
- }
- stage ('Cleanup') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh returnStdout: false, script: """
- helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com
- helm repo add stable https://kubernetes-charts.storage.googleapis.com
- helm repo add onf https://charts.opencord.org
- helm repo add cord https://charts.opencord.org
- helm repo add onos https://charts.onosproject.org
- helm repo add atomix https://charts.atomix.io
- helm repo add bbsim-sadis https://ciena.github.io/bbsim-sadis-server/charts
- helm repo update
-
- for hchart in \$(helm list -q | grep -E -v 'docker-registry|kafkacat');
- do
- echo "Purging chart: \${hchart}"
- helm delete "\${hchart}"
- done
- bash /home/cord/voltha-scale/wait_for_pods.sh
-
- cd $WORKSPACE
- rm -rf $WORKSPACE/*
- """
- }
- }
- }
- stage('Clone kind-voltha') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[ url: "https://gerrit.opencord.org/kind-voltha", ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- script {
- sh(script:"""
- if [ '${kindVolthaChange}' != '' ] ; then
- cd $WORKSPACE/kind-voltha;
- git fetch https://gerrit.opencord.org/kind-voltha ${kindVolthaChange} && git checkout FETCH_HEAD
- fi
- """)
- }
- }
- }
- stage('Clone voltha-system-tests') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[ url: "https://gerrit.opencord.org/voltha-system-tests", ]],
- branches: [[ name: "${release}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- script {
- sh(script:"""
- if [ '${volthaSystemTestsChange}' != '' ] ; then
- cd $WORKSPACE/voltha-system-tests;
- git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
- fi
- """)
- }
- }
- }
- stage('Deploy and test') {
- steps {
- repeat_deploy_and_test(topologies)
- }
- }
- stage('Aggregate stats') {
- steps {
- sh returnStdout: false, script: """
- export IN_FOLDER=$WORKSPACE/stats/
- export OUT_FOLDER=$WORKSPACE/plots/
- mkdir -p \$OUT_FOLDER
- cd $WORKSPACE/voltha-system-tests
- make vst_venv
- source ./vst_venv/bin/activate
-
- sleep 60 # we have to wait for prometheus to collect all the information
-
- python tests/scale/stats-aggregation.py -s \$IN_FOLDER -o \$OUT_FOLDER
- """
- }
- }
- }
- post {
- always {
- archiveArtifacts artifacts: '*-install-minimal.log,*-minimal-env.sh,RobotLogs/**/*,stats/**/*,logs/**/*'
- }
- }
-}
-
-def repeat_deploy_and_test(list) {
- for (int i = 0; i < list.size(); i++) {
- stage('Cleanup') {
- sh returnStdout: false, script: """
- for hchart in \$(helm list -q | grep -E -v 'bbsim-sadis-server|onos|radius');
- do
- echo "Purging chart: \${hchart}"
- helm delete "\${hchart}"
- done
- bash /home/cord/voltha-scale/wait_for_pods.sh
- """
- }
- stage('Deploy monitoring infrastructure') {
- sh returnStdout: false, script: '''
- helm install nem-monitoring cord/nem-monitoring \
- -f $HOME/voltha-scale/grafana.yaml \
- --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
- --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
-
- # TODO download this file from https://github.com/opencord/helm-charts/blob/master/scripts/wait_for_pods.sh
- bash /home/cord/voltha-scale/wait_for_pods.sh
- '''
- }
- stage('Deploy topology: ' + list[i]['olt'] + "-" + list[i]['pon'] + "-" + list[i]['onu']) {
- timeout(time: 10, unit: 'MINUTES') {
- script {
- now = new Date();
- currentRunStart = now.getTime() / 1000;
- println("Start: " + currentRunStart)
- }
- sh returnStdout: false, script: """
- cd $WORKSPACE/kind-voltha/
-
- if [ '${release.trim()}' != 'master' ]; then
- source $WORKSPACE/kind-voltha/releases/${release}
- fi
-
- # if it's newer than voltha-2.4 set the correct BBSIM_CFG
- if [ '${release.trim()}' != 'voltha-2.4' ]; then
- export BBSIM_CFG="$WORKSPACE/kind-voltha/configs/bbsim-sadis-dt.yaml"
- fi
-
- export NUM_OF_BBSIM=${list[i]['olt']}
- export EXTRA_HELM_FLAGS+="--set enablePerf=true,pon=${list[i]['pon']},onu=${list[i]['onu']} "
- export EXTRA_HELM_FLAGS+="--set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default"
- ./voltha up
-
- # disable LLDP
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled false
-
- cp minimal-env.sh ../${list[i]['olt']}-${list[i]['pon']}-${list[i]['onu']}-minimal-env.sh
- cp install-minimal.log ../${list[i]['olt']}-${list[i]['pon']}-${list[i]['onu']}-install-minimal.log
- """
- sleep(120) // TODO: can we improve this by checking when bbsim-sadis-server has actually finished loading subscribers?
- }
- }
- stage('Test topology: ' + list[i]['olt'] + "-" + list[i]['pon'] + "-" + list[i]['onu']) {
- timeout(time: 15, unit: 'MINUTES') {
- sh returnStdout: false, script: """
- mkdir -p $WORKSPACE/RobotLogs/${list[i]['olt']}-${list[i]['pon']}-${list[i]['onu']}
- cd $WORKSPACE/voltha-system-tests
- make vst_venv
-
- export ROBOT_PARAMS=" \
- -v olt:${list[i]['olt']} \
- -v pon:${list[i]['pon']} \
- -v onu:${list[i]['onu']} \
- -v workflow:dt \
- -v withEapol:false \
- -v withDhcp:false \
- -v withIgmp:false \
- -e authentication \
- -e dhcp"
-
- cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate
- robot -d $WORKSPACE/RobotLogs/${list[i]['olt']}-${list[i]['pon']}-${list[i]['onu']} \
- \$ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
- """
- }
- }
- stage('Collect metrics: ' + list[i]['olt'] + "-" + list[i]['pon'] + "-" + list[i]['onu']) {
- script {
- now = new Date();
- currentRunEnd = now.getTime() / 1000;
- println("End: " + currentRunEnd)
- delta = currentRunEnd - currentRunStart
- println("Delta: " + delta)
- minutesDelta = Math.ceil(delta / 60).toInteger()
- println("Delta in minutes: " + minutesDelta)
- }
- sh returnStdout: false, script: """
- export LOG_FOLDER=$WORKSPACE/stats/${list[i]['olt']}-${list[i]['pon']}-${list[i]['onu']}
- mkdir -p \$LOG_FOLDER
- cd $WORKSPACE/voltha-system-tests
- make vst_venv
- source ./vst_venv/bin/activate
-
- sleep 60 # we have to wait for prometheus to collect all the information
-
- python tests/scale/sizing.py -o \$LOG_FOLDER -s ${minutesDelta}
- """
- }
- }
-}
diff --git a/jjb/pipeline/voltha-scale-multi-stack.groovy b/jjb/pipeline/voltha-scale-multi-stack.groovy
deleted file mode 100644
index 8264387..0000000
--- a/jjb/pipeline/voltha-scale-multi-stack.groovy
+++ /dev/null
@@ -1,724 +0,0 @@
-// Copyright 2019-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA using kind-voltha and performs a scale test
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 120, unit: 'MINUTES')
- }
- environment {
- JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
- KUBECONFIG="$HOME/.kube/config"
- SSHPASS="karaf"
- PATH="$PATH:$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- SCHEDULE_ON_CONTROL_NODES="yes"
- FANCY=0
- WAIT_ON_DOWN="yes"
- WITH_SIM_ADAPTERS="no"
- WITH_RADIUS="${withRadius}"
- WITH_BBSIM="yes"
- LEGACY_BBSIM_INDEX="no"
- DEPLOY_K8S="no"
- CONFIG_SADIS="external"
- WITH_KAFKA="yes"
- WITH_ETCD="yes"
- VOLTHA_ETCD_PORT=9999
- INFRA_NS="infra"
-
- // configurable options
- WITH_EAPOL="${withEapol}"
- WITH_DHCP="${withDhcp}"
- WITH_IGMP="${withIgmp}"
- VOLTHA_LOG_LEVEL="${logLevel}"
- NUM_OF_BBSIM="${olts}"
- NUM_OF_OPENONU="${openonuAdapterReplicas}"
- NUM_OF_ONOS="${onosReplicas}"
- NUM_OF_ATOMIX="${atomixReplicas}"
- NUM_OF_KAFKA="${kafkaReplicas}"
- NUM_OF_ETCD="${etcdReplicas}"
- WITH_PPROF="${withProfiling}"
- EXTRA_HELM_FLAGS="${extraHelmFlags} " // note that the trailing space is required to separate the parameters from appends done later
- VOLTHA_CHART="${volthaChart}"
- VOLTHA_BBSIM_CHART="${bbsimChart}"
- VOLTHA_ADAPTER_OPEN_OLT_CHART="${openoltAdapterChart}"
- VOLTHA_ADAPTER_OPEN_ONU_CHART="${openonuAdapterChart}"
- ONOS_CLASSIC_CHART="${onosChart}"
- RADIUS_CHART="${radiusChart}"
-
- APPS_TO_LOG="etcd kafka onos-onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server"
- LOG_FOLDER="$WORKSPACE/logs"
-
- GERRIT_PROJECT="${GERRIT_PROJECT}"
- }
-
- stages {
- stage ('Cleanup') {
- steps {
- timeout(time: 11, unit: 'MINUTES') {
- sh returnStdout: false, script: """
- helm repo add stable https://charts.helm.sh/stable
- helm repo add onf https://charts.opencord.org
- helm repo add cord https://charts.opencord.org
- helm repo add onos https://charts.onosproject.org
- helm repo add atomix https://charts.atomix.io
- helm repo add bbsim-sadis https://ciena.github.io/bbsim-sadis-server/charts
- helm repo update
-
- # removing ETCD port forward
- P_ID="\$(ps e -ww -A | grep "_TAG=etcd-port-forward" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_ID" ]; then
- kill -9 \$P_ID
- fi
-
- NAMESPACES="voltha1 voltha2 infra default"
- for NS in \$NAMESPACES
- do
- for hchart in \$(helm list -n \$NS -q | grep -E -v 'docker-registry|kafkacat');
- do
- echo "Purging chart: \${hchart}"
- helm delete -n \$NS "\${hchart}"
- done
- done
-
- test -e $WORKSPACE/kind-voltha/voltha && cd $WORKSPACE/kind-voltha && ./voltha down
-
- cd $WORKSPACE
- rm -rf $WORKSPACE/*
-
- # remove orphaned port-forward from different namespaces
- ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
- """
- }
- }
- }
- stage('Clone kind-voltha') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- script {
- sh(script:"""
- if [ '${kindVolthaChange}' != '' ] ; then
- cd $WORKSPACE/kind-voltha;
- git fetch https://gerrit.opencord.org/kind-voltha ${kindVolthaChange} && git checkout FETCH_HEAD
- fi
- """)
- }
- }
- }
- stage('Clone voltha-system-tests') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${release}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- script {
- sh(script:"""
- if [ '${volthaSystemTestsChange}' != '' ] ; then
- cd $WORKSPACE/voltha-system-tests;
- git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
- fi
- """)
- }
- }
- }
- stage('Build patch') {
- when {
- expression {
- return params.GERRIT_PROJECT
- }
- }
- steps {
- sh """
- git clone https://\$GERRIT_HOST/\$GERRIT_PROJECT
- cd \$GERRIT_PROJECT
- git fetch https://\$GERRIT_HOST/\$GERRIT_PROJECT \$GERRIT_REFSPEC && git checkout FETCH_HEAD
-
- DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-build
- DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-push
- """
- }
- }
- stage('Deploy common infrastructure') {
- // includes monitoring, kafka, etcd
- steps {
- sh '''
- if [ ${withMonitoring} = true ] ; then
- helm install -n $INFRA_NS nem-monitoring cord/nem-monitoring \
- -f $HOME/voltha-scale/grafana.yaml \
- --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
- --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
- fi
- '''
- }
- }
- stage('Deploy VOLTHA infrastructure') {
- steps {
- sh returnStdout: false, script: '''
-
- cd $WORKSPACE/kind-voltha/
-
- export ETCD_CHART=$HOME/teone/helm-charts/etcd
- export KAFKA_CHART=$HOME/teone/helm-charts/kafka
-
- # KAFKA config
- export NUM_OF_KAFKA=${kafkaReplicas}
- export EXTRA_HELM_FLAGS+=' --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default '
-
- # ETCD config
- export EXTRA_HELM_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
-
- NAME=infra JUST_INFRA=y ./voltha up
-
- # Forward the ETCD port onto $VOLTHA_ETCD_PORT
- _TAG=etcd-port-forward kubectl -n \$INFRA_NS port-forward --address 0.0.0.0 -n default service/etcd $VOLTHA_ETCD_PORT:2379&
- '''
- }
- }
- stage('Deploy Voltha') {
- steps {
- deploy_voltha_stacks(params.volthaStacks)
- }
- }
- stage('Start logging') {
- steps {
- sh returnStdout: false, script: '''
- # start logging with kail
-
- mkdir -p $LOG_FOLDER
-
- list=($APPS_TO_LOG)
- for app in "${list[@]}"
- do
- echo "Starting logs for: ${app}"
- _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
- done
- '''
- }
- }
- stage('Configuration') {
- steps {
- script {
- sh returnStdout: false, script: """
-
- # TODO this needs to be repeated per stack
- # kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
-
- #Setting link discovery
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 1000
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.onosproject
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.opencord
-
- # Set Flows/Ports/Meters poll frequency
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
-
- if [ ${withFlows} = false ]; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
- fi
-
- if [ ${withPcap} = true ] && [ ${volthaStacks} -eq 1 ] ; then
- # Start the tcp-dump in ofagent
- export OF_AGENT=\$(kubectl -n \$INFRA_NS get pods -l app=ofagent -o name)
- kubectl exec \$OF_AGENT -- apk update
- kubectl exec \$OF_AGENT -- apk add tcpdump
- kubectl exec \$OF_AGENT -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
- _TAG=ofagent-tcpdump kubectl -n \$INFRA_NS exec \$OF_AGENT -- tcpdump -nei eth0 -w out.pcap&
-
- # Start the tcp-dump in radius
- export RADIUS=\$(kubectl -n \$INFRA_NS get pods -l app=radius -o name)
- kubectl exec \$RADIUS -- apt-get update
- kubectl exec \$RADIUS -- apt-get install -y tcpdump
- _TAG=radius-tcpdump kubectl -n \$INFRA_NS exec \$RADIUS -- tcpdump -w out.pcap&
-
- # Start the tcp-dump in ONOS
- for i in \$(seq 0 \$ONOSES); do
- INSTANCE="onos-onos-classic-\$i"
- kubectl exec \$INSTANCE -- apt-get update
- kubectl exec \$INSTANCE -- apt-get install -y tcpdump
- kubectl exec \$INSTANCE -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
- _TAG=\$INSTANCE kubectl -n \$INFRA_NS exec \$INSTANCE -- /usr/bin/tcpdump -nei eth0 port 1812 -w out.pcap&
- done
- else
- echo "PCAP not supported for multiple VOLTHA stacks"
- fi
- """
- }
- }
- }
- stage('Setup Test') {
- steps {
- sh '''
- mkdir -p $WORKSPACE/RobotLogs
- cd $WORKSPACE/voltha-system-tests
- make vst_venv
- '''
- sh '''
- if [ ${withProfiling} = true ] && [ ${volthaStacks} -eq 1 ]; then
- mkdir -p $LOG_FOLDER/pprof
- cat << EOF > $WORKSPACE/pprof.sh
-timestamp() {
- date +"%T"
-}
-
-i=0
-while [[ true ]]; do
- ((i++))
- ts=$(timestamp)
- go tool pprof -png http://127.0.0.1:6060/debug/pprof/heap > $LOG_FOLDER/pprof/rw-core-heap-\\$i-\\$ts.png
- go tool pprof -png http://127.0.0.1:6060/debug/pprof/goroutine > $LOG_FOLDER/pprof/rw-core-goroutine-\\$i-\\$ts.png
- curl -o $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof http://127.0.0.1:6060/debug/pprof/profile?seconds=10
- go tool pprof -png $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.png
-
- go tool pprof -png http://127.0.0.1:6061/debug/pprof/heap > $LOG_FOLDER/pprof/openolt-heap-\\$i-\\$ts.png
- go tool pprof -png http://127.0.0.1:6061/debug/pprof/goroutine > $LOG_FOLDER/pprof/openolt-goroutine-\\$i-\\$ts.png
- curl -o $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof http://127.0.0.1:6061/debug/pprof/profile?seconds=10
- go tool pprof -png $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.png
-
- go tool pprof -png http://127.0.0.1:6062/debug/pprof/heap > $LOG_FOLDER/pprof/ofagent-heap-\\$i-\\$ts.png
- go tool pprof -png http://127.0.0.1:6062/debug/pprof/goroutine > $LOG_FOLDER/pprof/ofagent-goroutine-\\$i-\\$ts.png
- curl -o $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof http://127.0.0.1:6062/debug/pprof/profile?seconds=10
- go tool pprof -png $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.png
-
- sleep 10
-done
-EOF
-
- _TAG="pprof"
- _TAG=$_TAG bash $WORKSPACE/pprof.sh &
- else
- echo "Profiling not supported for multiple VOLTHA stacks"
- fi
- '''
- }
- }
- stage('Run Test') {
- steps {
- test_voltha_stacks(params.volthaStacks)
- }
- }
- }
- post {
- always {
- // collect result, done in the "post" step so it's executed even in the
- // event of a timeout in the tests
- sh '''
-
- # stop the kail processes
- list=($APPS_TO_LOG)
- for app in "${list[@]}"
- do
- echo "Stopping logs for: ${app}"
- _TAG="kail-$app"
- P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
- done
- '''
- // compressing the logs to save space on Jenkins
- sh '''
- cd $LOG_FOLDER
- tar -czf logs.tar.gz *.log
- rm *.log
- '''
- sh '''
-
- if [ ${withPcap} = true ] && [ ${volthaStacks} -eq 1 ]; then
- # stop ofAgent tcpdump
- P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_ID" ]; then
- kill -9 \$P_ID
- fi
-
- # stop radius tcpdump
- P_ID="\$(ps e -ww -A | grep "_TAG=radius-tcpdump" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_ID" ]; then
- kill -9 \$P_ID
- fi
-
- # stop onos tcpdump
- LIMIT=$(($NUM_OF_ONOS - 1))
- for i in $(seq 0 $LIMIT); do
- INSTANCE="onos-onos-classic-$i"
- P_ID="\$(ps e -ww -A | grep "_TAG=$INSTANCE" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_ID" ]; then
- kill -9 \$P_ID
- fi
- done
-
- # copy the file
- export OF_AGENT=$(kubectl get pods -l app=ofagent | awk 'NR==2{print $1}') || true
- kubectl cp $OF_AGENT:out.pcap $LOG_FOLDER/ofagent.pcap || true
-
- export RADIUS=$(kubectl get pods -l app=radius | awk 'NR==2{print $1}') || true
- kubectl cp $RADIUS:out.pcap $LOG_FOLDER/radius.pcap || true
-
- LIMIT=$(($NUM_OF_ONOS - 1))
- for i in $(seq 0 $LIMIT); do
- INSTANCE="onos-onos-classic-$i"
- kubectl cp $INSTANCE:out.pcap $LOG_FOLDER/$INSTANCE.pcap || true
- done
- fi
- '''
- sh '''
- if [ ${withProfiling} = true ] && [ ${volthaStacks} -eq 1 ]; then
- _TAG="pprof"
- P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
- fi
- '''
- plot([
- csvFileName: 'scale-test.csv',
- csvSeries: [
- [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- ],
- group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (Stacks: ${params.volthaStacks}, OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
- ])
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'RobotLogs/**/log.html',
- otherFiles: '',
- outputFileName: 'RobotLogs/**/output.xml',
- outputPath: '.',
- passThreshold: 100,
- reportFileName: 'RobotLogs/**/report.html',
- unstableThreshold: 0]);
- // get all the logs from kubernetes PODs
- sh returnStdout: false, script: '''
-
- # store information on running charts
- helm ls --all-namespaces > $LOG_FOLDER/helm-list.txt || true
-
- # store information on the running pods
- kubectl get pods --all-namespaces -o wide > $LOG_FOLDER/pods.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
-
- # copy the ONOS logs directly from the container to avoid the color codes
- printf '%s\n' $(kubectl -n \$INFRA_NS get pods -l app=onos-onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl -n \$INFRA_NS cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
-
- # get radius logs out of the container
- kubectl -n \$INFRA_NS cp $(kubectl -n \$INFRA_NS get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER//radius.log || true
- '''
- // dump all the BBSim(s) ONU information
- script {
- for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
- stack_ns="voltha"+i
- sh """
- BBSIM_IDS=\$(kubectl -n ${stack_ns} get pods | grep bbsim | grep -v server | awk '{print \$1}')
- IDS=(\$BBSIM_IDS)
-
- for bbsim in "\${IDS[@]}"
- do
- kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl onu list > $LOG_FOLDER/${stack_ns}/\$bbsim-device-list.txt || true
- kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl service list > $LOG_FOLDER/${stack_ns}/\$bbsim-service-list.txt || true
- done
- """
- }
- }
- // get ONOS debug infos
- sh '''
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt || true
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt || true
-
- if [ ${withFlows} = true ] ; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt || true
- fi
-
- if [ ${provisionSubscribers} = true ]; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt || true
- fi
-
- if [ ${withEapol} = true ] ; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt || true
- fi
-
- if [ ${withDhcp} = true ] ; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt || true
- fi
- '''
- // collect etcd metrics
- sh '''
- mkdir -p $WORKSPACE/etcd-metrics
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
- '''
- // get VOLTHA debug infos
- script {
- for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
- stack_ns="voltha"+i
- voltcfg="~/.volt/config-voltha"+i
- try {
- sh """
- voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list -o json > $LOG_FOLDER/${stack_ns}/device-list.json || true
- python -m json.tool $LOG_FOLDER/${stack_ns}/device-list.json > $LOG_FOLDER/${stack_ns}/voltha-devices-list.json || true
- rm $LOG_FOLDER/${stack_ns}/device-list.json || true
- voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list > $LOG_FOLDER/${stack_ns}/voltha-devices-list.txt || true
-
- DEVICE_LIST=
- printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device flows # > $LOG_FOLDER/${stack_ns}/voltha-device-flows-#.txt" || true
- printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/${stack_ns}/voltha-device-ports-#.txt" || true
-
- printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice flows # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-flows-#.txt" || true
- printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice port list # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-ports-#.txt" || true
- """
- } catch(e) {
- sh '''
- echo "Can't get device list from voltclt"
- '''
- }
- }
- }
- // get cpu usage by container
- sh '''
- if [ ${withMonitoring} = true ] ; then
- cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate
- sleep 60 # we have to wait for prometheus to collect all the information
- python tests/scale/sizing.py -o $WORKSPACE/plots || true
- fi
- '''
- archiveArtifacts artifacts: 'kind-voltha/install-*.log,execution-time-*.txt,logs/**/*.txt,logs/**/*.tar.gz,RobotLogs/**/*,plots/*,etcd-metrics/*'
- }
- }
-}
-
-def deploy_voltha_stacks(numberOfStacks) {
- for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
- stage("Deploy VOLTHA stack " + i) {
- sh returnStdout: false, script: """
-
- # unset voltha-api port so that the port is forwarded on a new one
- unset VOLTHA_API_PORT
-
- cd $WORKSPACE/kind-voltha/
-
- export NAME=voltha${i}
- export VOLTHA_NS=voltha${i}
- export ADAPTER_NS=voltha${i}
- export BBSIM_NS=voltha${i}
- export BBSIM_BASE_INDEX=${i}
- export WITH_ETCD=etcd.\$INFRA_NS.svc:2379
- export WITH_KAFKA=kafka.\$INFRA_NS.svc:9092
- export WITH_ONOS=onos-onos-classic-hs.\$INFRA_NS.svc:6653
-
- export EXTRA_HELM_FLAGS+=' '
-
- # Load the release defaults
- if [ '${release.trim()}' != 'master' ]; then
- source $WORKSPACE/kind-voltha/releases/${release}
- EXTRA_HELM_FLAGS+=" ${extraHelmFlags} "
- fi
-
- # BBSim custom image handling
- if [ '${bbsimImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'bbsim' ]; then
- IFS=: read -r bbsimRepo bbsimTag <<< '${bbsimImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.bbsim.repository=\$bbsimRepo,images.bbsim.tag=\$bbsimTag "
- fi
-
- # VOLTHA and ofAgent custom image handling
- # NOTE to override the rw-core image in a released version you must set the ofAgent image too
- # TODO split ofAgent and voltha-go
- if [ '${rwCoreImg.trim()}' != '' ] && [ '${ofAgentImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-go' ]; then
- IFS=: read -r rwCoreRepo rwCoreTag <<< '${rwCoreImg.trim()}'
- IFS=: read -r ofAgentRepo ofAgentTag <<< '${ofAgentImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.rw_core.repository=\$rwCoreRepo,images.rw_core.tag=\$rwCoreTag,images.ofagent.repository=\$ofAgentRepo,images.ofagent.tag=\$ofAgentTag "
- fi
-
- # OpenOLT custom image handling
- if [ '${openoltAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openolt-adapter' ]; then
- IFS=: read -r openoltAdapterRepo openoltAdapterTag <<< '${openoltAdapterImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.adapter_open_olt.repository=\$openoltAdapterRepo,images.adapter_open_olt.tag=\$openoltAdapterTag "
- fi
-
- # OpenONU custom image handling
- if [ '${openonuAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter' ]; then
- IFS=: read -r openonuAdapterRepo openonuAdapterTag <<< '${openonuAdapterImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.adapter_open_onu.repository=\$openonuAdapterRepo,images.adapter_open_onu.tag=\$openonuAdapterTag "
- fi
-
- # OpenONU Go custom image handling
- if [ '${openonuAdapterGoImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter-go' ]; then
- IFS=: read -r openonuAdapterGoRepo openonuAdapterGoTag <<< '${openonuAdapterGoImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.adapter_open_onu_go.repository=\$openonuAdapterGoRepo,images.adapter_open_onu_go.tag=\$openonuAdapterGoTag "
- fi
-
- # ONOS custom image handling
- if [ '${onosImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-onos' ]; then
- IFS=: read -r onosRepo onosTag <<< '${onosImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.onos.repository=\$onosRepo,images.onos.tag=\$onosTag "
- fi
-
- # set BBSim parameters
- EXTRA_HELM_FLAGS+='--set enablePerf=true,pon=${pons},onu=${onus} '
-
- # disable the securityContext, this is a development cluster
- EXTRA_HELM_FLAGS+='--set securityContext.enabled=false '
-
- # No persistent-volume-claims in Atomix
- EXTRA_HELM_FLAGS+="--set atomix.persistence.enabled=false "
-
- echo "Installing with the following extra arguments:"
- echo $EXTRA_HELM_FLAGS
-
- # if it's newer than voltha-2.4 set the correct BBSIM_CFG
- if [ '${release.trim()}' != 'voltha-2.4' ]; then
- export BBSIM_CFG="$WORKSPACE/kind-voltha/configs/bbsim-sadis-${workflow}.yaml"
- fi
-
- # Use custom built images
-
- if [ '\$GERRIT_PROJECT' == 'voltha-go' ]; then
- EXTRA_HELM_FLAGS+="--set images.rw_core.repository=${dockerRegistry}/voltha/voltha-rw-core,images.rw_core.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-openolt-adapter' ]; then
- EXTRA_HELM_FLAGS+="--set images.adapter_open_olt.repository=${dockerRegistry}/voltha/voltha-openolt-adapter,images.adapter_open_olt.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter' ]; then
- EXTRA_HELM_FLAGS+="--set images.adapter_open_onu.repository=${dockerRegistry}/voltha/voltha-openonu-adapter,images.adapter_open_onu.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter-go' ]; then
- EXTRA_HELM_FLAGS+="--set images.adapter_open_onu_go.repository=${dockerRegistry}/voltha/voltha-openonu-adapter-go,images.adapter_open_onu_go.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'ofagent-go' ]; then
- EXTRA_HELM_FLAGS+="--set images.ofagent.repository=${dockerRegistry}/voltha/voltha-ofagent-go,images.ofagent.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-onos' ]; then
- EXTRA_HELM_FLAGS+="--set images.onos.repository=${dockerRegistry}/voltha/voltha-onos,images.onos.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'bbsim' ]; then
- EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
- fi
-
- ./voltha up
- """
- }
- }
-}
-
-def test_voltha_stacks(numberOfStacks) {
- for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
- stage("Test VOLTHA stack " + i) {
- timeout(time: 15, unit: 'MINUTES') {
- sh """
- export VOLTCONFIG="$HOME/.volt/config-voltha${i}"
- ROBOT_PARAMS="-v stackId:${i} \
- -v olt:${olts} \
- -v pon:${pons} \
- -v onu:${onus} \
- -v workflow:${workflow} \
- -v withEapol:${withEapol} \
- -v withDhcp:${withDhcp} \
- -v withIgmp:${withIgmp} \
- --noncritical non-critical \
- -e teardown "
-
- if [ ${withEapol} = false ] ; then
- ROBOT_PARAMS+="-e authentication "
- fi
-
- if [ ${withDhcp} = false ] ; then
- ROBOT_PARAMS+="-e dhcp "
- fi
-
- if [ ${provisionSubscribers} = false ] ; then
- # if we're not considering subscribers then we don't care about authentication and dhcp
- ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
- fi
-
- if [ ${withFlows} = false ] ; then
- ROBOT_PARAMS+="-i setup -i activation "
- fi
-
- cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate
- robot -d $WORKSPACE/RobotLogs/voltha${i} \
- \$ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
-
- # collect results
- python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/voltha${i}/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time-voltha${i}.txt || true
- cat $WORKSPACE/execution-time-voltha${i}.txt
- """
- }
- }
- }
-}
diff --git a/jjb/pipeline/voltha-tt-physical-functional-tests.groovy b/jjb/pipeline/voltha-tt-physical-functional-tests.groovy
deleted file mode 100644
index 93486ab..0000000
--- a/jjb/pipeline/voltha-tt-physical-functional-tests.groovy
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-node {
- // Need this so that deployment_config has global scope when it's read later
- deployment_config = null
-}
-
-pipeline {
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 180, unit: 'MINUTES')
- }
-
- environment {
- KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
- VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- }
-
- stages {
- stage('Clone kind-voltha') {
- steps {
- step([$class: 'WsCleanup'])
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
- stage('Clone voltha-system-tests') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${branch}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- script {
- sh(script:"""
- if [ '${volthaSystemTestsChange}' != '' ] ; then
- cd $WORKSPACE/voltha-system-tests;
- git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
- fi
- """)
- }
- }
- }
- stage('Clone cord-tester') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/cord-tester",
- refspec: "${cordTesterChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
- // This checkout allows us to show changes in Jenkins
- // we only do this on master as we don't branch all the repos for all the releases
- // (we should compute the difference by tracking the container version, not the code)
- stage('Download All the VOLTHA repos') {
- when {
- expression {
- return "${branch}" == 'master';
- }
- }
- steps {
- checkout(changelog: true,
- poll: false,
- scm: [$class: 'RepoScm',
- manifestRepositoryUrl: "${params.manifestUrl}",
- manifestBranch: "${params.branch}",
- currentBranch: true,
- destinationDir: 'voltha',
- forceSync: true,
- resetFirst: true,
- quiet: true,
- jobs: 4,
- showAllChanges: true]
- )
- }
- }
- stage ('Initialize') {
- steps {
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
- script {
- deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
- }
- sh returnStdout: false, script: """
- mkdir -p $WORKSPACE/bin
- bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
- cd $WORKSPACE
- if [ "${params.branch}" != "master" ]; then
- cd $WORKSPACE/kind-voltha
- source releases/${params.branch}
- else
- VOLTCTL_VERSION=\$(curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')
- fi
-
- HOSTOS=\$(uname -s | tr "[:upper:]" "[:lower:"])
- HOSTARCH=\$(uname -m | tr "[:upper:]" "[:lower:"])
- if [ \$HOSTARCH == "x86_64" ]; then
- HOSTARCH="amd64"
- fi
- curl -o $WORKSPACE/bin/voltctl -sSL https://github.com/opencord/voltctl/releases/download/v\${VOLTCTL_VERSION}/voltctl-\${VOLTCTL_VERSION}-\${HOSTOS}-\${HOSTARCH}
- chmod 755 $WORKSPACE/bin/voltctl
- voltctl version --clientonly
-
-
- # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
- # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
- # We should change this. In the meantime here is a workaround.
- if [ "${params.branch}" == "master" ]; then
- set +e
-
-
- # Remove noise from voltha-core logs
- voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
- voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
- # Remove noise from openolt logs
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
- fi
- """
- }
- }
-
- stage('Functional Tests') {
- environment {
- ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
- ROBOT_FILE="Voltha_TT_PODTests.robot"
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/FunctionalTests"
- }
- steps {
- sh """
- cd $WORKSPACE/kind-voltha/scripts
- ./log-collector.sh > /dev/null &
- ./log-combine.sh > /dev/null &
-
- mkdir -p $ROBOT_LOGS_DIR
- if ( ${powerSwitch} ); then
- export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanityTT -i sanityTT-MCAST -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
- else
- export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanityTT -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
- fi
- make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
- """
- }
- }
-
- stage('Failure/Recovery Tests') {
- environment {
- ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
- ROBOT_FILE="Voltha_TT_FailureScenarios.robot"
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/FailureScenarios"
- }
- steps {
- sh """
- mkdir -p $ROBOT_LOGS_DIR
- if ( ${powerSwitch} ); then
- export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
- else
- export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
- fi
- make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
- """
- }
- }
-
- }
- post {
- always {
- sh returnStdout: false, script: '''
- set +e
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -n voltha -o wide
- kubectl get pods -o wide
-
- sleep 60 # Wait for log-collector and log-combine to complete
-
- # Clean up "announcer" pod used by the tests if present
- kubectl delete pod announcer || true
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
- extract_errors_python onos >> $WORKSPACE/error-report.log
-
- gzip error-report.log || true
- rm error-report.log || true
-
- cd $WORKSPACE/kind-voltha/scripts/logger/combined/
- tar czf $WORKSPACE/container-logs.tgz *
- rm * || true
-
- cd $WORKSPACE
- gzip *-combined.log || true
- rm *-combined.log || true
-
- # store information on running charts
- helm ls > $WORKSPACE/helm-list.txt || true
-
- # store information on the running pods
- kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-imagesId.txt || true
-
- '''
- script {
- deployment_config.olts.each { olt ->
- if (olt.type == null || olt.type == "" || olt.type == "openolt") {
- sh returnStdout: false, script: """
- sshpass -p ${olt.pass} scp ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log # Remove escape sequences
- """
- }
- }
- }
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: '**/log*.html',
- otherFiles: '',
- outputFileName: '**/output*.xml',
- outputPath: 'RobotLogs',
- passThreshold: 100,
- reportFileName: '**/report*.html',
- unstableThreshold: 0
- ]);
- archiveArtifacts artifacts: '*.log,*.gz,*.tgz'
- }
- unstable {
- step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${notificationEmail}", sendToIndividuals: false])
- }
- }
-}
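
The post section of the removed TT pipeline scraped per-component errors out of the kind-voltha combined logs: Go components are matched on the structured "level":"error" field, Python components on plain ERROR lines. A hedged Groovy sketch of the same idea, assuming it runs inside a Jenkins pipeline where the sh step is available (extractErrors and logDir are illustrative names, not from the original file):

    // Illustrative only: mirrors the deleted extract_errors_go / extract_errors_python helpers.
    def extractErrors(String logDir, String component, boolean goComponent) {
        // Go components emit structured JSON logs, Python components emit plain ERROR lines
        def pattern = goComponent ? '"level":"error"' : 'ERROR'
        return sh(
            returnStdout: true,
            script: "grep '${pattern}' ${logDir}/${component}* || true"
        ).trim()
    }
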
diff --git a/jjb/pipeline/voltha/master/bbsim-tests.groovy b/jjb/pipeline/voltha/master/bbsim-tests.groovy
old mode 100644
new mode 100755
index a67d32a..0fe3eb4
--- a/jjb/pipeline/voltha/master/bbsim-tests.groovy
+++ b/jjb/pipeline/voltha/master/bbsim-tests.groovy
@@ -1,4 +1,4 @@
-// Copyright 2017-present Open Networking Foundation
+// Copyright 2021-present Open Networking Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,98 +12,155 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// voltha-2.x e2e tests
+// voltha-2.x e2e tests for openonu-go
// uses bbsim to simulate OLT/ONUs
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
library identifier: 'cord-jenkins-libraries@master',
retriever: modernSCM([
$class: 'GitSCMSource',
remote: 'https://gerrit.opencord.org/ci-management.git'
])
-def test_workflow(name) {
- timeout(time: 10, unit: 'MINUTES') {
- stage('Deploy - '+ name + ' workflow') {
- def extraHelmFlags = "${extraHelmFlags} --set global.log_level=DEBUG,onu=1,pon=1 "
+def clusterName = "kind-ci"
- if (gerritProject != "") {
- extraHelmFlags = extraHelmFlags + getVolthaImageFlags("${gerritProject}")
+def execute_test(testTarget, workflow, teardown, testSpecificHelmFlags = "") {
+ def infraNamespace = "default"
+ def volthaNamespace = "voltha"
+ def robotLogsDir = "RobotLogs"
+ stage('Cleanup') {
+ if (teardown) {
+ timeout(15) {
+ script {
+ helmTeardown(["default", infraNamespace, volthaNamespace])
}
-
- def localCharts = false
- if (gerritProject == "voltha-helm-charts") {
- localCharts = true
+ timeout(1) {
+ sh returnStdout: false, script: '''
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
+ '''
}
-
- volthaDeploy([
- workflow: name,
- extraHelmFlags:extraHelmFlags,
- localCharts: localCharts,
- dockerRegistry: "mirror.registry.opennetworking.org"
- ])
- // start logging
- sh """
- mkdir -p $WORKSPACE/${name}
- _TAG=kail-${name} kail -n infra -n voltha > $WORKSPACE/${name}/onos-voltha-combined.log &
- """
- // forward ONOS and VOLTHA ports
- sh """
- _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101&
- _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181&
- _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha svc/voltha-voltha-api 55555:55555&
- """
+ }
}
}
- stage('Test VOLTHA - '+ name + ' workflow') {
+ stage('Deploy Voltha') {
+ if (teardown) {
+ timeout(10) {
+ script {
+
+ sh """
+ mkdir -p $WORKSPACE/${testTarget}-components
+ _TAG=kail-startup kail -n ${infraNamespace} -n ${volthaNamespace} > $WORKSPACE/${testTarget}-components/onos-voltha-startup-combined.log &
+ """
+
+ // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
+ def localCharts = false
+ if (volthaHelmChartsChange != "" || gerritProject == "voltha-helm-charts") {
+ localCharts = true
+ }
+
+ // NOTE temporary workaround expose ONOS node ports
+ def localHelmFlags = extraHelmFlags.trim() + " --set global.log_level=${logLevel.toUpperCase()} " +
+ " --set onos-classic.onosSshPort=30115 " +
+ " --set onos-classic.onosApiPort=30120 " +
+ " --set onos-classic.onosOfPort=31653 " +
+ " --set onos-classic.individualOpenFlowNodePorts=true " + testSpecificHelmFlags
+
+ if (gerritProject != "") {
+ localHelmFlags = "${localHelmFlags} " + getVolthaImageFlags("${gerritProject}")
+ }
+
+ volthaDeploy([
+ infraNamespace: infraNamespace,
+ volthaNamespace: volthaNamespace,
+ workflow: workflow.toLowerCase(),
+ extraHelmFlags: localHelmFlags,
+ localCharts: localCharts,
+ bbsimReplica: olts.toInteger(),
+ dockerRegistry: registry,
+ ])
+ }
+
+ // stop logging
+ sh """
+ P_IDS="\$(ps e -ww -A | grep "_TAG=kail-startup" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_IDS" ]; then
+ echo \$P_IDS
+ for P_ID in \$P_IDS; do
+ kill -9 \$P_ID
+ done
+ fi
+ cd $WORKSPACE/${testTarget}-components/
+ gzip -k onos-voltha-startup-combined.log
+ rm onos-voltha-startup-combined.log
+ """
+ }
sh """
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/${name.toUpperCase()}Workflow"
- mkdir -p \$ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -e PowerSwitch"
-
- # By default, all tests tagged 'sanity' are run. This covers basic functionality
- # like running through the ATT workflow for a single subscriber.
- export TARGET=sanity-kind-${name}
-
- # If the Gerrit comment contains a line with "functional tests" then run the full
- # functional test suite. This covers tests tagged either 'sanity' or 'functional'.
- # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
- REGEX="functional tests"
- if [[ "\$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
- export TARGET=functional-single-kind-${name}
- fi
-
- if [[ "${gerritProject}" == "bbsim" ]]; then
- echo "Running BBSim specific Tests"
- export TARGET=sanity-bbsim-${name}
- fi
-
- export VOLTCONFIG=$HOME/.volt/config
- export KUBECONFIG=$HOME/.kube/config
-
- # Run the specified tests
- make -C $WORKSPACE/voltha-system-tests \$TARGET || true
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
+ bbsimDmiPortFwd=50075
+ for i in {0..${olts.toInteger() - 1}}; do
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="bbsim\${i}" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/bbsim\${i} \${bbsimDmiPortFwd}:50075; done"&
+ ((bbsimDmiPortFwd++))
+ done
+ ps aux | grep port-forward
"""
- // stop logging
- sh """
- P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${name}" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_IDS" ]; then
- echo \$P_IDS
- for P_ID in \$P_IDS; do
- kill -9 \$P_ID
- done
- fi
- """
- // remove port-forwarding
- sh """
- # remove orphaned port-forward from different namespaces
- ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
- """
- // collect pod details
- getPodsInfo("$WORKSPACE/${name}")
- helmTeardown(['infra', 'voltha'])
+ }
}
+ stage('Run test ' + testTarget + ' on ' + workflow + ' workFlow') {
+ // start logging
+ sh """
+ mkdir -p $WORKSPACE/${testTarget}-components
+ _TAG=kail-${workflow} kail -n ${infraNamespace} -n ${volthaNamespace} > $WORKSPACE/${testTarget}-components/onos-voltha-combined.log &
+ """
+ sh """
+ mkdir -p $WORKSPACE/${robotLogsDir}/${testTarget}-robot
+ export ROBOT_MISC_ARGS="-d $WORKSPACE/${robotLogsDir}/${testTarget}-robot "
+ ROBOT_MISC_ARGS+="-v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120 -v INFRA_NAMESPACE:${infraNamespace}"
+ export KVSTOREPREFIX=voltha/voltha_voltha
+
+ make -C $WORKSPACE/voltha-system-tests ${testTarget} || true
+ """
+ // stop logging
+ sh """
+ P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${workflow}" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_IDS" ]; then
+ echo \$P_IDS
+ for P_ID in \$P_IDS; do
+ kill -9 \$P_ID
+ done
+ fi
+ cd $WORKSPACE/${testTarget}-components/
+ rm onos-voltha-combined.log.gz || true
+ gzip -k onos-voltha-combined.log
+ rm onos-voltha-combined.log
+ """
+ getPodsInfo("$WORKSPACE/${testTarget}-components")
+ }
+}
+
+def collectArtifacts(exitStatus) {
+ getPodsInfo("$WORKSPACE/${exitStatus}")
+ sh """
+ kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/${exitStatus}/voltha.log || true
+ """
+ archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.txt,**/*.html'
+ sh '''
+ sync
+ pkill kail || true
+ which voltctl
+ md5sum $(which voltctl)
+ '''
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: "RobotLogs/*/log*.html",
+ otherFiles: '',
+ outputFileName: "RobotLogs/*/output*.xml",
+ outputPath: '.',
+ passThreshold: 100,
+ reportFileName: "RobotLogs/*/report*.html",
+ unstableThreshold: 0,
+ onlyCritical: true]);
}
pipeline {
@@ -113,14 +170,16 @@
label "${params.buildNode}"
}
options {
- timeout(time: 35, unit: 'MINUTES')
+ timeout(time: "${timeout}", unit: 'MINUTES')
}
environment {
- PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
- KUBECONFIG="$HOME/.kube/kind-config-${clusterName}"
+ KUBECONFIG="$HOME/.kube/kind-${clusterName}"
+ VOLTCONFIG="$HOME/.volt/config"
+ PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ROBOT_MISC_ARGS="-e PowerSwitch ${params.extraRobotArgs}"
+ DIAGS_PROFILE="VOLTHA_PROFILE"
}
-
- stages{
+ stages {
stage('Download Code') {
steps {
getVolthaCode([
@@ -133,6 +192,12 @@
}
}
stage('Build patch') {
+ // build the patch only if gerritProject is specified
+ when {
+ expression {
+ return !gerritProject.isEmpty()
+ }
+ }
steps {
// NOTE that the correct patch has already been checked out
// during the getVolthaCode step
@@ -141,12 +206,14 @@
}
stage('Create K8s Cluster') {
steps {
- createKubernetesCluster([nodes: 3])
- }
- }
- stage('Load image in kind nodes') {
- steps {
- loadToKind()
+ script {
+ def clusterExists = sh returnStdout: true, script: """
+ kind get clusters | grep ${clusterName} | wc -l
+ """
+ if (clusterExists.trim() == "0") {
+ createKubernetesCluster([nodes: 3, name: clusterName])
+ }
+ }
}
}
stage('Replace voltctl') {
@@ -163,48 +230,43 @@
"""
}
}
- stage('Run Test') {
- steps {
- timeout(time: 30, unit: 'MINUTES') {
- test_workflow("att")
- test_workflow("dt")
- test_workflow("tt")
+ stage('Load image in kind nodes') {
+ when {
+ expression {
+ return !gerritProject.isEmpty()
}
}
+ steps {
+ loadToKind()
+ }
+ }
+ stage('Parse and execute tests') {
+ steps {
+ script {
+ def tests = readYaml text: testTargets
+
+ for(int i = 0;i<tests.size();i++) {
+ def test = tests[i]
+ def target = test["target"]
+ def workflow = test["workflow"]
+ def flags = test["flags"]
+ def teardown = test["teardown"].toBoolean()
+ println "Executing test ${target} on workflow ${workflow} with extra flags ${flags}"
+ execute_test(target, workflow, teardown, flags)
+ }
+ }
+ }
}
}
-
post {
aborted {
- getPodsInfo("$WORKSPACE/failed")
- sh """
- kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.log || true
- """
- archiveArtifacts artifacts: '**/*.log,**/*.txt'
+ collectArtifacts("aborted")
}
failure {
- getPodsInfo("$WORKSPACE/failed")
- sh """
- kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.logs || true
- """
- archiveArtifacts artifacts: '**/*.log,**/*.txt'
+ collectArtifacts("failed")
}
always {
- sh '''
- gzip $WORKSPACE/att/onos-voltha-combined.log || true
- gzip $WORKSPACE/dt/onos-voltha-combined.log || true
- gzip $WORKSPACE/tt/onos-voltha-combined.log || true
- '''
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'RobotLogs/*/log*.html',
- otherFiles: '',
- outputFileName: 'RobotLogs/*/output*.xml',
- outputPath: '.',
- passThreshold: 100,
- reportFileName: 'RobotLogs/*/report*.html',
- unstableThreshold: 0]);
- archiveArtifacts artifacts: '*.log,**/*.log,**/*.gz,*.gz,*.txt,**/*.txt'
+ collectArtifacts("always")
}
}
}
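
The new "Parse and execute tests" stage expects the testTargets job parameter to be a YAML list with target, workflow, flags and teardown keys, and calls execute_test once per entry. A sketch of the expected shape, with example values only (the targets and flags below are illustrative, not taken from the job configs); teardown stays quoted so the pipeline's toBoolean() call operates on a string:

    // Illustrative only: the structure readYaml parses out of the testTargets parameter.
    def testTargets = '''
    - target: sanity-bbsim-att
      workflow: ATT
      flags: "--set onu=2,pon=2"
      teardown: "true"
    - target: sanity-bbsim-dt
      workflow: DT
      flags: ""
      teardown: "false"
    '''
    def tests = readYaml text: testTargets
    tests.each { test ->
        execute_test(test['target'], test['workflow'], test['teardown'].toBoolean(), test['flags'])
    }
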
diff --git a/jjb/pipeline/voltha/master/physical-build.groovy b/jjb/pipeline/voltha/master/physical-build.groovy
index c791e91..dd339f2 100644
--- a/jjb/pipeline/voltha/master/physical-build.groovy
+++ b/jjb/pipeline/voltha/master/physical-build.groovy
@@ -25,6 +25,12 @@
def infraNamespace = "infra"
def volthaNamespace = "voltha"
+def deploy_custom_oltAdapterChart(namespace, name, chart, extraHelmFlags) {
+ sh """
+ helm install --create-namespace --set defaults.image_pullPolicy=Always --namespace ${namespace} ${extraHelmFlags} ${name} ${chart}
+ """
+}
+
pipeline {
/* no label, executor is determined by JJB */
@@ -44,6 +50,7 @@
steps {
getVolthaCode([
branch: "${branch}",
+ volthaSystemTestsChange: "${volthaSystemTestsChange}",
volthaHelmChartsChange: "${volthaHelmChartsChange}",
])
}
@@ -51,7 +58,7 @@
stage ("Parse deployment configuration file") {
steps {
sh returnStdout: true, script: "rm -rf ${configBaseDir}"
- sh returnStdout: true, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
+ sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
script {
if ( params.workFlow == "DT" ) {
deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
@@ -93,38 +100,52 @@
}
// should the config file be suffixed with the workflow? see "deployment_config"
- def extraHelmFlags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml --set global.log_level=${logLevel} "
+ def localHelmFlags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml --set global.log_level=${logLevel} "
if (workFlow.toLowerCase() == "dt") {
- extraHelmFlags += " --set radius.enabled=false "
+ localHelmFlags += " --set radius.enabled=false "
}
if (workFlow.toLowerCase() == "tt") {
- extraHelmFlags += " --set radius.enabled=false --set global.incremental_evto_update=true "
+ localHelmFlags += " --set radius.enabled=false --set global.incremental_evto_update=true "
+ if (enableMultiUni.toBoolean()) {
+ localHelmFlags += " --set voltha-adapter-openonu.adapter_open_onu.uni_port_mask=${uniPortMask} "
+ }
}
// NOTE temporary workaround expose ONOS node ports (pod-config needs to be updated to contain these values)
- extraHelmFlags = extraHelmFlags + " --set onos-classic.onosSshPort=30115 " +
+ // and to connect the ofagent to all instances of ONOS
+ localHelmFlags = localHelmFlags + " --set onos-classic.onosSshPort=30115 " +
"--set onos-classic.onosApiPort=30120 " +
"--set onos-classic.onosOfPort=31653 " +
- "--set onos-classic.individualOpenFlowNodePorts=true "
+ "--set onos-classic.individualOpenFlowNodePorts=true " +
+ "--set voltha.onos_classic.replicas=${params.NumOfOnos}"
- def bbsimReplica = 0
- if (installBBSim.toBoolean()) {
- bbsimReplica = 1
- extraHelmFlags = extraHelmFlags + " --set onu=${onuNumber},pon=${ponNumber} "
+ if (bbsimReplicas.toInteger() != 0) {
+ localHelmFlags = localHelmFlags + " --set onu=${onuNumber},pon=${ponNumber} "
+ }
+
+ // adding user specified helm flags at the end so they'll have priority over everything else
+ localHelmFlags = localHelmFlags + " ${extraHelmFlags}"
+
+ if(openoltAdapterChart != "onf/voltha-adapter-openolt") {
+ localHelmFlags = localHelmFlags + " --set voltha-adapter-openolt.enabled=false"
}
volthaDeploy([
workflow: workFlow.toLowerCase(),
- extraHelmFlags: extraHelmFlags,
+ extraHelmFlags: localHelmFlags,
localCharts: localCharts,
kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
onosReplica: params.NumOfOnos,
atomixReplica: params.NumOfAtomix,
kafkaReplica: params.NumOfKafka,
etcdReplica: params.NumOfEtcd,
- bbsimReplica: bbsimReplica.toInteger(),
+ bbsimReplica: bbsimReplicas.toInteger(),
])
+
+ if(openoltAdapterChart != "onf/voltha-adapter-openolt"){
+ deploy_custom_oltAdapterChart(volthaNamespace, oltAdapterReleaseName, openoltAdapterChart, extraHelmFlags)
+ }
}
sh """
JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
@@ -142,21 +163,33 @@
if ( params.configurePod && params.profile != "Default" ) {
for(int i=0; i < deployment_config.olts.size(); i++) {
def tech_prof_directory = "XGS-PON"
- // If no debian package is specified we default to GPON for the ADTRAN OLT.
- if (!deployment_config.olts[i].containsKey("oltDebVersion") || deployment_config.olts[i].oltDebVersion.contains("asgvolt64")){
- tech_prof_directory = "GPON"
+ if (deployment_config.olts[i].containsKey("board_technology")){
+ tech_prof_directory = deployment_config.olts[i]["board_technology"]
}
timeout(1) {
sh returnStatus: true, script: """
export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
- etcd_container=\$(kubectl get pods -n ${infraNamespace} | grep etcd | awk 'NR==1{print \$1}')
+ etcd_container=\$(kubectl get pods -n ${infraNamespace} -l app.kubernetes.io/name=etcd --no-headers | awk 'NR==1{print \$1}')
if [[ "${workFlow}" == "TT" ]]; then
- kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
- kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65'
- kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST.json \$etcd_container:/tmp/mcast.json
- kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
+ if [[ "${params.enableMultiUni}" == "true" ]]; then
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-HSIA.json \$etcd_container:/tmp/hsia.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-VoIP.json \$etcd_container:/tmp/voip.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-MCAST-AdditionalBW-None.json \$etcd_container:/tmp/mcast_additionalBW_none.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_none.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast_additionalBW_na.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_na.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/67'
+ else
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-None.json \$etcd_container:/tmp/mcast_additionalBW_none.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_none.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast_additionalBW_na.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_na.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/67'
+ fi
else
kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json \$etcd_container:/tmp/flexpod.json
kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
@@ -166,7 +199,7 @@
timeout(1) {
sh returnStatus: true, script: """
export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
- etcd_container=\$(kubectl get pods -n ${infraNamespace} | grep etcd | awk 'NR==1{print \$1}')
+ etcd_container=\$(kubectl get pods -n ${infraNamespace} -l app.kubernetes.io/name=etcd --no-headers | awk 'NR==1{print \$1}')
kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'ETCDCTL_API=3 etcdctl get --prefix service/voltha/technology_profiles/${tech_prof_directory}/64'
"""
}
@@ -179,7 +212,7 @@
steps {
sh """
export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
- etcd_container=\$(kubectl get pods -n ${infraNamespace} | grep etcd | awk 'NR==1{print \$1}')
+ etcd_container=\$(kubectl get pods -n ${infraNamespace} -l app.kubernetes.io/name=etcd --no-headers | awk 'NR==1{print \$1}')
kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/MIB_Alpha.json \$etcd_container:/tmp/MIB_Alpha.json
kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/go_templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
@@ -225,6 +258,8 @@
sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.dhcpl2relay"
sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.aaa"
sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.olt"
+ #TRACE in the pipeliner is too chatty, moving to DEBUG
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.opencord.olt.driver"
sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager"
sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager"
curl -sSL --user karaf:karaf -X GET http://${deployment_config.nodes[0].ip}:30120/onos/v1/applications/org.onosproject.segmentrouting | jq '.state' | grep ACTIVE
@@ -235,7 +270,7 @@
return sr_active_out == 0
}
}
- timeout(5) {
+ timeout(7) {
for(int i=0; i < deployment_config.hosts.src.size(); i++) {
for(int j=0; j < deployment_config.olts.size(); j++) {
def aggPort = -1
@@ -263,24 +298,11 @@
for(int i=0; i < deployment_config.olts.size(); i++) {
// NOTE what is oltDebVersion23? is that for VOLTHA-2.3? do we still need this differentiation?
sh returnStdout: true, script: """
- if [[ "${branch}" != "master" ]] && [[ "${params.inBandManagement}" == "true" ]]; then
- ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
+ ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
+ if [ "${params.inBandManagement}" == "true" ]; then
sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'kill -9 `pgrep -f "[b]ash /opt/openolt/openolt_dev_mgmt_daemon_process_watchdog"` || true'
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion23}"
fi
- if [[ "${branch}" != "master" ]] && [[ "${params.inBandManagement}" == "false" ]]; then
- ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion23}"
- fi
- if [[ "${branch}" == "master" ]] && [[ "${params.inBandManagement}" == "true" ]]; then
- ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'kill -9 `pgrep -f "[b]ash /opt/openolt/openolt_dev_mgmt_daemon_process_watchdog"` || true'
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion}"
- fi
- if [[ "${branch}" == "master" ]] && [[ "${params.inBandManagement}" == "false" ]]; then
- ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion}"
- fi
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion}"
sleep 10
"""
timeout(5) {
@@ -312,28 +334,39 @@
stage('Restart OLT processes') {
steps {
script {
+ //rebooting OLTs
for(int i=0; i < deployment_config.olts.size(); i++) {
- int waitTimerForOltUp = 360
- if ( params.inBandManagement ) {
- waitTimerForOltUp = 540
- }
- timeout(15) {
+ if ( params.oltAdapterReleaseName != "open-olt" ) {
+ timeout(15) {
+ sh returnStdout: true, script: """
+ ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; rm -f /var/log/openolt_process_watchdog.log; reboot > /dev/null &' || true
+ """
+ }
+ } else {
sh returnStdout: true, script: """
ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
- sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; rm -f /var/log/openolt_process_watchdog.log; reboot -f > /dev/null &' || true
- sleep ${waitTimerForOltUp}
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'reboot > /dev/null &' || true
"""
}
- timeout(15) {
- waitUntil {
- devprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep dev_mgmt_daemon | wc -l'"
- return devprocess.toInteger() > 0
+ }
+ sh returnStdout: true, script: """
+ sleep ${params.waitTimerForOltUp}
+ """
+          //Checking the dev_mgmt_daemon and openolt processes
+ for(int i=0; i < deployment_config.olts.size(); i++) {
+ if ( params.oltAdapterReleaseName != "open-olt" ) {
+ timeout(15) {
+ waitUntil {
+ devprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep dev_mgmt_daemon | wc -l'"
+ return devprocess.toInteger() > 0
+ }
}
- }
- timeout(15) {
- waitUntil {
- openoltprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep openolt | wc -l'"
- return openoltprocess.toInteger() > 0
+ timeout(15) {
+ waitUntil {
+ openoltprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep openolt | wc -l'"
+ return openoltprocess.toInteger() > 0
+ }
}
}
}
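
The Deploy stage above assembles localHelmFlags from the pod config first and appends the operator-supplied extraHelmFlags last, so that on a repeated --set key the user value is the one helm keeps. A minimal sketch of that ordering rule (assembleHelmFlags and podConfigFile are illustrative names):

    // Sketch only: defaults derived from the pod config go first, user flags go last and win.
    def assembleHelmFlags(String podConfigFile, String logLevel, String extraHelmFlags) {
        def flags = "-f ${podConfigFile} --set global.log_level=${logLevel} "
        flags += "--set onos-classic.onosSshPort=30115 --set onos-classic.onosApiPort=30120 "
        // anything passed by the operator overrides the defaults assembled above
        return flags + " " + extraHelmFlags.trim()
    }
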
diff --git a/jjb/pipeline/voltha/master/software-upgrades.groovy b/jjb/pipeline/voltha/master/software-upgrades.groovy
index 3877af3..de6f8ce 100644
--- a/jjb/pipeline/voltha/master/software-upgrades.groovy
+++ b/jjb/pipeline/voltha/master/software-upgrades.groovy
@@ -22,22 +22,16 @@
])
def test_software_upgrade(name) {
stage('Deploy Voltha - '+ name) {
- def extraHelmFlags = "${extraHelmFlags} --set global.log_level=DEBUG,onu=1,pon=1 --set onos-classic.replicas=3,onos-classic.atomix.replicas=3 "
+ def extraHelmFlags = extraHelmFlags.trim()
+ extraHelmFlags = extraHelmFlags + " --set global.log_level=DEBUG,onu=1,pon=1 --set onos-classic.replicas=3,onos-classic.atomix.replicas=3 "
if ("${name}" == "onos-app-upgrade" || "${name}" == "onu-software-upgrade") {
extraHelmFlags = extraHelmFlags + "--set global.image_tag=master --set onos-classic.image.tag=master "
}
if ("${name}" == "voltha-component-upgrade") {
extraHelmFlags = extraHelmFlags + "--set images.onos_config_loader.tag=master-onos-config-loader --set onos-classic.image.tag=master "
}
- extraHelmFlags = extraHelmFlags + """ --set voltha.services.controller[0].service=voltha-infra-onos-classic-0.voltha-infra-onos-classic-hs.infra.svc \
- --set voltha.services.controller[0].port=6653 \
- --set voltha.services.controller[0].address=voltha-infra-onos-classic-0.voltha-infra-onos-classic-hs.infra.svc:6653 \
- --set voltha.services.controller[1].service=voltha-infra-onos-classic-1.voltha-infra-onos-classic-hs.infra.svc \
- --set voltha.services.controller[1].port=6653 \
- --set voltha.services.controller[1].address=voltha-infra-onos-classic-1.voltha-infra-onos-classic-hs.infra.svc:6653 \
- --set voltha.services.controller[2].service=voltha-infra-onos-classic-2.voltha-infra-onos-classic-hs.infra.svc \
- --set voltha.services.controller[2].port=6653 \
- --set voltha.services.controller[2].address=voltha-infra-onos-classic-2.voltha-infra-onos-classic-hs.infra.svc:6653 """
+ extraHelmFlags = extraHelmFlags + " --set onos-classic.onosSshPort=30115 --set onos-classic.onosApiPort=30120 "
+ extraHelmFlags = extraHelmFlags + " --set voltha.onos_classic.replicas=3"
//ONOS custom image handling
if ( onosImg.trim() != '' ) {
String[] split;
@@ -57,9 +51,9 @@
"""
// forward ONOS and VOLTHA ports
sh """
- _TAG=onos-port-forward bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101; done &"
- _TAG=onos-port-forward bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181; done &"
- _TAG=port-forward-voltha-api bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha svc/voltha-voltha-api 55555:55555; done &"
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8101:8101; done 2>&1 " &
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8181:8181; done 2>&1 " &
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG=port-forward-voltha-api /bin/bash -c "while true; do kubectl -n voltha port-forward --address 0.0.0.0 service/voltha-voltha-api 55555:55555; done 2>&1 " &
"""
sh """
sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set DEBUG org.opencord
@@ -113,11 +107,12 @@
export TARGET=voltha-comp-upgrade-test
fi
if [[ ${name} == 'onu-software-upgrade' ]]; then
- export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v onu_image_name:${onuImageName.trim()} -v onu_image_url:${onuImageUrl.trim()} -v onu_image_version:${onuImageVersion.trim()} -v onu_image_crc:${onuImageCrc.trim()} -v onu_image_local_dir:${onuImageLocalDir.trim()} -e PowerSwitch"
+ export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v image_version:${onuImageVersion.trim()} -v image_url:${onuImageUrl.trim()} -v image_vendor:${onuImageVendor.trim()} -v image_activate_on_success:${onuImageActivateOnSuccess.trim()} -v image_commit_on_success:${onuImageCommitOnSuccess.trim()} -v image_crc:${onuImageCrc.trim()} -e PowerSwitch"
export TARGET=onu-upgrade-test
fi
export VOLTCONFIG=$HOME/.volt/config-minimal
export KUBECONFIG=$HOME/.kube/kind-config-voltha-minimal
+ ROBOT_MISC_ARGS+=" -v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120"
# Run the specified tests
make -C $WORKSPACE/voltha-system-tests \$TARGET || true
"""
@@ -190,22 +185,6 @@
helmTeardown(['infra', 'voltha'])
}
}
- stage('Install latest voltctl') {
- steps {
- sh """
- mkdir -p $WORKSPACE/bin || true
- # install voltctl
- HOSTOS="\$(uname -s | tr "[:upper:]" "[:lower:"])"
- HOSTARCH="\$(uname -m | tr "[:upper:]" "[:lower:"])"
- if [ "\$HOSTARCH" == "x86_64" ]; then
- HOSTARCH="amd64"
- fi
- VC_VERSION="\$(curl --fail -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')"
- curl -Lo $WORKSPACE/bin/voltctl https://github.com/opencord/voltctl/releases/download/v\$VC_VERSION/voltctl-\$VC_VERSION-\$HOSTOS-\$HOSTARCH
- chmod +x $WORKSPACE/bin/voltctl
- """
- }
- }
stage('Create K8s Cluster') {
steps {
createKubernetesCluster([nodes: 3])
@@ -240,7 +219,8 @@
outputPath: '.',
passThreshold: 100,
reportFileName: 'RobotLogs/*/report*.html',
- unstableThreshold: 0]);
+ unstableThreshold: 0,
+ onlyCritical: true]);
archiveArtifacts artifacts: '*.log,**/*.log,**/*.gz,*.gz,*.txt,**/*.txt'
}
}
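
The rewritten port-forward lines wrap kubectl in a while-true loop so a dropped tunnel is immediately re-established, and set JENKINS_NODE_COOKIE="dontKillMe" so the Jenkins process reaper does not kill the forward once the sh step returns. A sketch of that pattern as a reusable helper (portForward is a hypothetical name, not defined in the pipeline):

    // Sketch only: long-lived background port-forward in the style used above.
    def portForward(String namespace, String service, int port, String tag) {
        sh """
        JENKINS_NODE_COOKIE="dontKillMe" _TAG=${tag} /bin/bash -c "while true; do kubectl -n ${namespace} port-forward --address 0.0.0.0 service/${service} ${port}:${port}; done 2>&1 " &
        """
    }

For example, portForward("infra", "voltha-infra-onos-classic-hs", 8101, "onos-port-forward") would keep the ONOS SSH port reachable for the duration of the job.
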
diff --git a/jjb/pipeline/voltha/master/tucson-build-and-test.groovy b/jjb/pipeline/voltha/master/tucson-build-and-test.groovy
index 3c3b942..1a96607 100644
--- a/jjb/pipeline/voltha/master/tucson-build-and-test.groovy
+++ b/jjb/pipeline/voltha/master/tucson-build-and-test.groovy
@@ -35,6 +35,9 @@
PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
KUBECONFIG="$HOME/.kube/kind-${clusterName}"
VOLTCONFIG="$HOME/.volt/config"
+ LOG_FOLDER="$WORKSPACE/${workflow}/"
+ APPS_TO_LOG="etcd kafka onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server onos-config-loader"
+
}
stages{
stage('Download Code') {
@@ -51,7 +54,7 @@
stage ("Parse deployment configuration file") {
steps {
sh returnStdout: true, script: "rm -rf ${configBaseDir}"
- sh returnStdout: true, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
+ sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
script {
if (params.workflow.toUpperCase() == "TT") {
@@ -138,9 +141,22 @@
}
// start logging
sh """
+ rm -rf $WORKSPACE/${workFlow}/
mkdir -p $WORKSPACE/${workFlow}
_TAG=kail-${workFlow} kail -n infra -n voltha > $WORKSPACE/${workFlow}/onos-voltha-combined.log &
"""
+ sh returnStdout: false, script: '''
+ # start logging with kail
+
+ mkdir -p $LOG_FOLDER
+
+ list=($APPS_TO_LOG)
+ for app in "${list[@]}"
+ do
+ echo "Starting logs for: ${app}"
+ _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
+ done
+ '''
sh """
JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
@@ -187,6 +203,8 @@
sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.dhcpl2relay"
sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.aaa"
sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.olt"
+ #TRACE in the pipeliner is too chatty, moving to DEBUG
+ sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.opencord.olt.driver"
sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager"
sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager"
@@ -314,6 +332,22 @@
fi
gzip $WORKSPACE/${workFlow}/onos-voltha-combined.log || true
"""
+ sh '''
+ # stop the kail processes
+ list=($APPS_TO_LOG)
+ for app in "${list[@]}"
+ do
+ echo "Stopping logs for: ${app}"
+ _TAG="kail-$app"
+ P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
+ if [ -n "$P_IDS" ]; then
+ echo $P_IDS
+ for P_ID in $P_IDS; do
+ kill -9 $P_ID
+ done
+ fi
+ done
+ '''
step([$class: 'RobotPublisher',
disableArchiveOutput: false,
logFileName: 'RobotLogs/log*.html',
@@ -322,8 +356,9 @@
outputPath: '.',
passThreshold: 100,
reportFileName: 'RobotLogs/report*.html',
- unstableThreshold: 0]);
- archiveArtifacts artifacts: '**/*.txt,**/*.gz,*.gz'
+ unstableThreshold: 0,
+ onlyCritical: true]);
+ archiveArtifacts artifacts: '**/*.txt,**/*.gz,*.gz,**/*.log'
}
}
}
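
Each kail logger started above is launched with a _TAG=<app> marker in its environment; the always block later finds those processes again by grepping the output of "ps e" (which prints each process's environment) and stops them, even though the sh step that started them has long since returned. A sketch of that lookup as a helper (stopTaggedProcesses is an illustrative name):

    // Sketch only: find and stop background processes by the _TAG marker in their environment.
    def stopTaggedProcesses(String tag) {
        sh """
        P_IDS="\$(ps e -ww -A | grep "_TAG=${tag}" | grep -v grep | awk '{print \$1}')"
        if [ -n "\$P_IDS" ]; then
          for P_ID in \$P_IDS; do
            kill -9 \$P_ID
          done
        fi
        """
    }
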
diff --git a/jjb/pipeline/voltha-dt-physical-functional-tests.groovy b/jjb/pipeline/voltha/master/voltha-dt-physical-functional-tests.groovy
similarity index 73%
copy from jjb/pipeline/voltha-dt-physical-functional-tests.groovy
copy to jjb/pipeline/voltha/master/voltha-dt-physical-functional-tests.groovy
index 0f9d373..a551fa4 100644
--- a/jjb/pipeline/voltha-dt-physical-functional-tests.groovy
+++ b/jjb/pipeline/voltha/master/voltha-dt-physical-functional-tests.groovy
@@ -12,6 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
node {
// Need this so that deployment_config has global scope when it's read later
deployment_config = null
@@ -23,7 +29,7 @@
label "${params.buildNode}"
}
options {
- timeout(time: 640, unit: 'MINUTES')
+ timeout(time: "${timeout}", unit: 'MINUTES')
}
environment {
@@ -33,26 +39,9 @@
}
stages {
- stage('Clone kind-voltha') {
- steps {
- step([$class: 'WsCleanup'])
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
stage('Clone voltha-system-tests') {
steps {
+ step([$class: 'WsCleanup'])
checkout([
$class: 'GitSCM',
userRemoteConfigs: [[
@@ -76,23 +65,6 @@
}
}
}
- stage('Clone cord-tester') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/cord-tester",
- refspec: "${cordTesterChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
// This checkout allows us to show changes in Jenkins
// we only do this on master as we don't branch all the repos for all the releases
// (we should compute the difference by tracking the container version, not the code)
@@ -120,7 +92,7 @@
}
stage ('Initialize') {
steps {
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
+ sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
script {
deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
}
@@ -128,9 +100,8 @@
mkdir -p $WORKSPACE/bin
bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
cd $WORKSPACE
- if [ "${params.branch}" != "master" ]; then
- cd $WORKSPACE/kind-voltha
- source releases/${params.branch}
+ if [ "${params.branch}" == "voltha-2.8" ]; then
+ VOLTCTL_VERSION=1.6.11
else
VOLTCTL_VERSION=\$(curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')
fi
@@ -172,10 +143,6 @@
}
steps {
sh """
- cd $WORKSPACE/kind-voltha/scripts
- ./log-collector.sh > /dev/null &
- ./log-combine.sh > /dev/null &
-
mkdir -p $ROBOT_LOGS_DIR
if ( ${powerSwitch} ); then
export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanityDt -i functionalDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
@@ -227,11 +194,11 @@
ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ONOSHAScenarios"
}
steps {
- sh """
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
- make -C $WORKSPACE/voltha-system-tests voltha-test || true
- """
+ sh """
+ mkdir -p $ROBOT_LOGS_DIR
+ export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
+ make -C $WORKSPACE/voltha-system-tests voltha-test || true
+ """
}
}
@@ -272,62 +239,14 @@
}
post {
always {
+ getPodsInfo("$WORKSPACE/pods")
sh returnStdout: false, script: '''
set +e
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -n voltha -o wide
- kubectl get pods -o wide
- sleep 60 # Wait for log-collector and log-combine to complete
-
- # Clean up "announcer" pod used by the tests if present
- kubectl delete pod announcer || true
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
- extract_errors_python onos >> $WORKSPACE/error-report.log
-
- gzip error-report.log || true
- rm error-report.log || true
-
- cd $WORKSPACE/kind-voltha/scripts/logger/combined/
- tar czf $WORKSPACE/container-logs.tgz *
- rm * || true
-
+          # compress the logs collected by the Robot Framework StartLogging keyword
cd $WORKSPACE
gzip *-combined.log || true
rm *-combined.log || true
-
- # store information on running charts
- helm ls > $WORKSPACE/helm-list.txt || true
-
- # store information on the running pods
- kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-imagesId.txt || true
-
- # collect ETCD cluster logs
- mkdir -p $WORKSPACE/etcd
- printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
'''
script {
deployment_config.olts.each { olt ->
@@ -353,9 +272,10 @@
outputPath: 'RobotLogs',
passThreshold: 100,
reportFileName: '**/report*.html',
- unstableThreshold: 0
+ unstableThreshold: 0,
+ onlyCritical: true
]);
- archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log,*.txt'
+ archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
}
}
}
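
The Initialize stage now pins voltctl per branch: voltha-2.8 installs 1.6.11, anything else resolves the latest release from the GitHub API. A sketch of that selection as a helper, assuming curl and jq are available on the build node as in the pipeline above (voltctlVersionFor is an illustrative name):

    // Sketch only: branch-pinned voltctl version resolution used by the Initialize stage.
    def voltctlVersionFor(String branch) {
        if (branch == "voltha-2.8") {
            return "1.6.11"
        }
        return sh(
            returnStdout: true,
            script: "curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g'"
        ).trim()
    }
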
diff --git a/jjb/pipeline/voltha-physical-functional-tests.groovy b/jjb/pipeline/voltha/master/voltha-physical-functional-tests.groovy
similarity index 77%
rename from jjb/pipeline/voltha-physical-functional-tests.groovy
rename to jjb/pipeline/voltha/master/voltha-physical-functional-tests.groovy
index a2d9c8d..aca802b 100644
--- a/jjb/pipeline/voltha-physical-functional-tests.groovy
+++ b/jjb/pipeline/voltha/master/voltha-physical-functional-tests.groovy
@@ -12,6 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
node {
// Need this so that deployment_config has global scope when it's read later
deployment_config = null
@@ -23,7 +29,7 @@
label "${params.buildNode}"
}
options {
- timeout(time: 380, unit: 'MINUTES')
+ timeout(time: "${timeout}", unit: 'MINUTES')
}
environment {
@@ -32,26 +38,9 @@
PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
}
stages {
- stage('Clone kind-voltha') {
- steps {
- step([$class: 'WsCleanup'])
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
stage('Clone voltha-system-tests') {
steps {
+ step([$class: 'WsCleanup'])
checkout([
$class: 'GitSCM',
userRemoteConfigs: [[
@@ -67,23 +56,6 @@
])
}
}
- stage('Clone cord-tester') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/cord-tester",
- refspec: "${cordTesterChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
stage('Download All the VOLTHA repos') {
when {
expression {
@@ -108,7 +80,7 @@
}
stage ('Initialize') {
steps {
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
+ sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
script {
deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
}
@@ -116,9 +88,8 @@
mkdir -p $WORKSPACE/bin
bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
cd $WORKSPACE
- if [ "${params.branch}" != "master" ]; then
- cd $WORKSPACE/kind-voltha
- source releases/${params.branch}
+ if [ "${params.branch}" == "voltha-2.8" ]; then
+ VOLTCTL_VERSION=1.6.11
else
VOLTCTL_VERSION=\$(curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')
fi
@@ -158,10 +129,6 @@
}
steps {
sh """
- cd $WORKSPACE/kind-voltha/scripts
- ./log-collector.sh > /dev/null &
- ./log-combine.sh > /dev/null &
-
mkdir -p $ROBOT_LOGS_DIR
if ( ${powerSwitch} ); then
export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
@@ -238,54 +205,15 @@
}
post {
always {
+ getPodsInfo("$WORKSPACE/pods")
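+ // NOTE: getPodsInfo is a keyword from the shared cord-jenkins-libraries; it is expected to
+ // dump pod, image and helm chart listings as text files into the given folder (hence the
+ // new pods/*.txt entry in archiveArtifacts below)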
sh returnStdout: false, script: '''
set +e
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -n voltha -o wide
- kubectl get pods -o wide
- sleep 60 # Wait for log-collector and log-combine to complete
-
- # Clean up "announcer" pod used by the tests if present
- kubectl delete pod announcer || true
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
- extract_errors_python onos >> $WORKSPACE/error-report.log
-
- gzip error-report.log || true
- rm error-report.log || true
-
- cd $WORKSPACE/kind-voltha/scripts/logger/combined/
- tar czf $WORKSPACE/container-logs.tgz *
- rm * || true
-
+ # collect the logs produced by the Robot Framework StartLogging keyword
cd $WORKSPACE
gzip *-combined.log || true
rm *-combined.log || true
- # store information on running charts
- helm ls > $WORKSPACE/helm-list.txt || true
-
# store information on the running pods
kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
@@ -320,12 +248,10 @@
outputPath: 'RobotLogs',
passThreshold: 100,
reportFileName: '**/report*.html',
- unstableThreshold: 0
+ unstableThreshold: 0,
+ onlyCritical: true
]);
- archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log'
- }
- unstable {
- step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${notificationEmail}", sendToIndividuals: false])
+ archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
}
}
}
diff --git a/jjb/pipeline/voltha-physical-soak-dt-tests.groovy b/jjb/pipeline/voltha/master/voltha-physical-soak-dt-tests.groovy
similarity index 68%
rename from jjb/pipeline/voltha-physical-soak-dt-tests.groovy
rename to jjb/pipeline/voltha/master/voltha-physical-soak-dt-tests.groovy
index 49b7d29..a9a2005 100644
--- a/jjb/pipeline/voltha-physical-soak-dt-tests.groovy
+++ b/jjb/pipeline/voltha/master/voltha-physical-soak-dt-tests.groovy
@@ -12,18 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
node {
// Need this so that deployment_config has global scope when it's read later
deployment_config = null
}
+def volthaNamespace = "voltha"
+
pipeline {
/* no label, executor is determined by JJB */
agent {
label "${params.buildNode}"
}
options {
- timeout(time: 280, unit: 'MINUTES')
+ timeout(time: "${timeout}", unit: 'MINUTES')
}
environment {
@@ -34,26 +42,9 @@
stages {
- stage('Clone kind-voltha') {
- steps {
- step([$class: 'WsCleanup'])
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
stage('Clone voltha-system-tests') {
steps {
+ step([$class: 'WsCleanup'])
checkout([
$class: 'GitSCM',
userRemoteConfigs: [[
@@ -77,23 +68,6 @@
}
}
}
- stage('Clone cord-tester') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/cord-tester",
- refspec: "${cordTesterChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
// This checkout allows us to show changes in Jenkins
// we only do this on master as we don't branch all the repos for all the releases
// (we should compute the difference by tracking the container version, not the code)
@@ -121,7 +95,7 @@
}
stage ('Initialize') {
steps {
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
+ sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
script {
deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
}
@@ -129,10 +103,8 @@
mkdir -p $WORKSPACE/bin
bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
cd $WORKSPACE
- if [ "${params.branch}" != "master" ]; then
- cd $WORKSPACE/kind-voltha
- source releases/${params.branch}
- VC_VERSION=1.1.8
+ if [ "${params.branch}" == "voltha-2.8" ]; then
+ VC_VERSION=1.6.11
else
VC_VERSION=\$(curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')
fi
@@ -172,16 +144,14 @@
}
steps {
sh """
- cd $WORKSPACE/kind-voltha/scripts
- ./log-collector.sh > /dev/null &
- ./log-combine.sh > /dev/null &
-
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="prometheus" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n cattle-prometheus svc/access-prometheus 31301:80; done"&
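+ # NOTE: the while-true wrapper restarts the port-forward whenever Jenkins kills it
+ # (which sometimes happens even with JENKINS_NODE_COOKIE=dontKillMe), so Prometheus
+ # stays reachable on port 31301 for the sizing script that runs in the post stage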
+ ps aux | grep port-forward
mkdir -p $ROBOT_LOGS_DIR
if [ "${params.testType}" == "Functional" ]; then
if ( ${powerSwitch} ); then
- export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+ export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
else
- export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+ export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
fi
make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
fi
@@ -199,7 +169,7 @@
sh """
mkdir -p $ROBOT_LOGS_DIR
if [ "${params.testType}" == "Failure" ]; then
- export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i soak -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+ export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i soak -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
fi
"""
@@ -216,7 +186,7 @@
sh """
mkdir -p $ROBOT_LOGS_DIR
if [ "${params.testType}" == "Dataplane" ]; then
- export ROBOT_MISC_ARGS="--removekeywords wuks -i BandwidthProfileUDPDt -i TechProfileDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+ export ROBOT_MISC_ARGS="--removekeywords wuks -i soakDataplane -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
fi
"""
@@ -226,44 +196,14 @@
}
post {
always {
+ getPodsInfo("$WORKSPACE/pods")
sh returnStdout: false, script: '''
set +e
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -n voltha -o wide
- sleep 60 # Wait for log-collector and log-combine to complete
-
- # Clean up "announcer" pod used by the tests if present
- kubectl delete pod announcer || true
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
- extract_errors_python onos >> $WORKSPACE/error-report.log
-
- cd $WORKSPACE/kind-voltha/scripts/logger/combined/
- tar czf $WORKSPACE/container-logs.tgz *
-
+ # collect the logs produced by the Robot Framework StartLogging keyword
cd $WORKSPACE
gzip *-combined.log || true
+ rm *-combined.log || true
# collect ETCD cluster logs
mkdir -p $WORKSPACE/etcd
@@ -291,12 +231,18 @@
outputPath: 'RobotLogs',
passThreshold: 100,
reportFileName: '**/report*.html',
- unstableThreshold: 0
+ unstableThreshold: 0,
+ onlyCritical: true
]);
- archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log'
- }
- unstable {
- step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${notificationEmail}", sendToIndividuals: false])
+ // get CPU usage per container
+ sh """
+ mkdir -p $WORKSPACE/plots || true
+ cd $WORKSPACE/voltha-system-tests
+ source ./vst_venv/bin/activate || true
+ sleep 60 # we have to wait for prometheus to collect all the information
+ python scripts/sizing.py -o $WORKSPACE/plots -a 0.0.0.0:31301 -n ${volthaNamespace} -s 3600 || true
+ """
+ archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt,plots/*'
}
}
}
diff --git a/jjb/pipeline/voltha/master/voltha-scale-multi-stack.groovy b/jjb/pipeline/voltha/master/voltha-scale-multi-stack.groovy
index fb5e75c..3c02c4e 100644
--- a/jjb/pipeline/voltha/master/voltha-scale-multi-stack.groovy
+++ b/jjb/pipeline/voltha/master/voltha-scale-multi-stack.groovy
@@ -22,14 +22,6 @@
remote: 'https://gerrit.opencord.org/ci-management.git'
])
-def ofAgentConnections(numOfOnos, releaseName, namespace) {
- def params = " "
- numOfOnos.times {
- params += "--set voltha.services.controller[${it}].address=${releaseName}-onos-classic-${it}.${releaseName}-onos-classic-hs.${namespace}.svc:6653 "
- }
- return params
-}
-
pipeline {
/* no label, executor is determined by JJB */
@@ -45,7 +37,6 @@
SSHPASS="karaf"
PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
- APPS_TO_LOG="etcd kafka onos-onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server"
LOG_FOLDER="$WORKSPACE/logs"
}
@@ -53,10 +44,6 @@
stage ('Cleanup') {
steps {
timeout(time: 11, unit: 'MINUTES') {
- sh """
- # remove orphaned port-forward from different namespaces
- ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
- """
script {
def namespaces = ["infra"]
// FIXME we may have leftovers from more VOLTHA stacks (eg: run1 had 10 stacks, run2 had 2 stacks)
@@ -65,14 +52,24 @@
}
helmTeardown(namespaces)
}
- sh returnStdout: false, script: """
+ sh returnStdout: false, script: '''
helm repo add onf https://charts.opencord.org
- helm repo add cord https://charts.opencord.org
helm repo update
- # remove all port-forward from different namespaces
- ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
- """
+ # remove all persistent volume claims
+ kubectl delete pvc --all-namespaces --all
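+ # PVC deletion is asynchronous, so poll until none are left and the next deployment
+ # starts from clean storage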
+ PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
+ while [[ \$PVCS != 0 ]]; do
+ sleep 5
+ PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
+ done
+
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
+
+ cd $WORKSPACE
+ rm -rf $WORKSPACE/*
+ '''
}
}
}
@@ -81,7 +78,7 @@
getVolthaCode([
branch: "${release}",
volthaSystemTestsChange: "${volthaSystemTestsChange}",
- //volthaHelmChartsChange: "${volthaHelmChartsChange}",
+ volthaHelmChartsChange: "${volthaHelmChartsChange}",
])
}
}
@@ -98,27 +95,55 @@
'''
}
}
+ stage('Start logging') {
+ steps {
+ script {
+ startComponentsLogs([
+ appsToLog: [
+ 'app.kubernetes.io/name=etcd',
+ 'app.kubernetes.io/name=kafka',
+ 'app=onos-classic',
+ 'app=adapter-open-onu',
+ 'app=adapter-open-olt',
+ 'app=rw-core',
+ 'app=ofagent',
+ 'app=bbsim',
+ 'app=radius',
+ 'app=bbsim-sadis-server',
+ 'app=onos-config-loader',
+ ]
+ ])
+ }
+ }
+ }
stage('Deploy VOLTHA infrastructure') {
steps {
- sh returnStdout: false, script: '''
+ timeout(time: 5, unit: 'MINUTES') {
+ script {
+ def localCharts = false
+ if (volthaHelmChartsChange != "" || release != "master") {
+ localCharts = true
+ }
- helm install kafka -n infra $HOME/teone/helm-charts/kafka --set replicaCount=${kafkaReplicas},replicas=${kafkaReplicas} --set persistence.enabled=false \
- --set zookeeper.replicaCount=${kafkaReplicas} --set zookeeper.persistence.enabled=false \
- --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default
+ def infraHelmFlags =
+ "--set global.log_level=${logLevel} " +
+ "--set radius.enabled=${withEapol} " +
+ "--set onos-classic.onosSshPort=30115 " +
+ "--set onos-classic.onosApiPort=30120 " +
+ params.extraHelmFlags
- # the ETCD chart use "auth" for resons different than BBsim, so strip that away
- ETCD_FLAGS=$(echo ${extraHelmFlags} | sed -e 's/--set auth=false / /g') | sed -e 's/--set auth=true / /g'
- ETCD_FLAGS+=" --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=${etcdReplicas}"
- ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
- helm install -n infra --set replicas=${etcdReplicas} etcd $HOME/teone/helm-charts/etcd $ETCD_FLAGS
-
- helm upgrade --install -n infra voltha-infra onf/voltha-infra \
- -f $WORKSPACE/voltha-helm-charts/examples/${workflow}-values.yaml \
- --set onos-classic.replicas=${onosReplicas},onos-classic.atomix.replicas=${atomixReplicas} \
- --set radius.enabled=${withEapol} \
- --set kafka.enabled=false \
- --set etcd.enabled=false
- '''
+ volthaInfraDeploy([
+ workflow: workflow,
+ infraNamespace: "infra",
+ extraHelmFlags: infraHelmFlags,
+ localCharts: localCharts,
+ onosReplica: onosReplicas,
+ atomixReplica: atomixReplicas,
+ kafkaReplica: kafkaReplicas,
+ etcdReplica: etcdReplicas,
+ ])
+ }
+ }
}
}
stage('Deploy Voltha') {
@@ -126,22 +151,6 @@
deploy_voltha_stacks(params.volthaStacks)
}
}
- stage('Start logging') {
- steps {
- sh returnStdout: false, script: '''
- # start logging with kail
-
- mkdir -p $LOG_FOLDER
-
- list=($APPS_TO_LOG)
- for app in "${list[@]}"
- do
- echo "Starting logs for: ${app}"
- _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
- done
- '''
- }
- }
stage('Configuration') {
steps {
script {
@@ -176,6 +185,10 @@
sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
+ # SR is not needed in scale tests and is not currently used by operators in production, so it can be disabled.
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.onosproject.segmentrouting
+
+
if [ ${withFlows} = false ]; then
sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
fi
@@ -200,31 +213,9 @@
}
post {
always {
+ stopComponentsLogs([compress: true])
// collect result, done in the "post" step so it's executed even in the
// event of a timeout in the tests
- sh '''
-
- # stop the kail processes
- list=($APPS_TO_LOG)
- for app in "${list[@]}"
- do
- echo "Stopping logs for: ${app}"
- _TAG="kail-$app"
- P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
- done
- '''
- // compressing the logs to save space on Jenkins
- sh '''
- cd $LOG_FOLDER
- tar -czf logs.tar.gz *.log
- rm *.log
- '''
plot([
csvFileName: 'scale-test.csv',
csvSeries: [
@@ -249,6 +240,7 @@
outputPath: '.',
passThreshold: 100,
reportFileName: 'RobotLogs/**/report.html',
+ onlyCritical: true,
unstableThreshold: 0]);
// get all the logs from kubernetes PODs
sh returnStdout: false, script: '''
@@ -262,23 +254,25 @@
kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
# copy the ONOS logs directly from the container to avoid the color codes
- printf '%s\n' $(kubectl -n \$INFRA_NS get pods -l app=onos-onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl -n \$INFRA_NS cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
+ printf '%s\n' $(kubectl get pods -n infra -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp -n infra #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
- # get radius logs out of the container
- kubectl -n \$INFRA_NS cp $(kubectl -n \$INFRA_NS get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER//radius.log || true
'''
// dump all the BBSim(s) ONU information
script {
for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
stack_ns="voltha"+i
sh """
+ mkdir -p \$LOG_FOLDER/${stack_ns}
BBSIM_IDS=\$(kubectl -n ${stack_ns} get pods | grep bbsim | grep -v server | awk '{print \$1}')
IDS=(\$BBSIM_IDS)
for bbsim in "\${IDS[@]}"
do
- kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl onu list > $LOG_FOLDER/${stack_ns}/\$bbsim-device-list.txt || true
- kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl service list > $LOG_FOLDER/${stack_ns}/\$bbsim-service-list.txt || true
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl onu list > \$LOG_FOLDER/${stack_ns}/\$bbsim-device-list.txt || true
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl service list > \$LOG_FOLDER/${stack_ns}/\$bbsim-service-list.txt || true
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt resources GEM_PORT > \$LOG_FOLDER/${stack_ns}/\$bbsim-flows-gem-ports.txt || true
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt resources ALLOC_ID > \$LOG_FOLDER/${stack_ns}/\$bbsim-flows-alloc-ids.txt || true
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt pons > \$LOG_FOLDER/${stack_ns}/\$bbsim-pon-resources.txt || true
done
"""
}
@@ -291,6 +285,9 @@
sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt || true
sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt || true
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 netcfg > $LOG_FOLDER/onos-netcfg.txt || true
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg get > $LOG_FOLDER/onos-cfg.txt || true
+
sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt || true
sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt || true
@@ -332,7 +329,8 @@
try {
sh """
- _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555& 2>&1 > /dev/null
+ # _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555& > /dev/null 2>&1
+ _TAG="voltha-port-forward" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1; done"&
voltctl -m 8MB device list -o json > $LOG_FOLDER/${stack_ns}/device-list.json || true
python -m json.tool $LOG_FOLDER/${stack_ns}/device-list.json > $LOG_FOLDER/${stack_ns}/voltha-devices-list.json || true
@@ -340,7 +338,7 @@
voltctl -m 8MB device list > $LOG_FOLDER/${stack_ns}/voltha-devices-list.txt || true
DEVICE_LIST=
- printf '%s\n' \$(voltctl -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl-m 8MB device flows # > $LOG_FOLDER/${stack_ns}/voltha-device-flows-#.txt" || true
+ printf '%s\n' \$(voltctl -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/${stack_ns}/voltha-device-flows-#.txt" || true
printf '%s\n' \$(voltctl -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/${stack_ns}/voltha-device-ports-#.txt" || true
printf '%s\n' \$(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-flows-#.txt" || true
@@ -350,8 +348,9 @@
ps aux | grep port-forw | grep voltha-api | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
"""
} catch(e) {
+ println e
sh '''
- echo "Can't get device list from voltclt"
+ echo "Can't get device list from voltctl"
'''
}
}
@@ -362,43 +361,42 @@
cd $WORKSPACE/voltha-system-tests
source ./vst_venv/bin/activate
sleep 60 # we have to wait for prometheus to collect all the information
- python tests/scale/sizing.py -o $WORKSPACE/plots || true
+ python scripts/sizing.py -o $WORKSPACE/plots || true
fi
'''
- archiveArtifacts artifacts: 'kind-voltha/install-*.log,execution-time-*.txt,logs/**/*.txt,logs/**/*.tar.gz,RobotLogs/**/*,plots/*,etcd-metrics/*'
+ archiveArtifacts artifacts: 'kind-voltha/install-*.log,execution-time-*.txt,logs/**/*.txt,logs/**/*.tar.gz,logs/**/*.tgz,RobotLogs/**/*,plots/*,etcd-metrics/*'
}
}
}
def deploy_voltha_stacks(numberOfStacks) {
for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
- stage("Deploy VOLTHA stack " + i) {
- // ${logLevel}
- def extraHelmFlags = "${extraHelmFlags} --set global.log_level=${logLevel},enablePerf=true,onu=${onus},pon=${pons} "
- extraHelmFlags += " --set securityContext.enabled=false,atomix.persistence.enabled=false "
+ timeout(time: 5, unit: 'MINUTES') {
+ stage("Deploy VOLTHA stack " + i) {
- // FIXME having to set all of these values is annoying, is there a better solution?
- def volthaHelmFlags = extraHelmFlags +
- "--set voltha.services.kafka.adapter.address=kafka.infra.svc:9092 " +
- "--set voltha.services.kafka.cluster.address=kafka.infra.svc:9092 " +
- "--set voltha.services.etcd.address=etcd.infra.svc:2379 " +
- "--set voltha-adapter-openolt.services.kafka.adapter.address=kafka.infra.svc:9092 " +
- "--set voltha-adapter-openolt.services.kafka.cluster.address=kafka.infra.svc:9092 " +
- "--set voltha-adapter-openolt.services.etcd.address=etcd.infra.svc:2379 " +
- "--set voltha-adapter-openonu.services.kafka.adapter.address=kafka.infra.svc:9092 " +
- "--set voltha-adapter-openonu.services.kafka.cluster.address=kafka.infra.svc:9092 " +
- "--set voltha-adapter-openonu.services.etcd.address=etcd.infra.svc:2379" +
- ofAgentConnections(onosReplicas.toInteger(), "voltha-infra", "infra")
+ def localCharts = false
+ if (volthaHelmChartsChange != "" || release != "master") {
+ localCharts = true
+ }
- volthaStackDeploy([
- bbsimReplica: olts.toInteger(),
- infraNamespace: "infra",
- volthaNamespace: "voltha${i}",
- stackName: "voltha${i}",
- stackId: i,
- workflow: workflow,
- extraHelmFlags: volthaHelmFlags
- ])
+ def volthaHelmFlags =
+ "--set global.log_level=${logLevel} " +
+ "--set enablePerf=true,onu=${onus},pon=${pons} " +
+ "--set securityContext.enabled=false " +
+ params.extraHelmFlags
+
+ volthaStackDeploy([
+ bbsimReplica: olts.toInteger(),
+ infraNamespace: "infra",
+ volthaNamespace: "voltha${i}",
+ stackName: "voltha${i}",
+ stackId: i,
+ workflow: workflow,
+ extraHelmFlags: volthaHelmFlags,
+ localCharts: localCharts,
+ onosReplica: onosReplicas,
+ ])
+ }
}
}
}
@@ -413,7 +411,9 @@
voltctl -s 127.0.0.1:55555 config > $HOME/.volt/config
export VOLTCONFIG=$HOME/.volt/config
- _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555& 2>&1 > /dev/null
+ # _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555& > /dev/null 2>&1
+ _TAG="voltha-port-forward" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1; done"&
+
ROBOT_PARAMS="-v stackId:${i} \
-v olt:${olts} \
diff --git a/jjb/pipeline/voltha/master/voltha-scale-test.groovy b/jjb/pipeline/voltha/master/voltha-scale-test.groovy
index 35604f8..08f04a8 100644
--- a/jjb/pipeline/voltha/master/voltha-scale-test.groovy
+++ b/jjb/pipeline/voltha/master/voltha-scale-test.groovy
@@ -21,7 +21,7 @@
])
// this function generates the correct parameters for ofAgent
-// to connect to multple ONOS instances
+// to connect to multiple ONOS instances
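+// e.g. (illustrative) ofAgentConnections(2, "voltha-infra", "infra") returns:
+//   --set voltha.services.controller[0].address=voltha-infra-onos-classic-0.voltha-infra-onos-classic-hs.infra.svc:6653
+//   --set voltha.services.controller[1].address=voltha-infra-onos-classic-1.voltha-infra-onos-classic-hs.infra.svc:6653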
def ofAgentConnections(numOfOnos, releaseName, namespace) {
def params = " "
numOfOnos.times {
@@ -50,10 +50,7 @@
NUM_OF_ONOS="${onosReplicas}"
NUM_OF_ATOMIX="${atomixReplicas}"
EXTRA_HELM_FLAGS=" "
-
- APPS_TO_LOG="etcd kafka onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server onos-config-loader"
LOG_FOLDER="$WORKSPACE/logs"
-
GERRIT_PROJECT="${GERRIT_PROJECT}"
}
@@ -68,6 +65,14 @@
helm repo add onf https://charts.opencord.org
helm repo update
+ # remove all persistent volume claims
+ kubectl delete pvc --all-namespaces --all
+ PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
+ while [[ \$PVCS != 0 ]]; do
+ sleep 5
+ PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
+ done
+
# remove orphaned port-forward from different namespaces
ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
@@ -104,19 +109,8 @@
}
}
stage('Deploy common infrastructure') {
- // includes monitoring, kafka, etcd
steps {
sh '''
- helm install kafka $HOME/teone/helm-charts/kafka --set replicaCount=${kafkaReplicas},replicas=${kafkaReplicas} --set persistence.enabled=false \
- --set zookeeper.replicaCount=${kafkaReplicas} --set zookeeper.persistence.enabled=false \
- --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default
-
- # the ETCD chart use "auth" for resons different than BBsim, so strip that away
- ETCD_FLAGS=$(echo ${extraHelmFlags} | sed -e 's/--set auth=false / /g') | sed -e 's/--set auth=true / /g'
- ETCD_FLAGS+=" --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=${etcdReplicas}"
- ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
- helm install --set replicas=${etcdReplicas} etcd $HOME/teone/helm-charts/etcd $ETCD_FLAGS
-
if [ ${withMonitoring} = true ] ; then
helm install nem-monitoring onf/nem-monitoring \
-f $HOME/voltha-scale/grafana.yaml \
@@ -130,18 +124,21 @@
steps {
timeout(time: 10, unit: 'MINUTES') {
script {
- sh returnStdout: false, script: '''
- # start logging with kail
-
- mkdir -p $LOG_FOLDER
-
- list=($APPS_TO_LOG)
- for app in "${list[@]}"
- do
- echo "Starting logs for: ${app}"
- _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
- done
- '''
+ startComponentsLogs([
+ appsToLog: [
+ 'app.kubernetes.io/name=etcd',
+ 'app.kubernetes.io/name=kafka',
+ 'app=onos-classic',
+ 'app=adapter-open-onu',
+ 'app=adapter-open-olt',
+ 'app=rw-core',
+ 'app=ofagent',
+ 'app=bbsim',
+ 'app=radius',
+ 'app=bbsim-sadis-server',
+ 'app=onos-config-loader',
+ ]
+ ])
def returned_flags = sh (returnStdout: true, script: """
export EXTRA_HELM_FLAGS+=' '
@@ -233,8 +230,8 @@
def extraHelmFlags = returned_flags
// The added space before params.extraHelmFlags is required due to the .trim() above
def infraHelmFlags =
- " --set etcd.enabled=false,kafka.enabled=false" +
- " --set global.log_level=${logLevel} " +
+ "--set global.log_level=${logLevel} " +
+ "--set radius.enabled=${withEapol} " +
"--set onos-classic.onosSshPort=30115 " +
"--set onos-classic.onosApiPort=30120 " +
extraHelmFlags + " " + params.extraHelmFlags
@@ -253,20 +250,11 @@
localCharts: localCharts,
onosReplica: onosReplicas,
atomixReplica: atomixReplicas,
+ kafkaReplica: kafkaReplicas,
+ etcdReplica: etcdReplicas,
])
- def stackHelmFlags = "${ofAgentConnections(onosReplicas.toInteger(), "voltha-infra", "default")} " +
- "--set voltha.services.kafka.adapter.address=kafka.default.svc:9092 " +
- "--set voltha.services.kafka.cluster.address=kafka.default.svc:9092 " +
- "--set voltha.services.etcd.address=etcd.default.svc:2379 " +
- "--set voltha-adapter-openolt.services.kafka.adapter.address=kafka.default.svc:9092 " +
- "--set voltha-adapter-openolt.services.kafka.cluster.address=kafka.default.svc:9092 " +
- "--set voltha-adapter-openolt.services.etcd.address=etcd.default.svc:2379 " +
- "--set voltha-adapter-openonu.services.kafka.adapter.address=kafka.default.svc:9092 " +
- "--set voltha-adapter-openonu.services.kafka.cluster.address=kafka.default.svc:9092 " +
- "--set voltha-adapter-openonu.services.etcd.address=etcd.default.svc:2379"
-
- stackHelmFlags += " --set onu=${onus},pon=${pons} --set global.log_level=${logLevel.toLowerCase()} "
+ stackHelmFlags = " --set onu=${onus},pon=${pons} --set global.log_level=${logLevel.toLowerCase()} "
stackHelmFlags += " --set voltha.ingress.enabled=true --set voltha.ingress.enableVirtualHosts=true --set voltha.fullHostnameOverride=voltha.scale1.dev "
stackHelmFlags += extraHelmFlags + " " + params.extraHelmFlags
@@ -277,7 +265,8 @@
stackName: "voltha1", // TODO support custom charts
workflow: workflow,
extraHelmFlags: stackHelmFlags,
- localCharts: false,
+ localCharts: localCharts,
+ onosReplica: onosReplicas,
])
sh """
set +x
@@ -341,12 +330,12 @@
fi
if [ '${workflow}' = 'tt' ]; then
- etcd_container=\$(kubectl get pods --all-namespaces | grep etcd | awk 'NR==1{print \$2}')
+ etcd_container=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$2}')
kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64')
kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65')
- kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST.json \$etcd_container:/tmp/mcast.json
+ kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast.json
put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66')
fi
@@ -387,7 +376,7 @@
sh """
# load MIB template
wget https://raw.githubusercontent.com/opencord/voltha-openonu-adapter-go/master/templates/BBSM-12345123451234512345-00000000000001-v1.json
- cat BBSM-12345123451234512345-00000000000001-v1.json | kubectl exec -it \$(kubectl get pods |grep etcd | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/12345123451234512345/00000000000001
+ cat BBSM-12345123451234512345-00000000000001-v1.json | kubectl exec -it \$(kubectl get pods |grep etcd-0 | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/12345123451234512345/00000000000001
"""
}
}
@@ -484,6 +473,9 @@
return params.withIgmp
}
}
+ options {
+ timeout(time: 11, unit: 'MINUTES')
+ }
steps {
sh returnStdout: false, script: """
# sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.onosproject.store.group.impl
@@ -494,53 +486,53 @@
cd $WORKSPACE/voltha-system-tests
make vst_venv
'''
- timeout(time: 11, unit: 'MINUTES') {
- sh '''
- ROBOT_PARAMS="--exitonfailure \
- -v olt:${olts} \
- -v pon:${pons} \
- -v onu:${onus} \
- -v workflow:${workflow} \
- -v withEapol:${withEapol} \
- -v withDhcp:${withDhcp} \
- -v withIgmp:${withIgmp} \
- -v ONOS_SSH_PORT:30115 \
- -v ONOS_REST_PORT:30120 \
- --noncritical non-critical \
- -i igmp \
- -e setup -e activation -e flow-before \
- -e authentication -e provision -e flow-after \
- -e dhcp -e teardown "
- cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate
- robot -d $ROBOT_LOGS_DIR \
- $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
- '''
+ script {
+ Exception caughtException = null
+
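+ // NOTE: the catchError/try-catch combination below keeps the build green when the IGMP
+ // suite hits the 11-minute stage timeout (FlowInterruptedException): the stage is marked
+ // ABORTED, while any other exception is re-thrown below and fails the build as usual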
+ catchError(buildResult: 'SUCCESS', stageResult: 'ABORTED') {
+ try {
+ sh '''
+ ROBOT_PARAMS="--exitonfailure \
+ -v olt:${olts} \
+ -v pon:${pons} \
+ -v onu:${onus} \
+ -v workflow:${workflow} \
+ -v withEapol:${withEapol} \
+ -v withDhcp:${withDhcp} \
+ -v withIgmp:${withIgmp} \
+ -v ONOS_SSH_PORT:30115 \
+ -v ONOS_REST_PORT:30120 \
+ --noncritical non-critical \
+ -i igmp \
+ -e setup -e activation -e flow-before \
+ -e authentication -e provision -e flow-after \
+ -e dhcp -e teardown "
+ cd $WORKSPACE/voltha-system-tests
+ source ./vst_venv/bin/activate
+ robot -d $ROBOT_LOGS_DIR \
+ $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
+ '''
+ } catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
+ // if the error is a timeout don't mark the build as failed
+ println "IGMP test timed out"
+ } catch (Throwable e) {
+ caughtException = e
+ }
+ }
+
+ if (caughtException) {
+ error caughtException.message
+ }
}
}
}
}
post {
always {
+ stopComponentsLogs()
// collect result, done in the "post" step so it's executed even in the
// event of a timeout in the tests
sh '''
-
- # stop the kail processes
- list=($APPS_TO_LOG)
- for app in "${list[@]}"
- do
- echo "Stopping logs for: ${app}"
- _TAG="kail-$app"
- P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
- done
-
if [ ${withPcap} = true ] ; then
# stop ofAgent tcpdump
P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
@@ -579,7 +571,7 @@
fi
cd voltha-system-tests
- source ./vst_venv/bin/activate
+ source ./vst_venv/bin/activate || true
python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
cat $WORKSPACE/execution-time.txt
'''
@@ -619,18 +611,11 @@
outputPath: 'RobotLogs',
passThreshold: 100,
reportFileName: '**/report*.html',
+ onlyCritical: true,
unstableThreshold: 0]);
+ getPodsInfo("$LOG_FOLDER")
// get all the logs from kubernetes PODs
sh returnStdout: false, script: '''
-
- # store information on running charts
- helm ls > $LOG_FOLDER/helm-list.txt || true
-
- # store information on the running pods
- kubectl get pods -o wide > $LOG_FOLDER/pods.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
-
# copy the ONOS logs directly from the container to avoid the color codes
printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
@@ -662,7 +647,7 @@
'''
script {
// first make sure the port-forward is still running,
- // sometimes Jenkins kills it relardless of the JENKINS_NODE_COOKIE=dontKillMe
+ // sometimes Jenkins kills it regardless of the JENKINS_NODE_COOKIE=dontKillMe
def running = sh (
script: 'ps aux | grep port-forw | grep -E "onos|voltha" | grep -v grep | wc -l',
returnStdout: true
@@ -725,6 +710,10 @@
curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-bucket.json || true
curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_wal_fsync_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-wal-fsync-time-bucket.json || true
curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_network_peer_round_trip_time_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-network-peer-round-trip-time-seconds.json || true
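+ # after scraping the etcd metrics above, defragment the etcd datastore and record the
+ # endpoint status table alongside the other etcd metrics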
+ etcd_namespace=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$1}')
+ etcd_container=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$2}')
+ kubectl exec -it -n \$etcd_namespace \$etcd_container -- etcdctl defrag --cluster || true
+ kubectl exec -it -n \$etcd_namespace \$etcd_container -- etcdctl endpoint status -w table > $WORKSPACE/etcd-metrics/etcd-status-table.txt || true
'''
// get VOLTHA debug infos
@@ -752,9 +741,9 @@
sh '''
if [ ${withMonitoring} = true ] ; then
cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate
+ source ./vst_venv/bin/activate || true
sleep 60 # we have to wait for prometheus to collect all the information
- python tests/scale/sizing.py -o $WORKSPACE/plots || true
+ python scripts/sizing.py -o $WORKSPACE/plots || true
fi
'''
archiveArtifacts artifacts: 'execution-time.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
diff --git a/jjb/pipeline/voltha/master/voltha-tt-physical-functional-tests.groovy b/jjb/pipeline/voltha/master/voltha-tt-physical-functional-tests.groovy
new file mode 100644
index 0000000..cc3538a
--- /dev/null
+++ b/jjb/pipeline/voltha/master/voltha-tt-physical-functional-tests.groovy
@@ -0,0 +1,240 @@
+// Copyright 2017-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
+node {
+ // Need this so that deployment_config has global scope when it's read later
+ deployment_config = null
+}
+
+pipeline {
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: "${timeout}", unit: 'MINUTES')
+ }
+
+ environment {
+ KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
+ VOLTCONFIG="$HOME/.volt/config-minimal"
+ PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ }
+
+ stages {
+ stage('Clone voltha-system-tests') {
+ steps {
+ step([$class: 'WsCleanup'])
+ checkout([
+ $class: 'GitSCM',
+ userRemoteConfigs: [[
+ url: "https://gerrit.opencord.org/voltha-system-tests",
+ refspec: "${volthaSystemTestsChange}"
+ ]],
+ branches: [[ name: "${branch}", ]],
+ extensions: [
+ [$class: 'WipeWorkspace'],
+ [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
+ [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
+ ],
+ ])
+ script {
+ sh(script:"""
+ if [ '${volthaSystemTestsChange}' != '' ] ; then
+ cd $WORKSPACE/voltha-system-tests;
+ git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
+ fi
+ """)
+ }
+ }
+ }
+ // This checkout allows us to show changes in Jenkins
+ // we only do this on master as we don't branch all the repos for all the releases
+ // (we should compute the difference by tracking the container version, not the code)
+ stage('Download All the VOLTHA repos') {
+ when {
+ expression {
+ return "${branch}" == 'master';
+ }
+ }
+ steps {
+ checkout(changelog: true,
+ poll: false,
+ scm: [$class: 'RepoScm',
+ manifestRepositoryUrl: "${params.manifestUrl}",
+ manifestBranch: "${params.branch}",
+ currentBranch: true,
+ destinationDir: 'voltha',
+ forceSync: true,
+ resetFirst: true,
+ quiet: true,
+ jobs: 4,
+ showAllChanges: true]
+ )
+ }
+ }
+ stage ('Initialize') {
+ steps {
+ sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
+ script {
+ deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
+ }
+ sh returnStdout: false, script: """
+ mkdir -p $WORKSPACE/bin
+ bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
+ cd $WORKSPACE
+ if [ "${params.branch}" == "voltha-2.8" ]; then
+ VOLTCTL_VERSION=1.6.11
+ else
+ VOLTCTL_VERSION=\$(curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')
+ fi
+
+ HOSTOS=\$(uname -s | tr "[:upper:]" "[:lower:]")
+ HOSTARCH=\$(uname -m | tr "[:upper:]" "[:lower:]")
+ if [ \$HOSTARCH == "x86_64" ]; then
+ HOSTARCH="amd64"
+ fi
+ curl -o $WORKSPACE/bin/voltctl -sSL https://github.com/opencord/voltctl/releases/download/v\${VOLTCTL_VERSION}/voltctl-\${VOLTCTL_VERSION}-\${HOSTOS}-\${HOSTARCH}
+ chmod 755 $WORKSPACE/bin/voltctl
+ voltctl version --clientonly
+
+
+ # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
+ # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
+ # We should change this. In the meantime here is a workaround.
+ if [ "${params.branch}" == "master" ]; then
+ set +e
+
+
+ # Remove noise from voltha-core logs
+ voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
+ voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
+ # Remove noise from openolt logs
+ voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
+ voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
+ voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
+ fi
+ """
+ }
+ }
+
+ stage('Functional Tests') {
+ environment {
+ ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
+ ROBOT_FILE="Voltha_TT_PODTests.robot"
+ ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/FunctionalTests"
+ }
+ steps {
+ sh """
+ device_teardown=True
+ if [ ${params.enableMultiUni} = true ]; then
+ device_teardown=False
+ fi
+ mkdir -p $ROBOT_LOGS_DIR
+ if ( ${powerSwitch} ); then
+ export ROBOT_MISC_ARGS="--removekeywords wuks -i functionalTT -i PowerSwitch -i sanityTT -i sanityTT-MCAST -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:\${device_teardown} -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
+ else
+ export ROBOT_MISC_ARGS="--removekeywords wuks -i functionalTT -e PowerSwitch -i sanityTT -i sanityTT-MCAST -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:\${device_teardown} -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
+ fi
+ make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
+ """
+ }
+ }
+
+ stage('Failure/Recovery Tests') {
+ environment {
+ ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
+ ROBOT_FILE="Voltha_TT_FailureScenarios.robot"
+ ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/FailureScenarios"
+ }
+ steps {
+ sh """
+ mkdir -p $ROBOT_LOGS_DIR
+ if [ ${params.enableMultiUni} = false ]; then
+ if ( ${powerSwitch} ); then
+ export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
+ else
+ export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
+ fi
+ make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
+ fi
+ """
+ }
+ }
+
+ stage('Multi-Tcont Tests') {
+ environment {
+ ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
+ ROBOT_FILE="Voltha_TT_MultiTcontTests.robot"
+ ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/MultiTcontScenarios"
+ ROBOT_TEST_INPUT_FILE="$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-TT-multi-tcont-tests-input.yaml"
+ }
+ steps {
+ sh """
+ mkdir -p $ROBOT_LOGS_DIR
+ if [ ${params.enableMultiUni} = false ]; then
+ if ( ${powerSwitch} ); then
+ export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
+ else
+ export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
+ fi
+ make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
+ fi
+ """
+ }
+ }
+
+ }
+ post {
+ always {
+ getPodsInfo("$WORKSPACE/pods")
+ sh returnStdout: false, script: '''
+ set +e
+
+ # collect the logs produced by the Robot Framework StartLogging keyword
+ cd $WORKSPACE
+ gzip *-combined.log || true
+ rm *-combined.log || true
+ '''
+ script {
+ deployment_config.olts.each { olt ->
+ if (olt.type == null || olt.type == "" || olt.type == "openolt") {
+ sh returnStdout: false, script: """
+ sshpass -p ${olt.pass} scp ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
+ sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log # Remove escape sequences
+ """
+ }
+ }
+ }
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: '**/log*.html',
+ otherFiles: '',
+ outputFileName: '**/output*.xml',
+ outputPath: 'RobotLogs',
+ passThreshold: 100,
+ reportFileName: '**/report*.html',
+ unstableThreshold: 0,
+ onlyCritical: true
+ ]);
+ archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
+ }
+ }
+}
diff --git a/jjb/pipeline/voltha/voltha-2.7/bbsim-tests.groovy b/jjb/pipeline/voltha/voltha-2.7/bbsim-tests.groovy
deleted file mode 100644
index c72c0f9..0000000
--- a/jjb/pipeline/voltha/voltha-2.7/bbsim-tests.groovy
+++ /dev/null
@@ -1,253 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests
-// uses bbsim to simulate OLT/ONUs
-
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
- retriever: modernSCM([
- $class: 'GitSCMSource',
- remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// TODO move this in a keyword so it can be shared across pipelines
-def customImageFlags(project) {
- def chart = "unknown"
- def image = "unknown"
- switch(project) {
- case "ofagent-go":
- chart = "voltha"
- image = "ofagent"
- break
- case "voltha-go":
- chart = "voltha"
- image = "rw_core"
- break
- case "voltha-openonu-adapter-go":
- chart = "voltha-adapter-openonu"
- image = "adapter_open_onu_go"
- break
- // TODO remove after 2.7
- case "voltha-openonu-adapter":
- chart = "voltha-adapter-openonu"
- image = "adapter_open_onu"
- break
- // TODO end
- case "voltha-openolt-adapter":
- chart = "voltha-adapter-openolt"
- image = "adapter_open_olt"
- break
- case "bbsim":
- // BBSIM has a different format that voltha, return directly
- return "--set images.bbsim.tag=citest,images.bbsim.pullPolicy=Never"
- break
- default:
- break
- }
-
- return "--set ${chart}.images.${image}.tag=citest,${chart}.images.${image}.pullPolicy=Never "
-}
-
-def test_workflow(name) {
- stage('Deploy - '+ name + ' workflow') {
- def extraHelmFlags = "${extraHelmFlags} --set global.log_level=DEBUG,onu=1,pon=1 "
-
- if (gerritProject != "") {
- extraHelmFlags = extraHelmFlags + customImageFlags("${gerritProject}")
- }
-
- def localCharts = false
- if (gerritProject == "voltha-helm-charts") {
- localCharts = true
- }
-
- volthaDeploy([workflow: name, extraHelmFlags: extraHelmFlags, localCharts: localCharts])
- // start logging
- sh """
- mkdir -p $WORKSPACE/${name}
- _TAG=kail-${name} kail -n infra -n voltha > $WORKSPACE/${name}/onos-voltha-combined.log &
- """
- // forward ONOS and VOLTHA ports
- sh """
- _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101&
- _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181&
- _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha svc/voltha-voltha-api 55555:55555&
- """
- }
- stage('Test VOLTHA - '+ name + ' workflow') {
- sh """
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/${name.toUpperCase()}Workflow"
- mkdir -p \$ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -e PowerSwitch"
-
- # By default, all tests tagged 'sanity' are run. This covers basic functionality
- # like running through the ATT workflow for a single subscriber.
- export TARGET=sanity-kind-${name}
-
- # If the Gerrit comment contains a line with "functional tests" then run the full
- # functional test suite. This covers tests tagged either 'sanity' or 'functional'.
- # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
- REGEX="functional tests"
- if [[ "\$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
- export TARGET=functional-single-kind-${name}
- fi
-
- if [[ "${gerritProject}" == "bbsim" ]]; then
- echo "Running BBSim specific Tests"
- export TARGET=sanity-bbsim-${name}
- fi
-
- export VOLTCONFIG=$HOME/.volt/config
- export KUBECONFIG=$HOME/.kube/config
-
- # Run the specified tests
- make -C $WORKSPACE/voltha-system-tests \$TARGET || true
- """
- // stop logging
- sh """
- P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${name}" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_IDS" ]; then
- echo \$P_IDS
- for P_ID in \$P_IDS; do
- kill -9 \$P_ID
- done
- fi
- """
- // remove port-forwarding
- sh """
- # remove orphaned port-forward from different namespaces
- ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
- """
- // collect pod details
- get_pods_info("$WORKSPACE/${name}")
- helmTeardown(['infra', 'voltha'])
- }
-}
-
-def get_pods_info(dest) {
- // collect pod details, this is here in case of failure
- sh """
- mkdir -p ${dest}
- kubectl get pods --all-namespaces -o wide | tee ${dest}/pods.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee ${dest}/pod-images.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee ${dest}/pod-imagesId.txt || true
- kubectl describe pods --all-namespaces -l app.kubernetes.io/part-of=voltha > ${dest}/pods-describe.txt
- helm ls --all-namespaces | tee ${dest}/helm-charts.txt
- """
-}
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 35, unit: 'MINUTES')
- }
- environment {
- PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
- KUBECONFIG="$HOME/.kube/kind-config-${clusterName}"
- }
-
- stages{
- stage('Download Code') {
- steps {
- getVolthaCode([
- branch: "${branch}",
- gerritProject: "${gerritProject}",
- gerritRefspec: "${gerritRefspec}",
- volthaSystemTestsChange: "${volthaSystemTestsChange}",
- volthaHelmChartsChange: "${volthaHelmChartsChange}",
- ])
- }
- }
- stage('Build patch') {
- steps {
- // NOTE that the correct patch has already been checked out
- // during the getVolthaCode step
- buildVolthaComponent("${gerritProject}")
- }
- }
- stage('Create K8s Cluster') {
- steps {
- createKubernetesCluster([nodes: 3])
- }
- }
- stage('Load image in kind nodes') {
- steps {
- loadToKind()
- }
- }
- stage('Replace voltctl') {
-      // if the project under test is voltctl, override the downloaded binary with the locally built one
- when {
- expression {
- return gerritProject == "voltctl"
- }
- }
- steps{
- sh """
- mv `ls $WORKSPACE/voltctl/release/voltctl-*-linux-amd*` $WORKSPACE/bin/voltctl
- chmod +x $WORKSPACE/bin/voltctl
- """
- }
- }
- stage('Run Test') {
- steps {
- timeout(time: 30, unit: 'MINUTES') {
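-          // run the three workflows back to back: each test_workflow() call deploys VOLTHA
-          // for that workflow, runs the Robot tests and tears the deployment down again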
- test_workflow("att")
- test_workflow("dt")
- test_workflow("tt")
- }
- }
- }
- }
-
- post {
- aborted {
- get_pods_info("$WORKSPACE/failed")
- sh """
- kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.log
- """
- archiveArtifacts artifacts: '**/*.log,**/*.txt'
- }
- failure {
- get_pods_info("$WORKSPACE/failed")
- sh """
-        kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.log
- """
- archiveArtifacts artifacts: '**/*.log,**/*.txt'
- }
- always {
- sh '''
- gzip $WORKSPACE/att/onos-voltha-combined.log || true
- gzip $WORKSPACE/dt/onos-voltha-combined.log || true
- gzip $WORKSPACE/tt/onos-voltha-combined.log || true
- '''
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'RobotLogs/*/log*.html',
- otherFiles: '',
- outputFileName: 'RobotLogs/*/output*.xml',
- outputPath: '.',
- passThreshold: 100,
- reportFileName: 'RobotLogs/*/report*.html',
- unstableThreshold: 0]);
- archiveArtifacts artifacts: '*.log,**/*.log,**/*.gz,*.gz,*.txt,**/*.txt'
- }
- }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.7/voltha-DMI-bbsim-tests.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-DMI-bbsim-tests.groovy
deleted file mode 100755
index e7dba07..0000000
--- a/jjb/pipeline/voltha/voltha-2.7/voltha-DMI-bbsim-tests.groovy
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests
-// uses kind-voltha to deploy voltha-2.X
-// uses bbsim to simulate OLT/ONUs
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 30, unit: 'MINUTES')
- }
- environment {
- KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
- VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$PATH:$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- NAME="minimal"
- FANCY=0
- WITH_SIM_ADAPTERS="no"
- WITH_RADIUS="yes"
- WITH_BBSIM="yes"
- DEPLOY_K8S="yes"
- VOLTHA_LOG_LEVEL="DEBUG"
- CONFIG_SADIS="external"
- BBSIM_CFG="configs/bbsim-sadis-att.yaml"
- ROBOT_MISC_ARGS="-e PowerSwitch ${params.extraRobotArgs}"
- KARAF_HOME="${params.karafHome}"
- DIAGS_PROFILE="VOLTHA_PROFILE"
- NUM_OF_BBSIM="${olts}"
- }
- stages {
- stage('Clone kind-voltha') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- // refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
- stage('Cleanup') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh """
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha/
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down || ./voltha down
- """
- }
- }
- }
- stage('Clone voltha-system-tests') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- // refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${branch}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
-
- stage('Deploy Voltha') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh """
- export EXTRA_HELM_FLAGS=""
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${params.extraHelmFlags}"
-
- cd $WORKSPACE/kind-voltha/
- ./voltha up
- """
- }
- }
- }
-
- stage('Device Management Interface Tests') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/DMITests"
- }
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh '''
- set +e
- mkdir -p $ROBOT_LOGS_DIR
- cd $WORKSPACE/kind-voltha/scripts
- ./log-collector.sh > /dev/null &
- ./log-combine.sh > /dev/null &
-
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
- make -C $WORKSPACE/voltha-system-tests ${makeTarget} || true
- '''
- }
- }
- }
- }
-
- post {
- always {
- sh '''
- set +e
- cp $WORKSPACE/kind-voltha/install-minimal.log $WORKSPACE/
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -o wide
- kubectl get pods -n voltha -o wide
-
- sleep 60 # Wait for log-collector and log-combine to complete
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_go voltha-ofagent >> $WORKSPACE/error-report.log
- extract_errors_python onos >> $WORKSPACE/error-report.log
-
- gzip error-report.log || true
-
- cd $WORKSPACE/kind-voltha/scripts/logger/combined/
- tar czf $WORKSPACE/container-logs.tgz *
-
- cd $WORKSPACE
- gzip *-combined.log || true
-
- ## shut down voltha but leave kind-voltha cluster
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha/
- DEPLOY_K8S=n WAIT_ON_DOWN=y ./voltha down
- kubectl delete deployment voltctl || true
- '''
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: '**/log*.html',
- otherFiles: '',
- outputFileName: '**/output*.xml',
- outputPath: 'RobotLogs',
- passThreshold: 100,
- reportFileName: '**/report*.html',
- unstableThreshold: 0]);
-
- archiveArtifacts artifacts: '*.log,*.gz,*.tgz'
-
- }
- }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.7/voltha-bbsim-tests.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-bbsim-tests.groovy
deleted file mode 100644
index 5208368..0000000
--- a/jjb/pipeline/voltha/voltha-2.7/voltha-bbsim-tests.groovy
+++ /dev/null
@@ -1,526 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests
-// uses kind-voltha to deploy voltha-2.X
-// uses bbsim to simulate OLT/ONUs
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 90, unit: 'MINUTES')
- }
- environment {
- PATH="$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- VOLTHA_LOG_LEVEL="DEBUG"
- FANCY=0
- WITH_SIM_ADAPTERS="n"
- NAME="test"
- VOLTCONFIG="$HOME/.volt/config-$NAME"
- KUBECONFIG="$HOME/.kube/kind-config-voltha-$NAME"
- EXTRA_HELM_FLAGS=" --set global.image_registry=mirror.registry.opennetworking.org/ --set defaults.image_registry=mirror.registry.opennetworking.org/ "
- }
-
- stages {
- stage('Clone kind-voltha') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- sh """
- if [ '${kindVolthaChange}' != '' ] ; then
- cd $WORKSPACE/kind-voltha
- git fetch https://gerrit.opencord.org/kind-voltha ${kindVolthaChange} && git checkout FETCH_HEAD
- fi
- """
- }
- }
- stage('Clone voltha-system-tests') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${branch}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- sh """
- if [ '${volthaSystemTestsChange}' != '' ] ; then
- cd $WORKSPACE/voltha-system-tests
- git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
- fi
- """
- }
- }
-    // If the repo under test is not kind-voltha,
-    // download it and check out the patch
- stage('Download Patch') {
- when {
- expression {
- return "${gerritProject}" != 'kind-voltha';
- }
- }
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/${gerritProject}",
- refspec: "${gerritRefspec}"
- ]],
- branches: [[ name: "${branch}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "${gerritProject}"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- sh """
- pushd $WORKSPACE/${gerritProject}
- git fetch https://gerrit.opencord.org/${gerritProject} ${gerritRefspec} && git checkout FETCH_HEAD
-
- echo "Currently on commit: \n"
- git log -1 --oneline
- popd
- """
- }
- }
-    // If the repo under test is kind-voltha we don't need to download it again;
-    // we already have it, so simply check out the patch
- stage('Checkout kind-voltha patch') {
- when {
- expression {
- return "${gerritProject}" == 'kind-voltha';
- }
- }
- steps {
- sh """
- cd $WORKSPACE/kind-voltha
- git fetch https://gerrit.opencord.org/kind-voltha ${gerritRefspec} && git checkout FETCH_HEAD
- """
- }
- }
- stage('Create K8s Cluster') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh """
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- cd $WORKSPACE/kind-voltha/
- JUST_K8S=y ./voltha up
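-            # install kail (used below to capture the combined ONOS/VOLTHA logs) into kind-voltha/bin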
- bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/kind-voltha/bin"
- """
- }
- }
- }
-
- stage('Build Images') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh """
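-            # helper: build the given component's Docker image from the local checkout, tagged "citest"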
- make-local () {
- make -C $WORKSPACE/\$1 DOCKER_REGISTRY=mirror.registry.opennetworking.org/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=citest docker-build
- }
- if [ "${gerritProject}" = "pyvoltha" ]; then
- make -C $WORKSPACE/pyvoltha/ dist
- export LOCAL_PYVOLTHA=$WORKSPACE/pyvoltha/
- make-local voltha-openonu-adapter
- elif [ "${gerritProject}" = "voltha-lib-go" ]; then
- make -C $WORKSPACE/voltha-lib-go/ build
- export LOCAL_LIB_GO=$WORKSPACE/voltha-lib-go/
- make-local voltha-go
- make-local voltha-openolt-adapter
- elif [ "${gerritProject}" = "voltha-protos" ]; then
- make -C $WORKSPACE/voltha-protos/ build
- export LOCAL_PROTOS=$WORKSPACE/voltha-protos/
- make-local voltha-go
- make-local voltha-openolt-adapter
- make-local voltha-openonu-adapter
- make-local ofagent-py
- elif [ "${gerritProject}" = "voltctl" ]; then
- # Set and handle GOPATH and PATH
- export GOPATH=\${GOPATH:-$WORKSPACE/go}
- export PATH=\$PATH:/usr/lib/go-1.12/bin:/usr/local/go/bin:\$GOPATH/bin
- make -C $WORKSPACE/voltctl/ build
- elif ! [[ "${gerritProject}" =~ ^(voltha-helm-charts|voltha-system-tests|kind-voltha)\$ ]]; then
- make-local ${gerritProject}
- fi
- """
- }
- }
- }
-
- stage('Push Images') {
- steps {
- sh '''
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- if ! [[ "${gerritProject}" =~ ^(voltha-helm-charts|voltha-system-tests|voltctl|kind-voltha)\$ ]]; then
- export GOROOT=/usr/local/go
- export GOPATH=\$(pwd)
- docker images | grep citest
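-            # side-load the locally built citest images onto the kind worker nodes so the
-            # image overrides that use pullPolicy=Never later can find them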
- for image in \$(docker images -f "reference=*/*/*citest" --format "{{.Repository}}"); do echo "Pushing \$image to nodes"; kind load docker-image \$image:citest --name voltha-\$NAME --nodes voltha-\$NAME-worker,voltha-\$NAME-worker2; done
- fi
- '''
- }
- }
-
- stage('ATT workflow') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ATTWorkflow"
- }
- steps {
- timeout(time: 15, unit: 'MINUTES') {
- sh '''
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- if [[ "${gerritProject}" == voltha-helm-charts ]]; then
- export EXTRA_HELM_FLAGS+="--set global.image_tag=null "
- fi
-
- # Workflow-specific flags
- export WITH_RADIUS=yes
- export WITH_BBSIM=yes
- export DEPLOY_K8S=yes
- export CONFIG_SADIS="external"
- export BBSIM_CFG="configs/bbsim-sadis-att.yaml"
-
- export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
-
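-          # map the project under test to the helm image override(s) that should run the citest build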
- IMAGES=""
- if [ "${gerritProject}" = "voltha-go" ]; then
- IMAGES="rw_core ro_core "
- elif [ "${gerritProject}" = "ofagent-py" ]; then
- IMAGES="ofagent_py "
- EXTRA_HELM_FLAGS+="--set use_ofagent_go=false "
- elif [ "${gerritProject}" = "ofagent-go" ]; then
- IMAGES="ofagent_go "
- elif [ "${gerritProject}" = "voltha-onos" ]; then
- IMAGES="onos "
- EXTRA_HELM_FLAGS+="--set images.onos.repository=mirror.registry.opennetworking.org/voltha/voltha-onos "
- elif [ "${gerritProject}" = "voltha-openolt-adapter" ]; then
- IMAGES="adapter_open_olt "
- elif [ "${gerritProject}" = "voltha-openonu-adapter" ]; then
- IMAGES="adapter_open_onu "
- elif [ "${gerritProject}" = "voltha-api-server" ]; then
- IMAGES="afrouter afrouterd "
- elif [ "${gerritProject}" = "bbsim" ]; then
- IMAGES="bbsim "
- elif [ "${gerritProject}" = "pyvoltha" ]; then
- IMAGES="adapter_open_onu "
- elif [ "${gerritProject}" = "voltha-lib-go" ]; then
- IMAGES="rw_core ro_core adapter_open_olt "
- elif [ "${gerritProject}" = "voltha-protos" ]; then
- IMAGES="rw_core ro_core adapter_open_olt adapter_open_onu ofagent "
- else
- echo "No images to push"
- fi
-
- for I in \$IMAGES
- do
- EXTRA_HELM_FLAGS+="--set images.\$I.tag=citest,images.\$I.pullPolicy=Never "
- done
-
- if [ "${gerritProject}" = "voltha-helm-charts" ]; then
- export CHART_PATH=$WORKSPACE/voltha-helm-charts
- export VOLTHA_CHART=\$CHART_PATH/voltha
- export VOLTHA_ADAPTER_OPEN_OLT_CHART=\$CHART_PATH/voltha-adapter-openolt
- export VOLTHA_ADAPTER_OPEN_ONU_CHART=\$CHART_PATH/voltha-adapter-openonu
- helm dep update \$VOLTHA_CHART
- helm dep update \$VOLTHA_ADAPTER_OPEN_OLT_CHART
- helm dep update \$VOLTHA_ADAPTER_OPEN_ONU_CHART
- fi
-
- if [ "${gerritProject}" = "voltctl" ]; then
- export VOLTCTL_VERSION=$(cat $WORKSPACE/voltctl/VERSION)
- cp $WORKSPACE/voltctl/voltctl $WORKSPACE/kind-voltha/bin/voltctl
- md5sum $WORKSPACE/kind-voltha/bin/voltctl
- fi
-
- printenv
-
- # start logging
- mkdir -p $WORKSPACE/att
- _TAG=kail-att kail -n voltha -n default > $WORKSPACE/att/onos-voltha-combined.log &
-
- cd $WORKSPACE/kind-voltha/
- ./voltha up
-
- # $NAME-env.sh contains the environment we used
- # Save value of EXTRA_HELM_FLAGS there to use in subsequent stages
- echo export EXTRA_HELM_FLAGS=\\"\$EXTRA_HELM_FLAGS\\" >> $NAME-env.sh
-
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR -e PowerSwitch"
-
- # By default, all tests tagged 'sanity' are run. This covers basic functionality
- # like running through the ATT workflow for a single subscriber.
- export TARGET=sanity-single-kind
-
- # If the Gerrit comment contains a line with "functional tests" then run the full
- # functional test suite. This covers tests tagged either 'sanity' or 'functional'.
- # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
- REGEX="functional tests"
- if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
- TARGET=functional-single-kind
- fi
-
- make -C $WORKSPACE/voltha-system-tests \$TARGET || true
-
- # stop logging
- P_IDS="$(ps e -ww -A | grep "_TAG=kail-att" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
-
- # get pods information
- kubectl get pods -o wide > $WORKSPACE/att/pods.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/att/pod-images.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/att/pod-imagesId.txt || true
- '''
- }
- }
- }
-
- stage('DT workflow') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/DTWorkflow"
- }
- steps {
- timeout(time: 15, unit: 'MINUTES') {
- sh '''
- cd $WORKSPACE/kind-voltha/
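-          # restore the deployment environment (including EXTRA_HELM_FLAGS) saved by the ATT workflow stage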
- source $NAME-env.sh
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
-
- # Workflow-specific flags
- export WITH_RADIUS=no
- export WITH_EAPOL=no
- export WITH_DHCP=no
- export WITH_IGMP=no
- export CONFIG_SADIS="external"
- export BBSIM_CFG="configs/bbsim-sadis-dt.yaml"
-
- if [[ "${gerritProject}" == voltha-helm-charts ]]; then
- export EXTRA_HELM_FLAGS+="--set global.image_tag=null "
- fi
-
- # start logging
- mkdir -p $WORKSPACE/dt
- _TAG=kail-dt kail -n voltha -n default > $WORKSPACE/dt/onos-voltha-combined.log &
-
- DEPLOY_K8S=n ./voltha up
-
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR -e PowerSwitch"
-
- # By default, all tests tagged 'sanityDt' are run. This covers basic functionality
- # like running through the DT workflow for a single subscriber.
- export TARGET=sanity-kind-dt
-
- # If the Gerrit comment contains a line with "functional tests" then run the full
- # functional test suite. This covers tests tagged either 'sanityDt' or 'functionalDt'.
- # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
- REGEX="functional tests"
- if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
- TARGET=functional-single-kind-dt
- fi
-
- make -C $WORKSPACE/voltha-system-tests \$TARGET || true
-
- # stop logging
- P_IDS="$(ps e -ww -A | grep "_TAG=kail-dt" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
-
- # get pods information
- kubectl get pods -o wide > $WORKSPACE/dt/pods.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/dt/pod-images.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/dt/pod-imagesId.txt || true
- '''
- }
- }
- }
-
- stage('TT workflow') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/TTWorkflow"
- }
- steps {
- timeout(time: 15, unit: 'MINUTES') {
- sh '''
- cd $WORKSPACE/kind-voltha/
- source $NAME-env.sh
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
-
- # Workflow-specific flags
- export WITH_RADIUS=no
- export WITH_EAPOL=no
- export WITH_DHCP=yes
- export WITH_IGMP=yes
- export CONFIG_SADIS="external"
- export BBSIM_CFG="configs/bbsim-sadis-tt.yaml"
-
- if [[ "${gerritProject}" == voltha-helm-charts ]]; then
- export EXTRA_HELM_FLAGS+="--set global.image_tag=null "
- fi
-
- # start logging
- mkdir -p $WORKSPACE/tt
- _TAG=kail-tt kail -n voltha -n default > $WORKSPACE/tt/onos-voltha-combined.log &
-
- DEPLOY_K8S=n ./voltha up
-
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR -e PowerSwitch"
-
- # By default, all tests tagged 'sanityTt' are run. This covers basic functionality
- # like running through the TT workflow for a single subscriber.
- export TARGET=sanity-kind-tt
-
- # If the Gerrit comment contains a line with "functional tests" then run the full
- # functional test suite. This covers tests tagged either 'sanityTt' or 'functionalTt'.
- # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
- REGEX="functional tests"
- if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
- TARGET=functional-single-kind-tt
- fi
-
- make -C $WORKSPACE/voltha-system-tests \$TARGET || true
-
- # stop logging
-          P_IDS="$(ps e -ww -A | grep "_TAG=kail-tt" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
-
- # get pods information
- kubectl get pods -o wide > $WORKSPACE/tt/pods.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/tt/pod-images.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/tt/pod-imagesId.txt || true
- '''
- }
- }
- }
- }
-
- post {
- always {
- sh '''
-
- # get pods information
- kubectl get pods -o wide --all-namespaces
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}"
- helm ls --all-namespaces
-
- set +e
- cp $WORKSPACE/kind-voltha/install-$NAME.log $WORKSPACE/
-
- sync
- md5sum $WORKSPACE/kind-voltha/bin/voltctl
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log || true
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log || true
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log || true
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log || true
-
- gzip $WORKSPACE/att/onos-voltha-combined.log || true
- gzip $WORKSPACE/dt/onos-voltha-combined.log || true
- gzip $WORKSPACE/tt/onos-voltha-combined.log || true
-
- '''
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'RobotLogs/*/log*.html',
- otherFiles: '',
- outputFileName: 'RobotLogs/*/output*.xml',
- outputPath: '.',
- passThreshold: 100,
- reportFileName: 'RobotLogs/*/report*.html',
- unstableThreshold: 0]);
- archiveArtifacts artifacts: '*.log,**/*.log,**/*.gz,*.gz'
- }
- }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.7/voltha-dt-physical-build-and-tests.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-dt-physical-build-and-tests.groovy
deleted file mode 100644
index 2ac34bf..0000000
--- a/jjb/pipeline/voltha/voltha-2.7/voltha-dt-physical-build-and-tests.groovy
+++ /dev/null
@@ -1,448 +0,0 @@
-// Copyright 2019-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA built from patchset on a physical pod and run e2e test
-// uses kind-voltha to deploy voltha-2.X
-
-// Need this so that deployment_config has global scope when it's read later
-deployment_config = null
-localDeploymentConfigFile = null
-localKindVolthaValuesFile = null
-localSadisConfigFile = null
-
-// The pipeline assumes these variables are always defined
-if ( params.manualBranch != "" ) {
- GERRIT_EVENT_COMMENT_TEXT = ""
- GERRIT_PROJECT = ""
- GERRIT_BRANCH = "${params.manualBranch}"
- GERRIT_CHANGE_NUMBER = ""
- GERRIT_PATCHSET_NUMBER = ""
-}
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 120, unit: 'MINUTES')
- }
-
- environment {
- KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
- VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$WORKSPACE/voltha/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- NAME="minimal"
- FANCY=0
-    // VOL-2194: ONOS SSH and REST ports hardcoded to 30115/30120 in tests
- ONOS_SSH_PORT=30115
- ONOS_API_PORT=30120
- }
-
- stages {
- stage ('Initialize') {
- steps {
- sh returnStdout: false, script: """
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
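-          # tear down any VOLTHA left over from a previous run before wiping the workspace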
- test -e $WORKSPACE/voltha/kind-voltha/voltha && cd $WORKSPACE/voltha/kind-voltha && ./voltha down
- cd $WORKSPACE
- rm -rf $WORKSPACE/*
- """
- script {
- if (env.configRepo && ! env.localConfigDir) {
- env.localConfigDir = "$WORKSPACE"
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configRepo}"
- }
- localDeploymentConfigFile = "${env.localConfigDir}/${params.deploymentConfigFile}"
- localKindVolthaValuesFile = "${env.localConfigDir}/${params.kindVolthaValuesFile}"
- localSadisConfigFile = "${env.localConfigDir}/${params.sadisConfigFile}"
- }
- }
- }
-
- stage('Repo') {
- steps {
- checkout(changelog: true,
- poll: false,
- scm: [$class: 'RepoScm',
- manifestRepositoryUrl: "${params.manifestUrl}",
- manifestBranch: "${params.branch}",
- currentBranch: true,
- destinationDir: 'voltha',
- forceSync: true,
- resetFirst: true,
- quiet: true,
- jobs: 4,
- showAllChanges: true]
- )
- }
- }
-
- stage('Get Patch') {
- when {
- expression { params.manualBranch == "" }
- }
- steps {
- sh returnStdout: false, script: """
- cd voltha
- repo download "${gerritProject}" "${gerritChangeNumber}/${gerritPatchsetNumber}"
- """
- }
- }
-
- stage('Check config files') {
- steps {
- script {
- try {
- deployment_config = readYaml file: "${localDeploymentConfigFile}"
- } catch (err) {
- echo "Error reading ${localDeploymentConfigFile}"
- throw err
- }
- sh returnStdout: false, script: """
- if [ ! -e ${localKindVolthaValuesFile} ]; then echo "${localKindVolthaValuesFile} not found"; exit 1; fi
- if [ ! -e ${localSadisConfigFile} ]; then echo "${localSadisConfigFile} not found"; exit 1; fi
- """
- }
- }
- }
-
- stage('Create KinD Cluster') {
- steps {
- sh returnStdout: false, script: """
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/voltha/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- cd $WORKSPACE/voltha/kind-voltha/
- JUST_K8S=y ./voltha up
- """
- }
- }
-
- stage('Build and Push Images') {
- when {
- expression { params.manualBranch == "" }
- }
- steps {
- sh returnStdout: false, script: """
-
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/voltha/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- if ! [[ "${gerritProject}" =~ ^(voltha-system-tests|kind-voltha|voltha-helm-charts)\$ ]]; then
- make -C $WORKSPACE/voltha/${gerritProject} DOCKER_REPOSITORY=voltha/ DOCKER_TAG=citest docker-build
- docker images | grep citest
- for image in \$(docker images -f "reference=*/*citest" --format "{{.Repository}}")
- do
- echo "Pushing \$image to nodes"
- kind load docker-image \$image:citest --name voltha-\$NAME --nodes voltha-\$NAME-worker,voltha-\$NAME-worker2
- docker rmi \$image:citest \$image:latest || true
- done
- fi
- """
- }
- }
-
- stage('Deploy Voltha') {
- environment {
- WITH_RADIUS="no"
- WITH_EAPOL="no"
- WITH_DHCP="no"
- WITH_IGMP="no"
- CONFIG_SADIS="no"
- WITH_SIM_ADAPTERS="no"
- DEPLOY_K8S="no"
- VOLTHA_LOG_LEVEL="DEBUG"
- }
- steps {
- script {
- sh returnStdout: false, script: """
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/voltha/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- export EXTRA_HELM_FLAGS+='--set log_agent.enabled=False -f ${localKindVolthaValuesFile} '
-
- IMAGES=""
- if [ "${gerritProject}" = "voltha-go" ]; then
- IMAGES="rw_core ro_core "
- elif [ "${gerritProject}" = "ofagent-py" ]; then
- IMAGES="ofagent "
- elif [ "${gerritProject}" = "voltha-onos" ]; then
- IMAGES="onos "
- elif [ "${gerritProject}" = "voltha-openolt-adapter" ]; then
- IMAGES="adapter_open_olt "
- elif [ "${gerritProject}" = "voltha-openonu-adapter" ]; then
- IMAGES="adapter_open_onu "
- elif [ "${gerritProject}" = "voltha-openonu-adapter-go" ]; then
- IMAGES="adapter_open_onu_go "
- elif [ "${gerritProject}" = "voltha-api-server" ]; then
- IMAGES="afrouter afrouterd "
- else
- echo "No images to push"
- fi
-
- for I in \$IMAGES
- do
- EXTRA_HELM_FLAGS+="--set images.\$I.tag=citest,images.\$I.pullPolicy=Never "
- done
-
- if [ "${gerritProject}" = "voltha-helm-charts" ]; then
- export CHART_PATH=$WORKSPACE/voltha/voltha-helm-charts
- export VOLTHA_CHART=\$CHART_PATH/voltha
- export VOLTHA_ADAPTER_OPEN_OLT_CHART=\$CHART_PATH/voltha-adapter-openolt
- export VOLTHA_ADAPTER_OPEN_ONU_CHART=\$CHART_PATH/voltha-adapter-openonu
- helm dep update \$VOLTHA_CHART
- helm dep update \$VOLTHA_ADAPTER_OPEN_OLT_CHART
- helm dep update \$VOLTHA_ADAPTER_OPEN_ONU_CHART
- fi
-
- cd $WORKSPACE/voltha/kind-voltha/
- echo \$EXTRA_HELM_FLAGS
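-          # stream combined ONOS/VOLTHA logs to the workspace while the stack comes up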
- kail -n voltha -n default > $WORKSPACE/onos-voltha-combined.log &
- ./voltha up
-
- set +e
-
- # Remove noise from voltha-core logs
- voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
- voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
- # Remove noise from openolt logs
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
- """
- }
- }
- }
-
- stage('Deploy Kafka Dump Chart') {
- steps {
- script {
- sh returnStdout: false, script: """
- helm repo add cord https://charts.opencord.org
- helm repo update
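-            # Helm v2 takes the release name via -n/--name, Helm v3 takes it as a positional argument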
- if helm version -c --short|grep v2 -q; then
- helm install -n voltha-kafka-dump cord/voltha-kafka-dump
- else
- helm install voltha-kafka-dump cord/voltha-kafka-dump
- fi
- """
- }
- }
- }
-
- stage('Push Tech-Profile') {
- when {
- expression { params.profile != "Default" }
- }
- steps {
- sh returnStdout: false, script: """
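-          # copy the selected tech profile into the etcd pod and store it under the
-          # XGS-PON technology-profile key (TP ID 64) consumed by VOLTHA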
- etcd_container=\$(kubectl get pods -n voltha | grep voltha-etcd-cluster | awk 'NR==1{print \$1}')
- kubectl cp $WORKSPACE/voltha/voltha-system-tests/tests/data/TechProfile-${profile}.json voltha/\$etcd_container:/tmp/flexpod.json
- kubectl exec -it \$etcd_container -n voltha -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/XGS-PON/64'
- """
- }
- }
-
- stage('Push Sadis-config') {
- steps {
- sh returnStdout: false, script: """
- curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:$ONOS_API_PORT/onos/v1/network/configuration --data @${localSadisConfigFile}
- """
- }
- }
-
- stage('Reinstall OLT software') {
- when {
- expression { params.reinstallOlt }
- }
- steps {
- script {
- deployment_config.olts.each { olt ->
- sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --remove asfvolt16 && dpkg --purge asfvolt16'"
- waitUntil {
- olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
- return olt_sw_present.toInteger() == 0
- }
- if ( params.branch == 'voltha-2.3' ) {
- oltDebVersion = oltDebVersionVoltha23
- } else {
- oltDebVersion = oltDebVersionMaster
- }
- sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --install ${oltDebVersion}'"
- waitUntil {
- olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
- return olt_sw_present.toInteger() == 1
- }
- if ( olt.fortygig ) {
- // If the OLT is connected to a 40G switch interface, set the NNI port to be downgraded
- sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'echo port ce128 sp=40000 >> /broadcom/qax.soc ; /opt/bcm68620/svk_init.sh'"
- }
- }
- }
- }
- }
-
- stage('Restart OLT processes') {
- steps {
- script {
- deployment_config.olts.each { olt ->
- sh returnStdout: false, script: """
- ssh-keyscan -H ${olt.ip} >> ~/.ssh/known_hosts
- sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; reboot'
- sleep 120
- """
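-          // block until the rebooted OLT logs at least one "onu discover indication"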
- waitUntil {
- onu_discovered = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'grep \"onu discover indication\" /var/log/openolt.log | wc -l'"
- return onu_discovered.toInteger() > 0
- }
- }
- }
- }
- }
-
- stage('Run E2E Tests') {
- environment {
- ROBOT_CONFIG_FILE="${localDeploymentConfigFile}"
- ROBOT_MISC_ARGS="${params.extraRobotArgs} --removekeywords wuks -d $WORKSPACE/RobotLogs -v container_log_dir:$WORKSPACE "
- ROBOT_FILE="Voltha_DT_PODTests.robot"
- }
- steps {
- sh returnStdout: false, script: """
- cd voltha
- mkdir -p $WORKSPACE/RobotLogs
-
- # If the Gerrit comment contains a line with "functional tests" then run the full
- # functional test suite. This covers tests tagged either 'sanity' or 'functional'.
- # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
- REGEX="functional tests"
- if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
- ROBOT_MISC_ARGS+="-i functionalDt"
- fi
- # Likewise for dataplane tests
- REGEX="dataplane tests"
- if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
- ROBOT_MISC_ARGS+="-i dataplaneDt"
- fi
-
- make -C $WORKSPACE/voltha/voltha-system-tests voltha-dt-test || true
- """
- }
- }
-
- stage('After-Test Delay') {
- when {
- expression { params.manualBranch == "" }
- }
- steps {
- sh returnStdout: false, script: """
- # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
- REGEX="hardware test with delay\$"
- [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]] && sleep 10m || true
- """
- }
- }
- }
-
- post {
- always {
- sh returnStdout: false, script: '''
- set +e
- cp $WORKSPACE/voltha/kind-voltha/install-minimal.log $WORKSPACE/
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -o wide
- kubectl get pods -n voltha -o wide
-
- sync
- pkill kail || true
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-
- gzip $WORKSPACE/onos-voltha-combined.log
-
-      ## collect events; the voltha-kafka-dump chart should be running by now
- kubectl get pods | grep -i voltha-kafka-dump | grep -i running
- if [[ $? == 0 ]]; then
- kubectl exec -it `kubectl get pods | grep -i voltha-kafka-dump | grep -i running | cut -f1 -d " "` ./voltha-dump-events.sh > $WORKSPACE/voltha-events.log
- fi
- '''
- script {
- deployment_config.olts.each { olt ->
- sh returnStdout: false, script: """
- until sshpass -p ${olt.pass} scp ${olt.user}@${olt.ip}:/var/log/openolt.log $WORKSPACE/openolt-${olt.ip}.log
- do
-              echo "Fetching openolt.log failed, retrying..."
- sleep 10
- done
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.ip}.log # Remove escape sequences
- until sshpass -p ${olt.pass} scp ${olt.user}@${olt.ip}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.ip}.log
- do
- echo "Fetching dev_mgmt_daemon.log failed, retrying..."
- sleep 10
- done
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.ip}.log # Remove escape sequences
- """
- }
- }
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'RobotLogs/log*.html',
- otherFiles: '',
- outputFileName: 'RobotLogs/output*.xml',
- outputPath: '.',
- passThreshold: 100,
- reportFileName: 'RobotLogs/report*.html',
- unstableThreshold: 0]);
- archiveArtifacts artifacts: '*.log,*.gz'
- }
- }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.7/voltha-go-multi-tests.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-go-multi-tests.groovy
deleted file mode 100644
index e7bb4f0..0000000
--- a/jjb/pipeline/voltha/voltha-2.7/voltha-go-multi-tests.groovy
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests
-// uses kind-voltha to deploy voltha-2.X
-// uses bbsim to simulate OLT/ONUs
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 60, unit: 'MINUTES')
- }
- environment {
- KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
- VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- NAME="minimal"
- FANCY=0
- WITH_SIM_ADAPTERS="n"
- WITH_RADIUS="y"
- WITH_BBSIM="y"
- DEPLOY_K8S="y"
- VOLTHA_LOG_LEVEL="DEBUG"
- CONFIG_SADIS="external"
- BBSIM_CFG="configs/bbsim-sadis-att.yaml"
- }
- stages {
- stage('Clone kind-voltha') {
- steps {
- step([$class: 'WsCleanup'])
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
- stage('Clone voltha-system-tests') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${branch}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
-
- stage('Deploy Voltha') {
- steps {
- timeout(time: 15, unit: 'MINUTES') {
- sh """
- export EXTRA_HELM_FLAGS=""
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${params.extraHelmFlags} "
-
- cd $WORKSPACE/kind-voltha/
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down || ./voltha down
- ./voltha up
- """
- }
- }
- }
-
- stage('Run E2E Tests') {
- steps {
-      // this may potentially fail, as we don't know in advance how many times these tests are repeated
- timeout(time: 30, unit: 'MINUTES') {
- sh """
- set +e
- mkdir -p $WORKSPACE/RobotLogs
-
- cd $WORKSPACE/kind-voltha/scripts
- ./log-collector.sh > /dev/null &
- ./log-combine.sh > /dev/null &
-
- for i in \$(seq 1 ${testRuns})
- do
- export ROBOT_MISC_ARGS="${params.extraRobotArgs} -d $WORKSPACE/RobotLogs/\$i -e PowerSwitch"
- make -C $WORKSPACE/voltha-system-tests ${makeTarget}
- echo "Completed run: \$i"
- echo ""
- done
- """
- }
- }
- }
- }
-
- post {
- always {
- sh '''
- set +e
- cp $WORKSPACE/kind-voltha/install-minimal.log $WORKSPACE/
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -o wide
- kubectl get pods -n voltha -o wide
-
- sleep 60 # Wait for log-collector and log-combine to complete
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_go voltha-ofagent >> $WORKSPACE/error-report.log
- extract_errors_python onos >> $WORKSPACE/error-report.log
-
- gzip error-report.log || true
-
- cd $WORKSPACE/kind-voltha/scripts/logger/combined/
- tar czf $WORKSPACE/container-logs.tgz *
-
- cd $WORKSPACE
- gzip *-combined.log || true
-
- ## shut down voltha but leave kind-voltha cluster
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha/
- DEPLOY_K8S=n WAIT_ON_DOWN=y ./voltha down
- '''
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: '**/log*.html',
- otherFiles: '',
- outputFileName: '**/output*.xml',
- outputPath: 'RobotLogs',
- passThreshold: 100,
- reportFileName: '**/report*.html',
- unstableThreshold: 0]);
- archiveArtifacts artifacts: '*.log,*.gz,*.tgz'
-
- }
- }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.7/voltha-go-tests.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-go-tests.groovy
deleted file mode 100644
index 61f600c..0000000
--- a/jjb/pipeline/voltha/voltha-2.7/voltha-go-tests.groovy
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests
-// uses kind-voltha to deploy voltha-2.X
-// uses bbsim to simulate OLT/ONUs
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 60, unit: 'MINUTES')
- }
- environment {
- KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
- VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$PATH:$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- NAME="minimal"
- FANCY=0
- WITH_SIM_ADAPTERS="no"
- WITH_RADIUS="yes"
- WITH_BBSIM="yes"
- DEPLOY_K8S="yes"
- VOLTHA_LOG_LEVEL="DEBUG"
- CONFIG_SADIS="external"
- BBSIM_CFG="configs/bbsim-sadis-att.yaml"
- ROBOT_MISC_ARGS="${params.extraRobotArgs} -d $WORKSPACE/RobotLogs -e PowerSwitch"
- KARAF_HOME="${params.karafHome}"
- DIAGS_PROFILE="VOLTHA_PROFILE"
- }
- stages {
-
- stage('Clone kind-voltha') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- // refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
- stage('Cleanup') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh """
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha/
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down || true
- """
- }
- }
- }
- stage('Clone voltha-system-tests') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- // refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${branch}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
-
- stage('Deploy Voltha') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh """
- export EXTRA_HELM_FLAGS=""
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- if [ "${workFlow}" == "DT" ]; then
- export WITH_DHCP=no
- export WITH_IGMP=no
- export WITH_EAPOL=no
- export WITH_RADIUS=no
- export BBSIM_CFG="configs/bbsim-sadis-dt.yaml"
- fi
-
- EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${params.extraHelmFlags} "
- cd $WORKSPACE/kind-voltha/
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down || ./voltha down
- ./voltha up
- """
- }
- }
- }
-
- stage('Run E2E Tests') {
- steps {
- timeout(time: 5, unit: 'MINUTES') {
- sh '''
- set +e
- mkdir -p $WORKSPACE/RobotLogs
-
- cd $WORKSPACE/kind-voltha/scripts
- ./log-collector.sh > /dev/null &
- ./log-combine.sh > /dev/null &
-
- make -C $WORKSPACE/voltha-system-tests ${makeTarget} || true
- '''
- }
- }
- }
- }
-
- post {
- always {
- sh '''
- set +e
- cp $WORKSPACE/kind-voltha/install-minimal.log $WORKSPACE/
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -o wide
- kubectl get pods -n voltha -o wide
-
- sleep 60 # Wait for log-collector and log-combine to complete
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_go voltha-ofagent >> $WORKSPACE/error-report.log
- extract_errors_python onos >> $WORKSPACE/error-report.log
-
- gzip error-report.log || true
-
- cd $WORKSPACE/kind-voltha/scripts/logger/combined/
- tar czf $WORKSPACE/container-logs.tgz *
-
- cd $WORKSPACE
- gzip *-combined.log || true
-
- ## shut down voltha but leave kind-voltha cluster
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha/
- DEPLOY_K8S=n WAIT_ON_DOWN=y ./voltha down
- kubectl delete deployment voltctl || true
- '''
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'RobotLogs/log*.html',
- otherFiles: '',
- outputFileName: 'RobotLogs/output*.xml',
- outputPath: '.',
- passThreshold: 100,
- reportFileName: 'RobotLogs/report*.html',
- unstableThreshold: 0]);
- archiveArtifacts artifacts: '*.log,*.gz,*.tgz'
-
- }
- }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.7/voltha-nightly-tests-bbsim.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-nightly-tests-bbsim.groovy
deleted file mode 100644
index 31aa380..0000000
--- a/jjb/pipeline/voltha/voltha-2.7/voltha-nightly-tests-bbsim.groovy
+++ /dev/null
@@ -1,289 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests
-// uses kind-voltha to deploy voltha-2.X
-// uses bbsim to simulate OLT/ONUs
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 100, unit: 'MINUTES')
- }
- environment {
- KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
- VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$PATH:$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- NAME="minimal"
- FANCY=0
- WITH_SIM_ADAPTERS="no"
- WITH_RADIUS="yes"
- WITH_BBSIM="yes"
- DEPLOY_K8S="yes"
- VOLTHA_LOG_LEVEL="DEBUG"
- CONFIG_SADIS="external"
- BBSIM_CFG="configs/bbsim-sadis-att.yaml"
- ROBOT_MISC_ARGS="-e PowerSwitch ${params.extraRobotArgs}"
- KARAF_HOME="${params.karafHome}"
- DIAGS_PROFILE="VOLTHA_PROFILE"
- NUM_OF_BBSIM="${olts}"
- }
- stages {
- stage('Clone kind-voltha') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- // refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
- stage('Cleanup') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh """
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha/
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down || ./voltha down
- """
- }
- }
- }
- stage('Clone voltha-system-tests') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${branch}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- script {
- sh(script:"""
- if [ '${volthaSystemTestsChange}' != '' ] ; then
- cd $WORKSPACE/voltha-system-tests;
- git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
- fi
- """)
- }
- }
- }
-
- stage('Deploy Voltha') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh """
- export EXTRA_HELM_FLAGS=""
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- export INFRA_NS="infra"
- echo "on master, using default settings for kind-voltha"
- fi
-
- EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${params.extraHelmFlags} --set defaults.image_registry=mirror.registry.opennetworking.org/ "
-
- cd $WORKSPACE/kind-voltha/
- ./voltha up
- """
- }
- }
- }
-
- stage('Functional Tests') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FunctionalTests"
- }
- steps {
- timeout(time: 20, unit: 'MINUTES') {
- sh '''
- set +e
- mkdir -p $ROBOT_LOGS_DIR
- cd $WORKSPACE/kind-voltha/scripts
- ./log-collector.sh > /dev/null &
- ./log-combine.sh > /dev/null &
-
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
- make -C $WORKSPACE/voltha-system-tests ${makeTarget} || true
- '''
- }
- }
- }
-
- stage('Alarm Tests') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/AlarmTests"
- }
- when {
- expression {
- return params.withAlarms
- }
- }
- steps {
- timeout(time: 5, unit: 'MINUTES') {
- sh '''
- set +e
- mkdir -p $WORKSPACE/RobotLogs
-
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
- make -C $WORKSPACE/voltha-system-tests ${makeAlarmtestTarget} || true
- '''
- }
- }
- }
-
- stage('Failure/Recovery Tests') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FailureTests"
- }
- steps {
- timeout(time: 30, unit: 'MINUTES') {
- sh '''
- set +e
- mkdir -p $WORKSPACE/RobotLogs
-
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
- make -C $WORKSPACE/voltha-system-tests ${makeFailtestTarget} || true
- '''
- }
- }
- }
- stage('Multiple OLT Tests') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/MultipleOLTTests"
- }
- steps {
- timeout(time: 15, unit: 'MINUTES') {
- sh '''
- if [ "${olts}" -gt 1 ]; then
- set +e
- mkdir -p $WORKSPACE/RobotLogs
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
- make -C $WORKSPACE/voltha-system-tests ${makeMultiOltTarget} || true
- fi
- '''
- }
- }
- }
-
- stage('Error Tests') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ErrorTests"
- }
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh '''
- set +e
- mkdir -p $WORKSPACE/RobotLogs
-
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
- make -C $WORKSPACE/voltha-system-tests ${makeErrortestTarget} || true
- '''
- }
- }
- }
- }
-
- post {
- always {
- sh '''
- set +e
- cp $WORKSPACE/kind-voltha/install-minimal.log $WORKSPACE/
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -o wide
- kubectl get pods -n voltha -o wide
-
- sleep 60 # Wait for log-collector and log-combine to complete
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_go voltha-ofagent >> $WORKSPACE/error-report.log
- extract_errors_python onos >> $WORKSPACE/error-report.log
-
- gzip error-report.log || true
-
- cd $WORKSPACE/kind-voltha/scripts/logger/combined/
- tar czf $WORKSPACE/container-logs.tgz *
-
- cd $WORKSPACE
- gzip *-combined.log || true
-
-              ## shut down voltha but leave the kind cluster running
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha/
- DEPLOY_K8S=n WAIT_ON_DOWN=y ./voltha down
- kubectl delete deployment voltctl || true
- '''
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: '**/log*.html',
- otherFiles: '',
- outputFileName: '**/output*.xml',
- outputPath: 'RobotLogs',
- passThreshold: 100,
- reportFileName: '**/report*.html',
- unstableThreshold: 0]);
-
- archiveArtifacts artifacts: '*.log,*.gz,*.tgz'
-
- }
- }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.7/voltha-openonu-go-test-bbsim.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-openonu-go-test-bbsim.groovy
deleted file mode 100755
index 7462c85..0000000
--- a/jjb/pipeline/voltha/voltha-2.7/voltha-openonu-go-test-bbsim.groovy
+++ /dev/null
@@ -1,546 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests
-// uses kind-voltha to deploy voltha-2.X
-// uses bbsim to simulate OLT/ONUs
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 130, unit: 'MINUTES')
- }
- environment {
- KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
- VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$PATH:$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- NAME="minimal"
- FANCY=0
- WITH_SIM_ADAPTERS="no"
- WITH_RADIUS="yes"
- WITH_BBSIM="yes"
- DEPLOY_K8S="yes"
- VOLTHA_LOG_LEVEL="DEBUG"
- CONFIG_SADIS="external"
- BBSIM_CFG="configs/bbsim-sadis-att.yaml"
- ROBOT_MISC_ARGS="-e PowerSwitch ${params.extraRobotArgs}"
- KARAF_HOME="${params.karafHome}"
- DIAGS_PROFILE="VOLTHA_PROFILE"
- NUM_OF_BBSIM="${olts}"
- }
- stages {
- stage('Clone kind-voltha') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- // refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
- stage('Cleanup') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh """
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha/
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down || ./voltha down
- """
- }
- }
- }
- stage('Clone voltha-system-tests') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- // refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${branch}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
-
- stage('Deploy Voltha') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh """
- export EXTRA_HELM_FLAGS=""
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${params.extraHelmFlags} --set defaults.image_registry=mirror.registry.opennetworking.org/ "
-
- cd $WORKSPACE/kind-voltha/
- ./voltha up
- bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/kind-voltha/bin"
- """
- }
- }
- }
-
- stage('Run E2E Tests 1t1gem') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/1t1gem"
- }
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh '''
- # start logging
- mkdir -p $WORKSPACE/1t1gem
- _TAG=kail-1t1gem kail -n voltha -n default > $WORKSPACE/1t1gem/onos-voltha-combined.log &
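-              # the _TAG marker lets the "stop logging" step below find and kill this background kail process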
-
- mkdir -p $ROBOT_LOGS_DIR/1t1gem
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
- export KVSTOREPREFIX=voltha_voltha
-
- make -C $WORKSPACE/voltha-system-tests ${makeTarget} || true
-
- # stop logging
- P_IDS="$(ps e -ww -A | grep "_TAG=kail-1t1gem" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
-
- # get pods information
- kubectl get pods -o wide --all-namespaces > $WORKSPACE/1t1gem/pods.txt || true
- '''
- }
- }
- }
-
- stage('Run E2E Tests 1t4gem') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/1t4gem"
- }
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh '''
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha/
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
-
- export EXTRA_HELM_FLAGS=""
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
-
- # start logging
- mkdir -p $WORKSPACE/1t4gem
- _TAG=kail-1t4gem kail -n voltha -n default > $WORKSPACE/1t4gem/onos-voltha-combined.log &
-
- DEPLOY_K8S=n ./voltha up
-
- mkdir -p $ROBOT_LOGS_DIR/1t4gem
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
- export KVSTOREPREFIX=voltha_voltha
-
- make -C $WORKSPACE/voltha-system-tests ${make1t4gemTestTarget} || true
-
- # stop logging
- P_IDS="$(ps e -ww -A | grep "_TAG=kail-1t4gem" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
-
- # get pods information
- kubectl get pods -o wide --all-namespaces > $WORKSPACE/1t4gem/pods.txt || true
- '''
- }
- }
- }
-
- stage('Run E2E Tests 1t8gem') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/1t8gem"
- }
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh '''
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha/
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
-
- export EXTRA_HELM_FLAGS=""
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
-
- # start logging
- mkdir -p $WORKSPACE/1t8gem
- _TAG=kail-1t8gem kail -n voltha -n default > $WORKSPACE/1t8gem/onos-voltha-combined.log &
-
- DEPLOY_K8S=n ./voltha up
-
- mkdir -p $ROBOT_LOGS_DIR/1t8gem
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
- export KVSTOREPREFIX=voltha_voltha
-
- make -C $WORKSPACE/voltha-system-tests ${make1t8gemTestTarget} || true
-
- # stop logging
- P_IDS="$(ps e -ww -A | grep "_TAG=kail-1t8gem" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
-
- # get pods information
- kubectl get pods -o wide --all-namespaces > $WORKSPACE/1t8gem/pods.txt || true
- '''
- }
- }
- }
-
- stage('Run MIB Upload Tests') {
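-      // run only when the job is configured with a single simulated OLT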
- when { beforeAgent true; expression { return "${olts}" == "1" } }
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/openonu-go-MIB"
- }
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh '''
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha/
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
-
- export EXTRA_HELM_FLAGS=""
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
- export EXTRA_HELM_FLAGS+="--set pon=2,onu=2,controlledActivation=only-onu "
-
- # start logging
- mkdir -p $WORKSPACE/mib
- _TAG=kail-mib kail -n voltha -n default > $WORKSPACE/mib/onos-voltha-combined.log &
-
- DEPLOY_K8S=n ./voltha up
-
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
- export TARGET_DEFAULT=mib-upload-templating-openonu-go-adapter-test
-
- make -C $WORKSPACE/voltha-system-tests \$TARGET_DEFAULT || true
-
- # stop logging
- P_IDS="$(ps e -ww -A | grep "_TAG=kail-mib" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
-
- # get pods information
- kubectl get pods -o wide --all-namespaces > $WORKSPACE/mib/pods.txt || true
- '''
- }
- }
- }
-
- stage('Reconcile DT workflow') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ReconcileDT"
- }
- steps {
- timeout(time: 20, unit: 'MINUTES') {
- sh '''
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha/
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
-
- export EXTRA_HELM_FLAGS=""
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
-
- # Workflow-specific flags
- export WITH_RADIUS=no
- export WITH_EAPOL=no
- export WITH_DHCP=no
- export WITH_IGMP=no
- export CONFIG_SADIS="external"
- export BBSIM_CFG="configs/bbsim-sadis-dt.yaml"
-
- # start logging
- mkdir -p $WORKSPACE/reconciledt
- _TAG=kail-reconcile-dt kail -n voltha -n default > $WORKSPACE/reconciledt/onos-voltha-combined.log &
-
- DEPLOY_K8S=n ./voltha up
-
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR -e PowerSwitch"
-
- make -C $WORKSPACE/voltha-system-tests ${makeReconcileDtTestTarget} || true
-
- # stop logging
- P_IDS="$(ps e -ww -A | grep "_TAG=kail-reconcile-dt" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
-
- # get pods information
- kubectl get pods -o wide --all-namespaces > $WORKSPACE/reconciledt/pods.txt || true
- '''
- }
- }
- }
-
- stage('Reconcile ATT workflow') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ReconcileATT"
- }
- steps {
- timeout(time: 20, unit: 'MINUTES') {
- sh '''
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha/
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
-
- export EXTRA_HELM_FLAGS=""
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
-
- # Workflow-specific flags
- export WITH_RADIUS=yes
- export WITH_EAPOL=yes
- export WITH_BBSIM=yes
- export DEPLOY_K8S=yes
- export CONFIG_SADIS="external"
- export BBSIM_CFG="configs/bbsim-sadis-att.yaml"
-
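-          # when the patch under test is voltctl, use the freshly built binary instead of the one bundled with kind-voltha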
- if [ "${gerritProject}" = "voltctl" ]; then
- export VOLTCTL_VERSION=$(cat $WORKSPACE/voltctl/VERSION)
- cp $WORKSPACE/voltctl/voltctl $WORKSPACE/kind-voltha/bin/voltctl
- md5sum $WORKSPACE/kind-voltha/bin/voltctl
- fi
-
- # start logging
- mkdir -p $WORKSPACE/reconcileatt
- _TAG=kail-reconcile-att kail -n voltha -n default > $WORKSPACE/reconcileatt/onos-voltha-combined.log &
-
- DEPLOY_K8S=n ./voltha up
-
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR -e PowerSwitch"
-
- make -C $WORKSPACE/voltha-system-tests ${makeReconcileTestTarget} || true
-
- # stop logging
- P_IDS="$(ps e -ww -A | grep "_TAG=kail-reconcile-att" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
-
- # get pods information
- kubectl get pods -o wide --all-namespaces > $WORKSPACE/reconcileatt/pods.txt || true
- '''
- }
- }
- }
-
- stage('Reconcile TT workflow') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ReconcileTT"
- }
- steps {
- timeout(time: 20, unit: 'MINUTES') {
- sh '''
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha/
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
-
- export EXTRA_HELM_FLAGS=""
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
-
- # Workflow-specific flags
- export WITH_RADIUS=no
- export WITH_EAPOL=no
- export WITH_DHCP=yes
- export WITH_IGMP=yes
- export CONFIG_SADIS="external"
- export BBSIM_CFG="configs/bbsim-sadis-tt.yaml"
-
- # start logging
- mkdir -p $WORKSPACE/reconcilett
- _TAG=kail-reconcile-tt kail -n voltha -n default > $WORKSPACE/reconcilett/onos-voltha-combined.log &
-
- DEPLOY_K8S=n ./voltha up
-
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR -e PowerSwitch"
-
- make -C $WORKSPACE/voltha-system-tests ${makeReconcileTtTestTarget} || true
-
- # stop logging
- P_IDS="$(ps e -ww -A | grep "_TAG=kail-reconcile-tt" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
-
- # get pods information
- kubectl get pods -o wide --all-namespaces > $WORKSPACE/reconcilett/pods.txt || true
- '''
- }
- }
- }
- }
- post {
- always {
- sh '''
- # get pods information
- kubectl get pods -o wide
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}"
- helm ls
-
- sync
- pkill kail || true
- md5sum $WORKSPACE/kind-voltha/bin/voltctl
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log || true
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log || true
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log || true
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log || true
-
- gzip $WORKSPACE/onos-voltha-combined.log || true
- '''
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'RobotLogs/*/log*.html',
- otherFiles: '',
- outputFileName: 'RobotLogs/*/output*.xml',
- outputPath: '.',
- passThreshold: 100,
- reportFileName: 'RobotLogs/*/report*.html',
- unstableThreshold: 0]);
- archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.txt'
- }
- }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.7/voltha-physical-build-and-tests.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-physical-build-and-tests.groovy
deleted file mode 100644
index f0b6787..0000000
--- a/jjb/pipeline/voltha/voltha-2.7/voltha-physical-build-and-tests.groovy
+++ /dev/null
@@ -1,456 +0,0 @@
-// Copyright 2019-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA built from a patchset on a physical pod and run e2e tests
-// uses kind-voltha to deploy voltha-2.X
-
-// NOTE we are importing the library even if it's global so that it's
-// easier to change the keywords during a replay
-library identifier: 'cord-jenkins-libraries@master',
- retriever: modernSCM([
- $class: 'GitSCMSource',
- remote: 'https://gerrit.opencord.org/ci-management.git'
-])
-
-// Need this so that deployment_config has global scope when it's read later
-deployment_config = null
-localDeploymentConfigFile = null
-localKindVolthaValuesFile = null
-localSadisConfigFile = null
-
-// The pipeline assumes these variables are always defined
-if ( params.manualBranch != "" ) {
- GERRIT_EVENT_COMMENT_TEXT = ""
- GERRIT_PROJECT = ""
- GERRIT_BRANCH = "${params.manualBranch}"
- GERRIT_CHANGE_NUMBER = ""
- GERRIT_PATCHSET_NUMBER = ""
-}
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 120, unit: 'MINUTES')
- }
-
- environment {
- KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
- VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$WORKSPACE/bin:$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- NAME="minimal"
- FANCY=0
- //VOL-2194 ONOS SSH and REST ports hardcoded to 30115/30120 in tests
- ONOS_SSH_PORT=30115
- ONOS_API_PORT=30120
- }
-
- stages {
- stage ('Initialize') {
- steps {
- sh returnStdout: false, script: """
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- test -e $WORKSPACE/kind-voltha/voltha && cd $WORKSPACE/kind-voltha && ./voltha down
- cd $WORKSPACE
- rm -rf $WORKSPACE/*
- """
- script {
- if (env.configRepo && ! env.localConfigDir) {
- env.localConfigDir = "$WORKSPACE"
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configRepo}"
- }
- localDeploymentConfigFile = "${env.localConfigDir}/${params.deploymentConfigFile}"
- localKindVolthaValuesFile = "${env.localConfigDir}/${params.kindVolthaValuesFile}"
- localSadisConfigFile = "${env.localConfigDir}/${params.sadisConfigFile}"
- }
- }
- }
-
- stage('Download Code') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- getVolthaCode([
- branch: "${branch}",
- gerritProject: "${gerritProject}",
- gerritRefspec: "${gerritRefspec}",
- volthaSystemTestsChange: "${volthaSystemTestsChange}",
- volthaHelmChartsChange: "${volthaHelmChartsChange}",
- ])
- }
- }
-
- stage('Check config files') {
- steps {
- script {
- try {
- deployment_config = readYaml file: "${localDeploymentConfigFile}"
- } catch (err) {
- echo "Error reading ${localDeploymentConfigFile}"
- throw err
- }
- sh returnStdout: false, script: """
- if [ ! -e ${localKindVolthaValuesFile} ]; then echo "${localKindVolthaValuesFile} not found"; exit 1; fi
- if [ ! -e ${localSadisConfigFile} ]; then echo "${localSadisConfigFile} not found"; exit 1; fi
- """
- }
- }
- }
-
- stage('Build patch') {
- steps {
- // NOTE that the correct patch has already been checked out
- // during the getVolthaCode step
- buildVolthaComponent("${gerritProject}")
- }
- }
-
- stage('Create KinD Cluster') {
- steps {
- sh returnStdout: false, script: """
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha/
- JUST_K8S=y ./voltha up
- """
- }
- }
-
- stage('Load image in kind nodes') {
- when {
- expression { params.manualBranch == "" }
- }
- steps {
- sh returnStdout: false, script: """
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- if ! [[ "${gerritProject}" =~ ^(voltha-system-tests|kind-voltha|voltha-helm-charts)\$ ]]; then
- docker images | grep citest
- for image in \$(docker images -f "reference=*/*citest" --format "{{.Repository}}")
- do
- echo "Pushing \$image to nodes"
- kind load docker-image \$image:citest --name voltha-\$NAME --nodes voltha-\$NAME-worker,voltha-\$NAME-worker2
- docker rmi \$image:citest \$image:latest || true
- done
- fi
- """
- }
- }
-
- stage('Deploy Voltha') {
- environment {
- WITH_SIM_ADAPTERS="no"
- WITH_RADIUS="yes"
- DEPLOY_K8S="no"
- VOLTHA_LOG_LEVEL="DEBUG"
- }
- steps {
- script {
- sh returnStdout: false, script: """
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- export EXTRA_HELM_FLAGS+='--set log_agent.enabled=False -f ${localKindVolthaValuesFile} '
-
- IMAGES=""
- if [ "${gerritProject}" = "voltha-go" ]; then
- IMAGES="rw_core ro_core "
- elif [ "${gerritProject}" = "ofagent-py" ]; then
- IMAGES="ofagent "
- elif [ "${gerritProject}" = "voltha-onos" ]; then
- IMAGES="onos "
- elif [ "${gerritProject}" = "voltha-openolt-adapter" ]; then
- IMAGES="adapter_open_olt "
- elif [ "${gerritProject}" = "voltha-openonu-adapter" ]; then
- IMAGES="adapter_open_onu "
- elif [ "${gerritProject}" = "voltha-openonu-adapter-go" ]; then
- IMAGES="adapter_open_onu_go "
- elif [ "${gerritProject}" = "voltha-api-server" ]; then
- IMAGES="afrouter afrouterd "
- else
- echo "No images to push"
- fi
-
- for I in \$IMAGES
- do
- EXTRA_HELM_FLAGS+="--set images.\$I.tag=citest,images.\$I.pullPolicy=Never "
- done
-
- if [ "${gerritProject}" = "voltha-helm-charts" ]; then
- export CHART_PATH=$WORKSPACE/voltha-helm-charts
- export VOLTHA_CHART=\$CHART_PATH/voltha
- export VOLTHA_ADAPTER_OPEN_OLT_CHART=\$CHART_PATH/voltha-adapter-openolt
- export VOLTHA_ADAPTER_OPEN_ONU_CHART=\$CHART_PATH/voltha-adapter-openonu
- helm dep update \$VOLTHA_CHART
- helm dep update \$VOLTHA_ADAPTER_OPEN_OLT_CHART
- helm dep update \$VOLTHA_ADAPTER_OPEN_ONU_CHART
- fi
-
- cd $WORKSPACE/kind-voltha/
- echo \$EXTRA_HELM_FLAGS
- kail -n voltha -n default > $WORKSPACE/onos-voltha-combined.log &
- ./voltha up
-
- set +e
-
- # Remove noise from voltha-core logs
- voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
- voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
- # Remove noise from openolt logs
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
- """
- }
- }
- }
-
- stage('Deploy Kafka Dump Chart') {
- steps {
- script {
- sh returnStdout: false, script: """
- helm repo add cord https://charts.opencord.org
- helm repo update
-            if helm version -c --short | grep -q v2; then
- helm install -n voltha-kafka-dump cord/voltha-kafka-dump
- else
- helm install voltha-kafka-dump cord/voltha-kafka-dump
- fi
- """
- }
- }
- }
-
- stage('Push Tech-Profile') {
- when {
- expression { params.profile != "Default" }
- }
- steps {
- sh returnStdout: false, script: """
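-          # copy the selected tech profile into the etcd pod and store it under the XGS-PON technology, profile id 64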
- etcd_container=\$(kubectl get pods -n voltha | grep voltha-etcd-cluster | awk 'NR==1{print \$1}')
- kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json voltha/\$etcd_container:/tmp/flexpod.json
- kubectl exec -it \$etcd_container -n voltha -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/XGS-PON/64'
- """
- }
- }
-
- stage('Push Sadis-config') {
- steps {
- sh returnStdout: false, script: """
- ssh-keygen -R [${deployment_config.nodes[0].ip}]:30115
- ssh-keyscan -p 30115 -H ${deployment_config.nodes[0].ip} >> ~/.ssh/known_hosts
- sshpass -p karaf ssh -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.dhcpl2relay"
- sshpass -p karaf ssh -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.aaa"
- sshpass -p karaf ssh -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.olt"
- sshpass -p karaf ssh -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager"
- sshpass -p karaf ssh -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager"
- curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:$ONOS_API_PORT/onos/v1/network/configuration --data @${localSadisConfigFile}
- """
- }
- }
-
- stage('Reinstall OLT software') {
- when {
- expression { params.reinstallOlt }
- }
- steps {
- script {
- deployment_config.olts.each { olt ->
- sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --remove asfvolt16 && dpkg --purge asfvolt16'"
- waitUntil {
- olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
- return olt_sw_present.toInteger() == 0
- }
- if ( params.branch == 'voltha-2.3' ) {
- oltDebVersion = oltDebVersionVoltha23
- } else {
- oltDebVersion = oltDebVersionMaster
- }
- sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --install ${oltDebVersion}'"
- waitUntil {
- olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
- return olt_sw_present.toInteger() == 1
- }
- if ( olt.fortygig ) {
- // If the OLT is connected to a 40G switch interface, set the NNI port to be downgraded
- sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'echo port ce128 sp=40000 >> /broadcom/qax.soc ; /opt/bcm68620/svk_init.sh'"
- }
- }
- }
- }
- }
-
- stage('Restart OLT processes') {
- steps {
- script {
- deployment_config.olts.each { olt ->
- sh returnStdout: false, script: """
- ssh-keyscan -H ${olt.ip} >> ~/.ssh/known_hosts
- sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; reboot'
- sleep 120
- """
- waitUntil {
- onu_discovered = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'grep \"onu discover indication\" /var/log/openolt.log | wc -l'"
- return onu_discovered.toInteger() > 0
- }
- }
- }
- }
- }
-
- stage('Run E2E Tests') {
- environment {
- ROBOT_CONFIG_FILE="${localDeploymentConfigFile}"
- ROBOT_MISC_ARGS="${params.extraRobotArgs} --removekeywords wuks -d $WORKSPACE/RobotLogs -v container_log_dir:$WORKSPACE "
- ROBOT_FILE="Voltha_PODTests.robot"
- }
- steps {
- sh returnStdout: false, script: """
- mkdir -p $WORKSPACE/RobotLogs
-
- # If the Gerrit comment contains a line with "functional tests" then run the full
- # functional test suite. This covers tests tagged either 'sanity' or 'functional'.
- # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
- REGEX="functional tests"
- if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
- ROBOT_MISC_ARGS+="-i functional"
- fi
- # Likewise for dataplane tests
- REGEX="dataplane tests"
- if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
- ROBOT_MISC_ARGS+="-i dataplane"
- fi
-
- make -C $WORKSPACE/voltha-system-tests voltha-test || true
- """
- }
- }
-
- stage('After-Test Delay') {
- when {
- expression { params.manualBranch == "" }
- }
- steps {
- sh returnStdout: false, script: """
- # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
- REGEX="hardware test with delay\$"
- [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]] && sleep 10m || true
- """
- }
- }
- }
-
- post {
- always {
- sh returnStdout: false, script: '''
- set +e
- cp $WORKSPACE/kind-voltha/install-minimal.log $WORKSPACE/
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -o wide
- kubectl get pods -n voltha -o wide
-
- sync
- pkill kail || true
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-
- gzip $WORKSPACE/onos-voltha-combined.log
-
- ## collect events, the chart should be running by now
- kubectl get pods | grep -i voltha-kafka-dump | grep -i running
- if [[ $? == 0 ]]; then
- kubectl exec -it `kubectl get pods | grep -i voltha-kafka-dump | grep -i running | cut -f1 -d " "` ./voltha-dump-events.sh > $WORKSPACE/voltha-events.log
- fi
- '''
- script {
- deployment_config.olts.each { olt ->
- sh returnStdout: false, script: """
- until sshpass -p ${olt.pass} scp ${olt.user}@${olt.ip}:/var/log/openolt.log $WORKSPACE/openolt-${olt.ip}.log
- do
-              echo "Fetching openolt.log failed, retrying..."
- sleep 10
- done
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.ip}.log # Remove escape sequences
- until sshpass -p ${olt.pass} scp ${olt.user}@${olt.ip}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.ip}.log
- do
- echo "Fetching dev_mgmt_daemon.log failed, retrying..."
- sleep 10
- done
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.ip}.log # Remove escape sequences
- """
- }
- }
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'RobotLogs/log*.html',
- otherFiles: '',
- outputFileName: 'RobotLogs/output*.xml',
- outputPath: '.',
- passThreshold: 100,
- reportFileName: 'RobotLogs/report*.html',
- unstableThreshold: 0]);
- archiveArtifacts artifacts: '*.log,*.gz'
- }
- }
-}
diff --git a/jjb/pipeline/voltha/voltha-2.7/voltha-system-test-bbsim.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-system-test-bbsim.groovy
deleted file mode 100644
index aa08ecb..0000000
--- a/jjb/pipeline/voltha/voltha-2.7/voltha-system-test-bbsim.groovy
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests
-// uses kind-voltha to deploy voltha-2.X
-// uses bbsim to simulate OLT/ONUs
-
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 80, unit: 'MINUTES')
- }
- environment {
- KUBECONFIG="$HOME/.kube/kind-config-voltha-full"
- VOLTCONFIG="$HOME/.volt/config-full"
- PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$WORKSPACE/kind-voltha/bin"
- TYPE="full"
- FANCY=0
- WITH_SIM_ADAPTERS="n"
- WITH_RADIUS="y"
- WITH_BBSIM="y"
- DEPLOY_K8S="y"
- VOLTHA_LOG_LEVEL="DEBUG"
- CONFIG_SADIS="external"
- BBSIM_CFG="configs/bbsim-sadis-att.yaml"
- ROBOT_MISC_ARGS="-d $WORKSPACE/RobotLogs"
- NUM_OF_ETCD=3
- SCHEDULE_ON_CONTROL_NODES="y"
- }
-
- stages {
- stage('Create Kubernetes Cluster') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh """
- git clone https://gerrit.opencord.org/kind-voltha
- pushd kind-voltha/
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- JUST_K8S=y ./voltha up
- popd
- """
- }
- }
- }
-
- stage('Setup log collector') {
- steps {
- sh """
- bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/kind-voltha/bin"
- kail -n voltha -n default > $WORKSPACE/onos-voltha-combined.log &
- """
- }
- }
-
- stage('Deploy Voltha') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh """
- export EXTRA_HELM_FLAGS=""
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- EXTRA_HELM_FLAGS+="${params.extraHelmFlags} "
- echo \$EXTRA_HELM_FLAGS
-
- pushd kind-voltha/
- ./voltha up
- popd
- """
- }
- }
- }
-
- stage('Run E2E Tests') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh '''
- rm -rf $WORKSPACE/RobotLogs; mkdir -p $WORKSPACE/RobotLogs
- git clone -b ${branch} https://gerrit.opencord.org/voltha-system-tests
- make ROBOT_DEBUG_LOG_OPT="-l sanity_log.html -r sanity_report.html -o sanity_output.xml" -C $WORKSPACE/voltha-system-tests ${makeTarget}
- '''
- }
- }
- }
-
- stage('Kubernetes ETCD Scale Test') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh '''
- make ROBOT_DEBUG_LOG_OPT="-l functional_log.html -r functional_report.html -o functional_output.xml" -C $WORKSPACE/voltha-system-tests system-scale-test
- '''
- }
- }
- }
-
- stage('Kubernetes ETCD Failure Test') {
- steps {
- timeout(time: 10, unit: 'MINUTES') {
- sh '''
- make ROBOT_DEBUG_LOG_OPT="-l failure_log.html -r failure_report.html -o failure_output.xml" -C $WORKSPACE/voltha-system-tests failure-test
- '''
- }
- }
- }
-
- }
-
- post {
- always {
- sh '''
- set +e
- cp $WORKSPACE/kind-voltha/install-full.log $WORKSPACE/
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -o wide
- kubectl get pods -n voltha -o wide
-
- sync
- pkill kail || true
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-
- ## shut down kind-voltha
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
- cd $WORKSPACE/kind-voltha
- WAIT_ON_DOWN=y ./voltha down
-
- '''
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'RobotLogs/*log*.html',
- otherFiles: '',
- outputFileName: 'RobotLogs/*output*.xml',
- outputPath: '.',
- passThreshold: 100,
- reportFileName: 'RobotLogs/*report*.html',
- unstableThreshold: 0]);
- archiveArtifacts artifacts: '*.log'
-
- }
- }
-}
diff --git a/jjb/pipeline/voltha/master/periodic-bbsim-tests.groovy b/jjb/pipeline/voltha/voltha-2.8/bbsim-tests.groovy
similarity index 85%
rename from jjb/pipeline/voltha/master/periodic-bbsim-tests.groovy
rename to jjb/pipeline/voltha/voltha-2.8/bbsim-tests.groovy
index fb49076..ef6964b 100755
--- a/jjb/pipeline/voltha/master/periodic-bbsim-tests.groovy
+++ b/jjb/pipeline/voltha/voltha-2.8/bbsim-tests.groovy
@@ -44,22 +44,22 @@
}
stage('Deploy Voltha') {
if (teardown) {
- timeout(20) {
+ timeout(10) {
script {
sh """
mkdir -p $WORKSPACE/${testTarget}-components
- _TAG=kail-startup kail -n infra -n voltha > $WORKSPACE/${testTarget}-components/onos-voltha-startup-combined.log &
+ _TAG=kail-startup kail -n ${infraNamespace} -n ${volthaNamespace} > $WORKSPACE/${testTarget}-components/onos-voltha-startup-combined.log &
"""
// if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
def localCharts = false
- if (volthaHelmChartsChange != "") {
+ if (gerritProject == "voltha-helm-charts" || branch != "master") {
localCharts = true
}
// NOTE: temporary workaround to expose ONOS node ports
- def localHelmFlags = extraHelmFlags + " --set global.log_level=${logLevel.toUpperCase()} " +
+ def localHelmFlags = extraHelmFlags.trim() + " --set global.log_level=${logLevel.toUpperCase()} " +
" --set onos-classic.onosSshPort=30115 " +
" --set onos-classic.onosApiPort=30120 " +
" --set onos-classic.onosOfPort=31653 " +
@@ -89,6 +89,9 @@
kill -9 \$P_ID
done
fi
+ cd $WORKSPACE/${testTarget}-components/
+ gzip -k onos-voltha-startup-combined.log
+ rm onos-voltha-startup-combined.log
"""
}
sh """
@@ -108,7 +111,7 @@
// start logging
sh """
mkdir -p $WORKSPACE/${testTarget}-components
- _TAG=kail-${workflow} kail -n infra -n voltha > $WORKSPACE/${testTarget}-components/onos-voltha-combined.log &
+ _TAG=kail-${workflow} kail -n ${infraNamespace} -n ${volthaNamespace} > $WORKSPACE/${testTarget}-components/onos-voltha-combined.log &
"""
sh """
mkdir -p $WORKSPACE/${robotLogsDir}/${testTarget}-robot
@@ -127,6 +130,10 @@
kill -9 \$P_ID
done
fi
+ cd $WORKSPACE/${testTarget}-components/
+ rm onos-voltha-combined.log.gz || true
+ gzip -k onos-voltha-combined.log
+ rm onos-voltha-combined.log
"""
getPodsInfo("$WORKSPACE/${testTarget}-components")
}
@@ -152,7 +159,8 @@
outputPath: '.',
passThreshold: 100,
reportFileName: "RobotLogs/*/report*.html",
- unstableThreshold: 0]);
+ unstableThreshold: 0,
+ onlyCritical: true]);
}
pipeline {
@@ -203,11 +211,25 @@
kind get clusters | grep ${clusterName} | wc -l
"""
if (clusterExists.trim() == "0") {
- createKubernetesCluster([nodes: 3, name: clusterName])
+ createKubernetesCluster([branch: "${branch}", nodes: 3, name: clusterName])
}
}
}
}
+ stage('Replace voltctl') {
+    // if the project is voltctl, override the downloaded binary with the one built from the patch
+ when {
+ expression {
+ return gerritProject == "voltctl"
+ }
+ }
+ steps{
+ sh """
+ mv `ls $WORKSPACE/voltctl/release/voltctl-*-linux-amd*` $WORKSPACE/bin/voltctl
+ chmod +x $WORKSPACE/bin/voltctl
+ """
+ }
+ }
stage('Load image in kind nodes') {
when {
expression {
diff --git a/jjb/pipeline/voltha/voltha-2.8/device-management-mock-tests.groovy b/jjb/pipeline/voltha/voltha-2.8/device-management-mock-tests.groovy
new file mode 100644
index 0000000..10263dd
--- /dev/null
+++ b/jjb/pipeline/voltha/voltha-2.8/device-management-mock-tests.groovy
@@ -0,0 +1,172 @@
+// Copyright 2017-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// NOTE we are importing the library even if it's global so that it's
+// easier to change the keywords during a replay
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
+def localCharts = false
+
+pipeline {
+
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 90, unit: 'MINUTES')
+ }
+ environment {
+ KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
+ PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ROBOT_MISC_ARGS="-d $WORKSPACE/RobotLogs"
+ }
+
+ stages {
+
+ stage('Download Code') {
+ steps {
+ getVolthaCode([
+ branch: "${branch}",
+ gerritProject: "${gerritProject}",
+ gerritRefspec: "${gerritRefspec}",
+ volthaSystemTestsChange: "${volthaSystemTestsChange}",
+ volthaHelmChartsChange: "${volthaHelmChartsChange}",
+ ])
+ }
+ }
+ stage('Build Redfish Importer Image') {
+ steps {
+ sh """
+ make -C $WORKSPACE/device-management/\$1 DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build-importer
+ """
+ }
+ }
+ stage('Build demo_test Image') {
+ steps {
+ sh """
+ make -C $WORKSPACE/device-management/\$1/demo_test DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
+ """
+ }
+ }
+ stage('Build mock-redfish-server Image') {
+ steps {
+ sh """
+ make -C $WORKSPACE/device-management/\$1/mock-redfish-server DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
+ """
+ }
+ }
+ stage('Create K8s Cluster') {
+ steps {
+ createKubernetesCluster([branch: "${branch}", nodes: 3])
+ }
+ }
+ stage('Load image in kind nodes') {
+ steps {
+ loadToKind()
+ }
+ }
+ stage('Deploy Voltha') {
+ steps {
+ script {
+ if (branch != "master" || volthaHelmChartsChange != "") {
+ // if we're using a release or testing changes in the charts, then use the local clone
+ localCharts = true
+ }
+ }
+ volthaDeploy([
+ workflow: "att",
+ extraHelmFlags: extraHelmFlags,
+ dockerRegistry: "mirror.registry.opennetworking.org",
+ localCharts: localCharts,
+ ])
+ // start logging
+ sh """
+ mkdir -p $WORKSPACE/att
+ _TAG=kail-att kail -n infra -n voltha -n default > $WORKSPACE/att/onos-voltha-combined.log &
+ """
+ // forward ONOS and VOLTHA ports
+ sh """
+ _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101&
+ _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181&
+ _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha svc/voltha-voltha-api 55555:55555&
+ """
+ }
+ }
+
+ stage('Run E2E Tests') {
+ steps {
+ sh '''
+ mkdir -p $WORKSPACE/RobotLogs
+
+          # point the kubernetes deployment manifests at images tagged citest and set imagePullPolicy: Never
+ sed -i 's/master/citest/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
+ sed -i 's/imagePullPolicy: Always/imagePullPolicy: Never/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
+ make -C $WORKSPACE/device-management functional-mock-test || true
+ '''
+ }
+ }
+ }
+
+ post {
+ always {
+ sh '''
+ set +e
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
+ kubectl get nodes -o wide
+ kubectl get pods -o wide --all-namespaces
+
+ sync
+ pkill kail || true
+
+ ## Pull out errors from log files
+ extract_errors_go() {
+ echo
+ echo "Error summary for $1:"
+ grep $1 $WORKSPACE/att/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
+ echo
+ }
+
+ extract_errors_python() {
+ echo
+ echo "Error summary for $1:"
+ grep $1 $WORKSPACE/att/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
+ echo
+ }
+
+ extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
+ extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
+ extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
+ extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
+
+ gzip $WORKSPACE/att/onos-voltha-combined.log
+ '''
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: 'RobotLogs/log*.html',
+ otherFiles: '',
+ outputFileName: 'RobotLogs/output*.xml',
+ outputPath: '.',
+ passThreshold: 80,
+ reportFileName: 'RobotLogs/report*.html',
+ unstableThreshold: 0]);
+ archiveArtifacts artifacts: '**/*.log,**/*.gz'
+ }
+ }
+}
diff --git a/jjb/pipeline/voltha/voltha-2.8/physical-build.groovy b/jjb/pipeline/voltha/voltha-2.8/physical-build.groovy
new file mode 100644
index 0000000..93be6a2
--- /dev/null
+++ b/jjb/pipeline/voltha/voltha-2.8/physical-build.groovy
@@ -0,0 +1,397 @@
+// Copyright 2017-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// used to deploy VOLTHA and configure ONOS on physical PODs
+
+// NOTE we are importing the library even if it's global so that it's
+// easier to change the keywords during a replay
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
+def infraNamespace = "infra"
+def volthaNamespace = "voltha"
+
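+// helper to install a custom OLT adapter chart when a chart other than onf/voltha-adapter-openolt is requested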
+def deploy_custom_oltAdapterChart(namespace, name, chart, extraHelmFlags) {
+ sh """
+ helm install --create-namespace --set defaults.image_pullPolicy=Always --namespace ${namespace} ${extraHelmFlags} ${name} ${chart}
+ """
+}
+
+pipeline {
+
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 35, unit: 'MINUTES')
+ }
+ environment {
+ PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
+ KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
+ }
+
+ stages{
+ stage('Download Code') {
+ steps {
+ getVolthaCode([
+ branch: "${branch}",
+ volthaSystemTestsChange: "${volthaSystemTestsChange}",
+ volthaHelmChartsChange: "${volthaHelmChartsChange}",
+ ])
+ }
+ }
+ stage ("Parse deployment configuration file") {
+ steps {
+ sh returnStdout: true, script: "rm -rf ${configBaseDir}"
+ sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
+ script {
+ if ( params.workFlow == "DT" ) {
+ deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
+ }
+ else if ( params.workFlow == "TT" )
+ {
+ deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
+ }
+ else
+ {
+ deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
+ }
+ }
+ }
+ }
+ stage('Clean up') {
+ steps {
+ timeout(15) {
+ script {
+ helmTeardown(["default", infraNamespace, volthaNamespace])
+ }
+ timeout(1) {
+ sh returnStdout: false, script: '''
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
+ '''
+ }
+ }
+ }
+ }
+ stage('Install Voltha') {
+ steps {
+ timeout(20) {
+ script {
+ // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
+ def localCharts = false
+ if (volthaHelmChartsChange != "" || branch != "master") {
+ localCharts = true
+ }
+
+ // should the config file be suffixed with the workflow? see "deployment_config"
+ def localHelmFlags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml --set global.log_level=${logLevel} "
+
+ if (workFlow.toLowerCase() == "dt") {
+ localHelmFlags += " --set radius.enabled=false "
+ }
+ if (workFlow.toLowerCase() == "tt") {
+ localHelmFlags += " --set radius.enabled=false --set global.incremental_evto_update=true "
+ if (enableMultiUni.toBoolean()) {
+ localHelmFlags += " --set voltha-adapter-openonu.adapter_open_onu.uni_port_mask=${uniPortMask} "
+ }
+ }
+
+          // NOTE: temporary workaround to expose the ONOS node ports (pod-config needs to be updated to contain these values)
+          // and to connect the ofagent to all ONOS instances
+ localHelmFlags = localHelmFlags + " --set onos-classic.onosSshPort=30115 " +
+ "--set onos-classic.onosApiPort=30120 " +
+ "--set onos-classic.onosOfPort=31653 " +
+ "--set onos-classic.individualOpenFlowNodePorts=true " +
+ "--set voltha.onos_classic.replicas=${params.NumOfOnos}"
+
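+          // when BBSim replicas are requested, size the simulated topology with the configured onu/pon counts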
+ if (bbsimReplicas.toInteger() != 0) {
+ localHelmFlags = localHelmFlags + " --set onu=${onuNumber},pon=${ponNumber} "
+ }
+
+          // add user-specified helm flags last so they take priority over everything else
+ localHelmFlags = localHelmFlags + " ${extraHelmFlags}"
+
+ if(openoltAdapterChart != "onf/voltha-adapter-openolt") {
+ localHelmFlags = localHelmFlags + " --set voltha-adapter-openolt.enabled=false"
+ }
+
+ volthaDeploy([
+ workflow: workFlow.toLowerCase(),
+ extraHelmFlags: localHelmFlags,
+ localCharts: localCharts,
+ kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
+ onosReplica: params.NumOfOnos,
+ atomixReplica: params.NumOfAtomix,
+ kafkaReplica: params.NumOfKafka,
+ etcdReplica: params.NumOfEtcd,
+ bbsimReplica: bbsimReplicas.toInteger(),
+ ])
+
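+          // deploy the custom OLT adapter chart on top of the base install when a non-default chart was requested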
+ if(openoltAdapterChart != "onf/voltha-adapter-openolt"){
+ deploy_custom_oltAdapterChart(volthaNamespace, oltAdapterReleaseName, openoltAdapterChart, extraHelmFlags)
+ }
+ }
+ sh """
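+          # long-running port-forwards for the VOLTHA API, etcd and kafka; JENKINS_NODE_COOKIE=dontKillMe keeps them alive across steps and the while loops restart them if they exit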
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd ${params.VolthaEtcdPort}:2379; done"&
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
+ ps aux | grep port-forward
+ """
+ getPodsInfo("$WORKSPACE")
+ }
+ }
+ }
+ stage('Push Tech-Profile') {
+ steps {
+ script {
+ if ( params.configurePod && params.profile != "Default" ) {
+ for(int i=0; i < deployment_config.olts.size(); i++) {
+ def tech_prof_directory = "XGS-PON"
+ if (deployment_config.olts[i].containsKey("board_technology")){
+ tech_prof_directory = deployment_config.olts[i]["board_technology"]
+ }
+ timeout(1) {
+ sh returnStatus: true, script: """
+ export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
+ etcd_container=\$(kubectl get pods -n ${infraNamespace} | grep etcd | awk 'NR==1{print \$1}')
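+                # load the workflow-specific tech profiles into etcd under service/voltha/technology_profiles/<technology>/<profile-id>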
+ if [[ "${workFlow}" == "TT" ]]; then
+ if [[ "${params.enableMultiUni}" == "true" ]]; then
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-HSIA.json \$etcd_container:/tmp/hsia.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-VoIP.json \$etcd_container:/tmp/voip.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-MCAST-AdditionalBW-None.json \$etcd_container:/tmp/mcast_additionalBW_none.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_none.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-multi-uni-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast_additionalBW_na.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_na.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/67'
+ else
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-None.json \$etcd_container:/tmp/mcast_additionalBW_none.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_none.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast_additionalBW_na.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast_additionalBW_na.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/67'
+ fi
+ else
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json \$etcd_container:/tmp/flexpod.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
+ fi
+ """
+ }
+ timeout(1) {
+ sh returnStatus: true, script: """
+ export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
+ etcd_container=\$(kubectl get pods -n ${infraNamespace} | grep etcd | awk 'NR==1{print \$1}')
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'ETCDCTL_API=3 etcdctl get --prefix service/voltha/technology_profiles/${tech_prof_directory}/64'
+ """
+ }
+ }
+ }
+ }
+ }
+ }
+ stage('Push MIB templates') {
+ steps {
+ sh """
+ export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
+ etcd_container=\$(kubectl get pods -n ${infraNamespace} | grep etcd | awk 'NR==1{print \$1}')
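+ # copy the vendor MIB templates into the etcd pod and store them under both the go_templates and templates prefixes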
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/MIB_Alpha.json \$etcd_container:/tmp/MIB_Alpha.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/go_templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/MIB_Scom.json \$etcd_container:/tmp/MIB_Scom.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Scom.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/go_templates/SCOM/Glasfaser-Modem/090140.1.0.304'
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Scom.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/templates/SCOM/Glasfaser-Modem/090140.1.0.304'
+ """
+ }
+ }
+ stage('Push Sadis-config') {
+ steps {
+ timeout(1) {
+ sh returnStatus: true, script: """
+ if [[ "${workFlow}" == "DT" ]]; then
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-DT.json
+ elif [[ "${workFlow}" == "TT" ]]; then
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-TT.json
+ else
+ # this is the ATT case; rename the file to *-sadis-ATT.json so that we can avoid special cases and just load the file
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis.json
+ fi
+ """
+ }
+ }
+ }
+ stage('Switch Configurations in ONOS') {
+ steps {
+ script {
+ if ( deployment_config.fabric_switches.size() > 0 ) {
+ timeout(1) {
+ def netcfg = "$WORKSPACE/${configBaseDir}/${configToscaDir}/voltha/${configFileName}-onos-netcfg-switch.json"
+ if (params.inBandManagement){
+ netcfg = "$WORKSPACE/${configBaseDir}/${configToscaDir}/voltha/${configFileName}-onos-netcfg-switch-inband.json"
+ }
+ sh """
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @${netcfg}
+ curl -sSL --user karaf:karaf -X POST http://${deployment_config.nodes[0].ip}:30120/onos/v1/applications/org.onosproject.segmentrouting/active
+ """
+ }
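+ // raise ONOS log levels for the relevant apps and retry until the segmentrouting app reports ACTIVE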
+ timeout(1) {
+ waitUntil {
+ sr_active_out = sh returnStatus: true, script: """
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.dhcpl2relay"
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.aaa"
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.olt"
+ #TRACE in the pipeliner is too chatty, moving to DEBUG
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.opencord.olt.driver"
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager"
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager"
+ curl -sSL --user karaf:karaf -X GET http://${deployment_config.nodes[0].ip}:30120/onos/v1/applications/org.onosproject.segmentrouting | jq '.state' | grep ACTIVE
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled false"
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.net.flow.impl.FlowRuleManager purgeOnDisconnection false"
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.net.meter.impl.MeterManager purgeOnDisconnection false"
+ """
+ return sr_active_out == 0
+ }
+ }
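+ // for every host/OLT pair, create a cross-connect on the fabric switch between the BNG port and the OLT aggregation port on the host s_tag VLAN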
+ timeout(7) {
+ for(int i=0; i < deployment_config.hosts.src.size(); i++) {
+ for(int j=0; j < deployment_config.olts.size(); j++) {
+ def aggPort = -1
+ if(deployment_config.olts[j].serial == deployment_config.hosts.src[i].olt){
+ aggPort = deployment_config.olts[j].aggPort
+ if(aggPort == -1){
+ throw new Exception("Upstream port for the olt is not configured, field aggPort is empty")
+ }
+ sh """
+ sleep 30 # NOTE why are we sleeping?
+ curl -X POST --user karaf:karaf --header 'Content-Type: application/json' --header 'Accept: application/json' -d '{"deviceId": "${deployment_config.fabric_switches[0].device_id}", "vlanId": "${deployment_config.hosts.src[i].s_tag}", "endpoints": [${deployment_config.fabric_switches[0].bngPort},${aggPort}]}' 'http://${deployment_config.nodes[0].ip}:30120/onos/segmentrouting/xconnect'
+ """
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ stage('Reinstall OLT software') {
+ steps {
+ script {
+ if ( params.reinstallOlt ) {
+ for(int i=0; i < deployment_config.olts.size(); i++) {
+ // NOTE what is oltDebVersion23? is that for VOLTHA-2.3? do we still need this differentiation?
+ sh returnStdout: true, script: """
+ ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
+ if [ "${params.inBandManagement}" == "true" ]; then
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'kill -9 `pgrep -f "[b]ash /opt/openolt/openolt_dev_mgmt_daemon_process_watchdog"` || true'
+ fi
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion}"
+ sleep 10
+ """
+ timeout(5) {
+ waitUntil {
+ olt_sw_present = sh returnStdout: true, script: """
+ if [ "${deployment_config.olts[i].oltDebVersion}" == *"asfvolt16"* ]; then
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asfvolt16 | wc -l'
+ else
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asgvolt64 | wc -l'
+ fi
+ if (${deployment_config.olts[i].fortygig}); then
+ if [[ "${params.inBandManagement}" == "true" ]]; then
+ ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'mkdir -p /opt/openolt/'
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/watchdog-script/* /opt/openolt/'
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/bal_cli_appl/example_user_appl /broadcom'
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp in-band-startup-script/* /etc/init.d/'
+ fi
+ fi
+ """
+ return olt_sw_present.toInteger() > 0
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ stage('Restart OLT processes') {
+ steps {
+ script {
+ // reboot the OLTs
+ for(int i=0; i < deployment_config.olts.size(); i++) {
+ if ( params.oltAdapterReleaseName != "open-olt" ) {
+ timeout(15) {
+ sh returnStdout: true, script: """
+ ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; rm -f /var/log/openolt_process_watchdog.log; reboot > /dev/null &' || true
+ """
+ }
+ } else {
+ sh returnStdout: true, script: """
+ ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'reboot > /dev/null &' || true
+ """
+ }
+ }
+ sh returnStdout: true, script: """
+ sleep ${params.waitTimerForOltUp}
+ """
+ // check that the dev_mgmt_daemon and openolt processes are running
+ for(int i=0; i < deployment_config.olts.size(); i++) {
+ if ( params.oltAdapterReleaseName != "open-olt" ) {
+ timeout(15) {
+ waitUntil {
+ devprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep dev_mgmt_daemon | wc -l'"
+ return devprocess.toInteger() > 0
+ }
+ }
+ timeout(15) {
+ waitUntil {
+ openoltprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep openolt | wc -l'"
+ return openoltprocess.toInteger() > 0
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ post {
+ aborted {
+ getPodsInfo("$WORKSPACE/failed")
+ sh """
+ kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.log || true
+ """
+ archiveArtifacts artifacts: '**/*.log,**/*.txt'
+ }
+ failure {
+ getPodsInfo("$WORKSPACE/failed")
+ sh """
+ kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.logs || true
+ """
+ archiveArtifacts artifacts: '**/*.log,**/*.txt'
+ }
+ always {
+ archiveArtifacts artifacts: '*.txt'
+ }
+ }
+}
diff --git a/jjb/pipeline/voltha/voltha-2.7/software-upgrades.groovy b/jjb/pipeline/voltha/voltha-2.8/software-upgrades.groovy
similarity index 79%
rename from jjb/pipeline/voltha/voltha-2.7/software-upgrades.groovy
rename to jjb/pipeline/voltha/voltha-2.8/software-upgrades.groovy
index ef28597..700ac94 100644
--- a/jjb/pipeline/voltha/voltha-2.7/software-upgrades.groovy
+++ b/jjb/pipeline/voltha/voltha-2.8/software-upgrades.groovy
@@ -22,17 +22,16 @@
])
def test_software_upgrade(name) {
stage('Deploy Voltha - '+ name) {
- def extraHelmFlags = "${extraHelmFlags} --set global.log_level=DEBUG,onu=1,pon=1 --set onos-classic.replicas=3,onos-classic.atomix.replicas=3 "
-
- extraHelmFlags = extraHelmFlags + """ --set voltha.services.controller[0].service=voltha-infra-onos-classic-0.voltha-infra-onos-classic-hs.infra.svc \
- --set voltha.services.controller[0].port=6653 \
- --set voltha.services.controller[0].address=voltha-infra-onos-classic-0.voltha-infra-onos-classic-hs.infra.svc:6653 \
- --set voltha.services.controller[1].service=voltha-infra-onos-classic-1.voltha-infra-onos-classic-hs.infra.svc \
- --set voltha.services.controller[1].port=6653 \
- --set voltha.services.controller[1].address=voltha-infra-onos-classic-1.voltha-infra-onos-classic-hs.infra.svc:6653 \
- --set voltha.services.controller[2].service=voltha-infra-onos-classic-2.voltha-infra-onos-classic-hs.infra.svc \
- --set voltha.services.controller[2].port=6653 \
- --set voltha.services.controller[2].address=voltha-infra-onos-classic-2.voltha-infra-onos-classic-hs.infra.svc:6653 """
+ def extraHelmFlags = extraHelmFlags.trim()
+ extraHelmFlags = extraHelmFlags + " --set global.log_level=DEBUG,onu=1,pon=1 --set onos-classic.replicas=3,onos-classic.atomix.replicas=3 "
+ if ("${name}" == "onos-app-upgrade" || "${name}" == "onu-software-upgrade") {
+ extraHelmFlags = extraHelmFlags + "--set global.image_tag=master --set onos-classic.image.tag=master "
+ }
+ if ("${name}" == "voltha-component-upgrade") {
+ extraHelmFlags = extraHelmFlags + "--set images.onos_config_loader.tag=master-onos-config-loader --set onos-classic.image.tag=master "
+ }
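+ // expose ONOS on fixed NodePorts so the tests can reach the SSH CLI (30115) and the REST API (30120)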
+ extraHelmFlags = extraHelmFlags + " --set onos-classic.onosSshPort=30115 --set onos-classic.onosApiPort=30120 "
+ extraHelmFlags = extraHelmFlags + " --set voltha.onos_classic.replicas=3"
//ONOS custom image handling
if ( onosImg.trim() != '' ) {
String[] split;
@@ -40,11 +39,13 @@
split = onosImg.split(':')
extraHelmFlags = extraHelmFlags + "--set onos-classic.image.repository=" + split[0] +",onos-classic.image.tag=" + split[1] + " "
}
-
+ def localCharts = false
+ if (branch != "master") {
+ localCharts = true
+ }
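+ // for release branches install from the locally checked-out voltha-helm-charts instead of the published charts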
// Currently only testing with ATT workflow
// TODO: Support for other workflows
- // NOTE localCharts is set to "true" so that we use the locally cloned version of the chart (set to voltha-2.7)
- volthaDeploy([workflow: "att", extraHelmFlags: extraHelmFlags, localCharts: true])
+ volthaDeploy([workflow: "att", extraHelmFlags: extraHelmFlags, localCharts: localCharts])
// start logging
sh """
rm -rf $WORKSPACE/${name} || true
@@ -109,11 +110,12 @@
export TARGET=voltha-comp-upgrade-test
fi
if [[ ${name} == 'onu-software-upgrade' ]]; then
- export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v onu_image_name:${onuImageName.trim()} -v onu_image_url:${onuImageUrl.trim()} -v onu_image_version:${onuImageVersion.trim()} -v onu_image_crc:${onuImageCrc.trim()} -v onu_image_local_dir:${onuImageLocalDir.trim()} -e PowerSwitch"
+ export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v image_version:${onuImageVersion.trim()} -v image_url:${onuImageUrl.trim()} -v image_vendor:${onuImageVendor.trim()} -v image_activate_on_success:${onuImageActivateOnSuccess.trim()} -v image_commit_on_success:${onuImageCommitOnSuccess.trim()} -v image_crc:${onuImageCrc.trim()} -e PowerSwitch"
export TARGET=onu-upgrade-test
fi
export VOLTCONFIG=$HOME/.volt/config-minimal
export KUBECONFIG=$HOME/.kube/kind-config-voltha-minimal
+ ROBOT_MISC_ARGS+=" -v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120"
# Run the specified tests
make -C $WORKSPACE/voltha-system-tests \$TARGET || true
"""
@@ -186,25 +188,9 @@
helmTeardown(['infra', 'voltha'])
}
}
- stage('Install latest voltctl') {
- steps {
- sh """
- mkdir -p $WORKSPACE/bin || true
- # install voltctl
- HOSTOS="\$(uname -s | tr "[:upper:]" "[:lower:"])"
- HOSTARCH="\$(uname -m | tr "[:upper:]" "[:lower:"])"
- if [ "\$HOSTARCH" == "x86_64" ]; then
- HOSTARCH="amd64"
- fi
- VC_VERSION="\$(curl --fail -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')"
- curl -Lo $WORKSPACE/bin/voltctl https://github.com/opencord/voltctl/releases/download/v\$VC_VERSION/voltctl-\$VC_VERSION-\$HOSTOS-\$HOSTARCH
- chmod +x $WORKSPACE/bin/voltctl
- """
- }
- }
stage('Create K8s Cluster') {
steps {
- createKubernetesCluster([nodes: 3])
+ createKubernetesCluster([branch: "${branch}", nodes: 3])
}
}
stage('Run Test') {
@@ -236,7 +222,8 @@
outputPath: '.',
passThreshold: 100,
reportFileName: 'RobotLogs/*/report*.html',
- unstableThreshold: 0]);
+ unstableThreshold: 0,
+ onlyCritical: true]);
archiveArtifacts artifacts: '*.log,**/*.log,**/*.gz,*.gz,*.txt,**/*.txt'
}
}
diff --git a/jjb/pipeline/voltha/voltha-2.8/tucson-build-and-test.groovy b/jjb/pipeline/voltha/voltha-2.8/tucson-build-and-test.groovy
new file mode 100644
index 0000000..78a0f5e
--- /dev/null
+++ b/jjb/pipeline/voltha/voltha-2.8/tucson-build-and-test.groovy
@@ -0,0 +1,364 @@
+
+// Copyright 2017-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// used to deploy VOLTHA and configure ONOS physical PODs
+// NOTE we are importing the library even if it's global so that it's
+// easier to change the keywords during a replay
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+def infraNamespace = "infra"
+def volthaNamespace = "voltha"
+def clusterName = "kind-ci"
+pipeline {
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 120, unit: 'MINUTES')
+ }
+ environment {
+ PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
+ KUBECONFIG="$HOME/.kube/kind-${clusterName}"
+ VOLTCONFIG="$HOME/.volt/config"
+ LOG_FOLDER="$WORKSPACE/${workflow}/"
+ APPS_TO_LOG="etcd kafka onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server onos-config-loader"
+
+ }
+ stages{
+ stage('Download Code') {
+ steps {
+ getVolthaCode([
+ branch: "${branch}",
+ gerritProject: "${gerritProject}",
+ gerritRefspec: "${gerritRefspec}",
+ volthaSystemTestsChange: "${volthaSystemTestsChange}",
+ volthaHelmChartsChange: "${volthaHelmChartsChange}",
+ ])
+ }
+ }
+ stage ("Parse deployment configuration file") {
+ steps {
+ sh returnStdout: true, script: "rm -rf ${configBaseDir}"
+ sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
+ script {
+
+ if (params.workflow.toUpperCase() == "TT") {
+ error("The Tucson POD does not support TT workflow at the moment")
+ }
+
+ if ( params.workflow.toUpperCase() == "DT" ) {
+ deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
+ }
+ else if ( params.workflow.toUpperCase() == "TT" ) {
+ deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
+ }
+ else {
+ deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
+ }
+ }
+ }
+ }
+ stage('Clean up') {
+ steps {
+ timeout(15) {
+ script {
+ helmTeardown(["default", infraNamespace, volthaNamespace])
+ }
+ timeout(1) {
+ sh returnStdout: false, script: '''
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
+ '''
+ }
+ }
+ }
+ }
+ stage('Build patch') {
+ steps {
+ // NOTE that the correct patch has already been checked out
+ // during the getVolthaCode step
+ buildVolthaComponent("${gerritProject}")
+ }
+ }
+ stage('Create K8s Cluster') {
+ steps {
+ script {
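+ // reuse an existing kind cluster named ${clusterName} if one is already present, otherwise create a fresh 3-node cluster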
+ def clusterExists = sh returnStdout: true, script: """
+ kind get clusters | grep ${clusterName} | wc -l
+ """
+ if (clusterExists.trim() == "0") {
+ createKubernetesCluster([branch: "${branch}", nodes: 3, name: clusterName])
+ }
+ }
+ }
+ }
+ stage('Load image in kind nodes') {
+ steps {
+ loadToKind()
+ }
+ }
+ stage('Install Voltha') {
+ steps {
+ timeout(20) {
+ script {
+ imageFlags = getVolthaImageFlags(gerritProject)
+ // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
+ def localCharts = false
+ if (volthaHelmChartsChange != "" || gerritProject == "voltha-helm-charts" || branch != "master") {
+ localCharts = true
+ }
+ def flags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml ${imageFlags} "
+ // NOTE temporary workaround: expose the ONOS node ports (pod-config needs to be updated to contain these values)
+ flags = flags + "--set onos-classic.onosSshPort=30115 " +
+ "--set onos-classic.onosApiPort=30120 " +
+ "--set onos-classic.onosOfPort=31653 " +
+ "--set onos-classic.individualOpenFlowNodePorts=true " + extraHelmFlags
+ volthaDeploy([
+ workflow: workFlow.toLowerCase(),
+ extraHelmFlags: flags,
+ localCharts: localCharts,
+ kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
+ onosReplica: 3,
+ atomixReplica: 3,
+ kafkaReplica: 3,
+ etcdReplica: 3,
+ ])
+ }
+ // start logging
+ sh """
+ rm -rf $WORKSPACE/${workFlow}/
+ mkdir -p $WORKSPACE/${workFlow}
+ _TAG=kail-${workFlow} kail -n infra -n voltha > $WORKSPACE/${workFlow}/onos-voltha-combined.log &
+ """
+ sh returnStdout: false, script: '''
+ # start logging with kail
+
+ mkdir -p $LOG_FOLDER
+
+ list=($APPS_TO_LOG)
+ for app in "${list[@]}"
+ do
+ echo "Starting logs for: ${app}"
+ _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
+ done
+ '''
+ sh """
+ JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
+ JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
+ JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
+ ps aux | grep port-forward
+ """
+ getPodsInfo("$WORKSPACE")
+ }
+ }
+ }
+ stage('Deploy Kafka Dump Chart') {
+ steps {
+ script {
+ sh returnStdout: false, script: """
+ helm repo add cord https://charts.opencord.org
+ helm repo update
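+ # helm v2 expects the release name via -n, while helm v3 takes it as the first positional argument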
+ if helm version -c --short|grep v2 -q; then
+ helm install -n voltha-kafka-dump cord/voltha-kafka-dump
+ else
+ helm install voltha-kafka-dump cord/voltha-kafka-dump
+ fi
+ """
+ }
+ }
+ }
+ stage('Push Tech-Profile') {
+ when {
+ expression { params.profile != "Default" }
+ }
+ steps {
+ sh returnStdout: false, script: """
+ etcd_container=\$(kubectl get pods -n voltha | grep voltha-etcd-cluster | awk 'NR==1{print \$1}')
+ kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json voltha/\$etcd_container:/tmp/flexpod.json
+ kubectl exec -it \$etcd_container -n voltha -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/XGS-PON/64'
+ """
+ }
+ }
+
+ stage('Push Sadis-config') {
+ steps {
+ sh returnStdout: false, script: """
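+ # refresh the ONOS SSH host key and raise log levels before pushing the sadis configuration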
+ ssh-keygen -R [${deployment_config.nodes[0].ip}]:30115
+ ssh-keyscan -p 30115 -H ${deployment_config.nodes[0].ip} >> ~/.ssh/known_hosts
+ sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.dhcpl2relay"
+ sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.aaa"
+ sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.olt"
+ sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager"
+ sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager"
+
+ if [[ "${workFlow.toUpperCase()}" == "DT" ]]; then
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-DT.json
+ elif [[ "${workFlow.toUpperCase()}" == "TT" ]]; then
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-TT.json
+ else
+ # this is the ATT case; rename the file to *-sadis-ATT.json so that we can avoid special cases and just load the file
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis.json
+ fi
+ """
+ }
+ }
+ stage('Reinstall OLT software') {
+ when {
+ expression { params.reinstallOlt }
+ }
+ steps {
+ script {
+ deployment_config.olts.each { olt ->
+ sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --remove asfvolt16 && dpkg --purge asfvolt16'"
+ waitUntil {
+ olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
+ return olt_sw_present.toInteger() == 0
+ }
+ if ( params.branch == 'voltha-2.3' ) {
+ oltDebVersion = oltDebVersionVoltha23
+ } else {
+ oltDebVersion = oltDebVersionMaster
+ }
+ sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --install ${oltDebVersion}'"
+ waitUntil {
+ olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
+ return olt_sw_present.toInteger() == 1
+ }
+ if ( olt.fortygig ) {
+ // If the OLT is connected to a 40G switch interface, set the NNI port to be downgraded
+ sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'echo port ce128 sp=40000 >> /broadcom/qax.soc ; /opt/bcm68620/svk_init.sh'"
+ }
+ }
+ }
+ }
+ }
+
+ stage('Restart OLT processes') {
+ steps {
+ script {
+ deployment_config.olts.each { olt ->
+ sh returnStdout: false, script: """
+ ssh-keyscan -H ${olt.ip} >> ~/.ssh/known_hosts
+ sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; reboot'
+ sleep 120
+ """
+ waitUntil {
+ onu_discovered = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'grep \"onu discover indication\" /var/log/openolt.log | wc -l'"
+ return onu_discovered.toInteger() > 0
+ }
+ }
+ }
+ }
+ }
+ stage('Run E2E Tests') {
+ steps {
+ script {
+ // different workflows need different make targets and different robot files
+ if ( params.workflow.toUpperCase() == "DT" ) {
+ robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
+ robotFile = "Voltha_DT_PODTests.robot"
+ makeTarget = "voltha-dt-test"
+ robotFunctionalKeyword = "-i functionalDt"
+ robotDataplaneKeyword = "-i dataplaneDt"
+ }
+ else if ( params.workflow.toUpperCase() == "TT" ) {
+ // TODO the TT tests have different tags; address once/if TT is supported on the Tucson POD
+ robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
+ robotFile = "Voltha_TT_PODTests.robot"
+ makeTarget = "voltha-tt-test"
+ robotFunctionalKeyword = "-i functionalTt"
+ robotDataplaneKeyword = "-i dataplaneTt"
+ }
+ else {
+ robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
+ robotFile = "Voltha_PODTests.robot"
+ makeTarget = "voltha-test"
+ robotFunctionalKeyword = "-i functional"
+ robotDataplaneKeyword = "-i dataplane"
+ }
+ }
+ sh returnStdout: false, script: """
+ mkdir -p $WORKSPACE/RobotLogs
+
+ export ROBOT_CONFIG_FILE="$WORKSPACE/${robotConfigFile}"
+ export ROBOT_MISC_ARGS="${params.extraRobotArgs} --removekeywords wuks -d $WORKSPACE/RobotLogs -v container_log_dir:$WORKSPACE "
+ export ROBOT_FILE="${robotFile}"
+
+ # If the Gerrit comment contains a line with "functional tests" then run the full
+ # functional test suite. This covers tests tagged either 'sanity' or 'functional'.
+ # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
+ REGEX="functional tests"
+ if [[ "${gerritComment}" =~ \$REGEX ]]; then
+ ROBOT_MISC_ARGS+="${robotFunctionalKeyword} "
+ fi
+ # Likewise for dataplane tests
+ REGEX="dataplane tests"
+ if [[ "${gerritComment}" =~ \$REGEX ]]; then
+ ROBOT_MISC_ARGS+="${robotDataplaneKeyword}"
+ fi
+
+ make -C $WORKSPACE/voltha-system-tests ${makeTarget} || true
+ """
+ }
+ }
+ }
+ post {
+ always {
+ // stop logging
+ sh """
+ P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${workFlow}" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_IDS" ]; then
+ echo \$P_IDS
+ for P_ID in \$P_IDS; do
+ kill -9 \$P_ID
+ done
+ fi
+ gzip $WORKSPACE/${workFlow}/onos-voltha-combined.log || true
+ """
+ sh '''
+ # stop the kail processes
+ list=($APPS_TO_LOG)
+ for app in "${list[@]}"
+ do
+ echo "Stopping logs for: ${app}"
+ _TAG="kail-$app"
+ P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
+ if [ -n "$P_IDS" ]; then
+ echo $P_IDS
+ for P_ID in $P_IDS; do
+ kill -9 $P_ID
+ done
+ fi
+ done
+ '''
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: 'RobotLogs/log*.html',
+ otherFiles: '',
+ outputFileName: 'RobotLogs/output*.xml',
+ outputPath: '.',
+ passThreshold: 100,
+ reportFileName: 'RobotLogs/report*.html',
+ unstableThreshold: 0,
+ onlyCritical: true]);
+ archiveArtifacts artifacts: '**/*.txt,**/*.gz,*.gz,**/*.log'
+ }
+ }
+}
+
+// refs/changes/06/24206/5
diff --git a/jjb/pipeline/voltha-dt-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-dt-physical-functional-tests.groovy
similarity index 73%
rename from jjb/pipeline/voltha-dt-physical-functional-tests.groovy
rename to jjb/pipeline/voltha/voltha-2.8/voltha-dt-physical-functional-tests.groovy
index 0f9d373..05e0902 100644
--- a/jjb/pipeline/voltha-dt-physical-functional-tests.groovy
+++ b/jjb/pipeline/voltha/voltha-2.8/voltha-dt-physical-functional-tests.groovy
@@ -12,6 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
node {
// Need this so that deployment_config has global scope when it's read later
deployment_config = null
@@ -23,7 +29,7 @@
label "${params.buildNode}"
}
options {
- timeout(time: 640, unit: 'MINUTES')
+ timeout(time: "${timeout}", unit: 'MINUTES')
}
environment {
@@ -33,26 +39,9 @@
}
stages {
- stage('Clone kind-voltha') {
- steps {
- step([$class: 'WsCleanup'])
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
stage('Clone voltha-system-tests') {
steps {
+ step([$class: 'WsCleanup'])
checkout([
$class: 'GitSCM',
userRemoteConfigs: [[
@@ -76,51 +65,9 @@
}
}
}
- stage('Clone cord-tester') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/cord-tester",
- refspec: "${cordTesterChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
- // This checkout allows us to show changes in Jenkins
- // we only do this on master as we don't branch all the repos for all the releases
- // (we should compute the difference by tracking the container version, not the code)
- stage('Download All the VOLTHA repos') {
- when {
- expression {
- return "${branch}" == 'master';
- }
- }
- steps {
- checkout(changelog: true,
- poll: false,
- scm: [$class: 'RepoScm',
- manifestRepositoryUrl: "${params.manifestUrl}",
- manifestBranch: "${params.branch}",
- currentBranch: true,
- destinationDir: 'voltha',
- forceSync: true,
- resetFirst: true,
- quiet: true,
- jobs: 4,
- showAllChanges: true]
- )
- }
- }
stage ('Initialize') {
steps {
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
+ sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
script {
deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
}
@@ -128,9 +75,8 @@
mkdir -p $WORKSPACE/bin
bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
cd $WORKSPACE
- if [ "${params.branch}" != "master" ]; then
- cd $WORKSPACE/kind-voltha
- source releases/${params.branch}
+ if [ "${params.branch}" == "voltha-2.8" ]; then
+ VOLTCTL_VERSION=1.6.11
else
VOLTCTL_VERSION=\$(curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')
fi
@@ -172,10 +118,6 @@
}
steps {
sh """
- cd $WORKSPACE/kind-voltha/scripts
- ./log-collector.sh > /dev/null &
- ./log-combine.sh > /dev/null &
-
mkdir -p $ROBOT_LOGS_DIR
if ( ${powerSwitch} ); then
export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanityDt -i functionalDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
@@ -227,11 +169,11 @@
ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ONOSHAScenarios"
}
steps {
- sh """
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
- make -C $WORKSPACE/voltha-system-tests voltha-test || true
- """
+ sh """
+ mkdir -p $ROBOT_LOGS_DIR
+ export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v workflow:${params.workFlow} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
+ make -C $WORKSPACE/voltha-system-tests voltha-test || true
+ """
}
}
@@ -280,43 +222,6 @@
kubectl get pods -n voltha -o wide
kubectl get pods -o wide
- sleep 60 # Wait for log-collector and log-combine to complete
-
- # Clean up "announcer" pod used by the tests if present
- kubectl delete pod announcer || true
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
- extract_errors_python onos >> $WORKSPACE/error-report.log
-
- gzip error-report.log || true
- rm error-report.log || true
-
- cd $WORKSPACE/kind-voltha/scripts/logger/combined/
- tar czf $WORKSPACE/container-logs.tgz *
- rm * || true
-
- cd $WORKSPACE
- gzip *-combined.log || true
- rm *-combined.log || true
-
# store information on running charts
helm ls > $WORKSPACE/helm-list.txt || true
@@ -324,10 +229,6 @@
kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-imagesId.txt || true
-
- # collect ETCD cluster logs
- mkdir -p $WORKSPACE/etcd
- printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
'''
script {
deployment_config.olts.each { olt ->
@@ -353,9 +254,10 @@
outputPath: 'RobotLogs',
passThreshold: 100,
reportFileName: '**/report*.html',
- unstableThreshold: 0
+ unstableThreshold: 0,
+ onlyCritical: true
]);
- archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log,*.txt'
+ archiveArtifacts artifacts: '**/*.log,**/*.tgz,*.txt'
}
}
}
diff --git a/jjb/pipeline/voltha-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-physical-functional-tests.groovy
similarity index 77%
copy from jjb/pipeline/voltha-physical-functional-tests.groovy
copy to jjb/pipeline/voltha/voltha-2.8/voltha-physical-functional-tests.groovy
index a2d9c8d..aca802b 100644
--- a/jjb/pipeline/voltha-physical-functional-tests.groovy
+++ b/jjb/pipeline/voltha/voltha-2.8/voltha-physical-functional-tests.groovy
@@ -12,6 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
node {
// Need this so that deployment_config has global scope when it's read later
deployment_config = null
@@ -23,7 +29,7 @@
label "${params.buildNode}"
}
options {
- timeout(time: 380, unit: 'MINUTES')
+ timeout(time: "${timeout}", unit: 'MINUTES')
}
environment {
@@ -32,26 +38,9 @@
PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
}
stages {
- stage('Clone kind-voltha') {
- steps {
- step([$class: 'WsCleanup'])
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
stage('Clone voltha-system-tests') {
steps {
+ step([$class: 'WsCleanup'])
checkout([
$class: 'GitSCM',
userRemoteConfigs: [[
@@ -67,23 +56,6 @@
])
}
}
- stage('Clone cord-tester') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/cord-tester",
- refspec: "${cordTesterChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
stage('Download All the VOLTHA repos') {
when {
expression {
@@ -108,7 +80,7 @@
}
stage ('Initialize') {
steps {
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
+ sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
script {
deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
}
@@ -116,9 +88,8 @@
mkdir -p $WORKSPACE/bin
bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
cd $WORKSPACE
- if [ "${params.branch}" != "master" ]; then
- cd $WORKSPACE/kind-voltha
- source releases/${params.branch}
+ if [ "${params.branch}" == "voltha-2.8" ]; then
+ VOLTCTL_VERSION=1.6.11
else
VOLTCTL_VERSION=\$(curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')
fi
@@ -158,10 +129,6 @@
}
steps {
sh """
- cd $WORKSPACE/kind-voltha/scripts
- ./log-collector.sh > /dev/null &
- ./log-combine.sh > /dev/null &
-
mkdir -p $ROBOT_LOGS_DIR
if ( ${powerSwitch} ); then
export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
@@ -238,54 +205,15 @@
}
post {
always {
+ getPodsInfo("$WORKSPACE/pods")
sh returnStdout: false, script: '''
set +e
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -n voltha -o wide
- kubectl get pods -o wide
- sleep 60 # Wait for log-collector and log-combine to complete
-
- # Clean up "announcer" pod used by the tests if present
- kubectl delete pod announcer || true
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
- extract_errors_python onos >> $WORKSPACE/error-report.log
-
- gzip error-report.log || true
- rm error-report.log || true
-
- cd $WORKSPACE/kind-voltha/scripts/logger/combined/
- tar czf $WORKSPACE/container-logs.tgz *
- rm * || true
-
+ # collect logs collected in the Robot Framework StartLogging keyword
cd $WORKSPACE
gzip *-combined.log || true
rm *-combined.log || true
- # store information on running charts
- helm ls > $WORKSPACE/helm-list.txt || true
-
# store information on the running pods
kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
@@ -320,12 +248,10 @@
outputPath: 'RobotLogs',
passThreshold: 100,
reportFileName: '**/report*.html',
- unstableThreshold: 0
+ unstableThreshold: 0,
+ onlyCritical: true
]);
- archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log'
- }
- unstable {
- step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${notificationEmail}", sendToIndividuals: false])
+ archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
}
}
}
diff --git a/jjb/pipeline/voltha-physical-soak-dt-tests.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-physical-soak-dt-tests.groovy
similarity index 68%
copy from jjb/pipeline/voltha-physical-soak-dt-tests.groovy
copy to jjb/pipeline/voltha/voltha-2.8/voltha-physical-soak-dt-tests.groovy
index 49b7d29..a9a2005 100644
--- a/jjb/pipeline/voltha-physical-soak-dt-tests.groovy
+++ b/jjb/pipeline/voltha/voltha-2.8/voltha-physical-soak-dt-tests.groovy
@@ -12,18 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
node {
// Need this so that deployment_config has global scope when it's read later
deployment_config = null
}
+def volthaNamespace = "voltha"
+
pipeline {
/* no label, executor is determined by JJB */
agent {
label "${params.buildNode}"
}
options {
- timeout(time: 280, unit: 'MINUTES')
+ timeout(time: "${timeout}", unit: 'MINUTES')
}
environment {
@@ -34,26 +42,9 @@
stages {
- stage('Clone kind-voltha') {
- steps {
- step([$class: 'WsCleanup'])
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
stage('Clone voltha-system-tests') {
steps {
+ step([$class: 'WsCleanup'])
checkout([
$class: 'GitSCM',
userRemoteConfigs: [[
@@ -77,23 +68,6 @@
}
}
}
- stage('Clone cord-tester') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/cord-tester",
- refspec: "${cordTesterChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
// This checkout allows us to show changes in Jenkins
// we only do this on master as we don't branch all the repos for all the releases
// (we should compute the difference by tracking the container version, not the code)
@@ -121,7 +95,7 @@
}
stage ('Initialize') {
steps {
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
+ sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
script {
deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
}
@@ -129,10 +103,8 @@
mkdir -p $WORKSPACE/bin
bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
cd $WORKSPACE
- if [ "${params.branch}" != "master" ]; then
- cd $WORKSPACE/kind-voltha
- source releases/${params.branch}
- VC_VERSION=1.1.8
+ if [ "${params.branch}" == "voltha-2.8" ]; then
+ VC_VERSION=1.6.11
else
VC_VERSION=\$(curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')
fi
@@ -172,16 +144,14 @@
}
steps {
sh """
- cd $WORKSPACE/kind-voltha/scripts
- ./log-collector.sh > /dev/null &
- ./log-combine.sh > /dev/null &
-
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="prometheus" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n cattle-prometheus svc/access-prometheus 31301:80; done"&
+ ps aux | grep port-forward
mkdir -p $ROBOT_LOGS_DIR
if [ "${params.testType}" == "Functional" ]; then
if ( ${powerSwitch} ); then
- export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+ export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
else
- export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+ export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
fi
make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
fi
@@ -199,7 +169,7 @@
sh """
mkdir -p $ROBOT_LOGS_DIR
if [ "${params.testType}" == "Failure" ]; then
- export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i soak -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+ export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i soak -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
fi
"""
@@ -216,7 +186,7 @@
sh """
mkdir -p $ROBOT_LOGS_DIR
if [ "${params.testType}" == "Dataplane" ]; then
- export ROBOT_MISC_ARGS="--removekeywords wuks -i BandwidthProfileUDPDt -i TechProfileDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+ export ROBOT_MISC_ARGS="--removekeywords wuks -i soakDataplane -e bbsim -e notready -d $ROBOT_LOGS_DIR -v SOAK_TEST:True -v logging:False -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
fi
"""
@@ -226,44 +196,14 @@
}
post {
always {
+ getPodsInfo("$WORKSPACE/pods")
sh returnStdout: false, script: '''
set +e
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -n voltha -o wide
- sleep 60 # Wait for log-collector and log-combine to complete
-
- # Clean up "announcer" pod used by the tests if present
- kubectl delete pod announcer || true
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
- extract_errors_python onos >> $WORKSPACE/error-report.log
-
- cd $WORKSPACE/kind-voltha/scripts/logger/combined/
- tar czf $WORKSPACE/container-logs.tgz *
-
+ # collect logs collected in the Robot Framework StartLogging keyword
cd $WORKSPACE
gzip *-combined.log || true
+ rm *-combined.log || true
# collect ETCD cluster logs
mkdir -p $WORKSPACE/etcd
@@ -291,12 +231,18 @@
outputPath: 'RobotLogs',
passThreshold: 100,
reportFileName: '**/report*.html',
- unstableThreshold: 0
+ unstableThreshold: 0,
+ onlyCritical: true
]);
- archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log'
- }
- unstable {
- step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${notificationEmail}", sendToIndividuals: false])
+ // get cpu usage by container
+ sh """
+ mkdir -p $WORKSPACE/plots || true
+ cd $WORKSPACE/voltha-system-tests
+ source ./vst_venv/bin/activate || true
+ sleep 60 # we have to wait for prometheus to collect all the information
+ python scripts/sizing.py -o $WORKSPACE/plots -a 0.0.0.0:31301 -n ${volthaNamespace} -s 3600 || true
+ """
+ archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt,plots/*'
}
}
}
diff --git a/jjb/pipeline/voltha/voltha-2.8/voltha-scale-multi-stack.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-scale-multi-stack.groovy
new file mode 100644
index 0000000..3c02c4e
--- /dev/null
+++ b/jjb/pipeline/voltha/voltha-2.8/voltha-scale-multi-stack.groovy
@@ -0,0 +1,463 @@
+// Copyright 2019-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// deploys one or more VOLTHA stacks and performs a scale test
+
+// NOTE we are importing the library even if it's global so that it's
+// easier to change the keywords during a replay
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
+pipeline {
+
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 120, unit: 'MINUTES')
+ }
+ environment {
+ JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
+ KUBECONFIG="$HOME/.kube/config"
+ SSHPASS="karaf"
+ PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
+
+ LOG_FOLDER="$WORKSPACE/logs"
+ }
+
+ stages {
+ stage ('Cleanup') {
+ steps {
+ timeout(time: 11, unit: 'MINUTES') {
+ script {
+ def namespaces = ["infra"]
+ // FIXME we may have leftovers from more VOLTHA stacks (eg: run1 had 10 stacks, run2 had 2 stacks)
+ volthaStacks.toInteger().times {
+ namespaces += "voltha${it + 1}"
+ }
+ helmTeardown(namespaces)
+ }
+ sh returnStdout: false, script: '''
+ helm repo add onf https://charts.opencord.org
+ helm repo update
+
+ # remove all persistent volume claims
+ kubectl delete pvc --all-namespaces --all
+ PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
+ while [[ \$PVCS != 0 ]]; do
+ sleep 5
+ PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
+ done
+
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
+
+ cd $WORKSPACE
+ rm -rf $WORKSPACE/*
+ '''
+ }
+ }
+ }
+ stage('Download Code') {
+ steps {
+ getVolthaCode([
+ branch: "${release}",
+ volthaSystemTestsChange: "${volthaSystemTestsChange}",
+ volthaHelmChartsChange: "${volthaHelmChartsChange}",
+ ])
+ }
+ }
+ stage('Deploy common infrastructure') {
+ // includes monitoring
+ steps {
+ sh '''
+ if [ ${withMonitoring} = true ] ; then
+ helm install -n infra nem-monitoring cord/nem-monitoring \
+ -f $HOME/voltha-scale/grafana.yaml \
+ --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
+ --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
+ fi
+ '''
+ }
+ }
+ stage('Start logging') {
+ steps {
+ script {
+ startComponentsLogs([
+ appsToLog: [
+ 'app.kubernetes.io/name=etcd',
+ 'app.kubernetes.io/name=kafka',
+ 'app=onos-classic',
+ 'app=adapter-open-onu',
+ 'app=adapter-open-olt',
+ 'app=rw-core',
+ 'app=ofagent',
+ 'app=bbsim',
+ 'app=radius',
+ 'app=bbsim-sadis-server',
+ 'app=onos-config-loader',
+ ]
+ ])
+ }
+ }
+ }
+ stage('Deploy VOLTHA infrastructure') {
+ steps {
+ timeout(time: 5, unit: 'MINUTES') {
+ script {
+ def localCharts = false
+ if (volthaHelmChartsChange != "" || release != "master") {
+ localCharts = true
+ }
+
+ def infraHelmFlags =
+ "--set global.log_level=${logLevel} " +
+ "--set radius.enabled=${withEapol} " +
+ "--set onos-classic.onosSshPort=30115 " +
+ "--set onos-classic.onosApiPort=30120 " +
+ params.extraHelmFlags
+
+ volthaInfraDeploy([
+ workflow: workflow,
+ infraNamespace: "infra",
+ extraHelmFlags: infraHelmFlags,
+ localCharts: localCharts,
+ onosReplica: onosReplicas,
+ atomixReplica: atomixReplicas,
+ kafkaReplica: kafkaReplicas,
+ etcdReplica: etcdReplicas,
+ ])
+ }
+ }
+ }
+ }
+ stage('Deploy Voltha') {
+ steps {
+ deploy_voltha_stacks(params.volthaStacks)
+ }
+ }
+ stage('Configuration') {
+ steps {
+ script {
+ sh returnStdout: false, script: """
+
+ # forward ETCD port
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG=etcd-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/etcd 9999:2379; done 2>&1 " &
+
+ # forward ONOS ports
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8101:8101; done 2>&1 " &
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8181:8181; done 2>&1 " &
+
+ # make sure the port-forward has started before moving forward
+ sleep 5
+ """
+ sh returnStdout: false, script: """
+ # TODO this needs to be repeated per stack
+ # kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
+
+ #Setting link discovery
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000
+
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.onosproject
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.opencord
+
+ # Set Flows/Ports/Meters poll frequency
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
+
+ # SR is not needed in scale tests and is not currently used by operators in production, so it can be disabled.
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.onosproject.segmentrouting
+
+
+ if [ ${withFlows} = false ]; then
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
+ fi
+ """
+ }
+ }
+ }
+ stage('Setup Test') {
+ steps {
+ sh '''
+ mkdir -p $WORKSPACE/RobotLogs
+ cd $WORKSPACE/voltha-system-tests
+ make vst_venv
+ '''
+ }
+ }
+ stage('Run Test') {
+ steps {
+ test_voltha_stacks(params.volthaStacks)
+ }
+ }
+ }
+ post {
+ always {
+ stopComponentsLogs([compress: true])
+ // collect result, done in the "post" step so it's executed even in the
+ // event of a timeout in the tests
+ plot([
+ csvFileName: 'scale-test.csv',
+ csvSeries: [
+ [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ ],
+ group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (Stacks: ${params.volthaStacks}, OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
+ ])
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: 'RobotLogs/**/log.html',
+ otherFiles: '',
+ outputFileName: 'RobotLogs/**/output.xml',
+ outputPath: '.',
+ passThreshold: 100,
+ reportFileName: 'RobotLogs/**/report.html',
+ onlyCritical: true,
+ unstableThreshold: 0]);
+ // get all the logs from kubernetes PODs
+ sh returnStdout: false, script: '''
+
+ # store information on running charts
+ helm ls --all-namespaces > $LOG_FOLDER/helm-list.txt || true
+
+ # store information on the running pods
+ kubectl get pods --all-namespaces -o wide > $LOG_FOLDER/pods.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
+
+ # copy the ONOS logs directly from the container to avoid the color codes
+ printf '%s\n' $(kubectl get pods -n infra -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp -n infra #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
+
+ '''
+ // dump all the BBSim(s) ONU information
+ script {
+ for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
+ stack_ns="voltha"+i
+ sh """
+ mkdir -p \$LOG_FOLDER/${stack_ns}
+ BBSIM_IDS=\$(kubectl -n ${stack_ns} get pods | grep bbsim | grep -v server | awk '{print \$1}')
+ IDS=(\$BBSIM_IDS)
+
+ for bbsim in "\${IDS[@]}"
+ do
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl onu list > \$LOG_FOLDER/${stack_ns}/\$bbsim-device-list.txt || true
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl service list > \$LOG_FOLDER/${stack_ns}/\$bbsim-service-list.txt || true
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt resources GEM_PORT > \$LOG_FOLDER/${stack_ns}/\$bbsim-flows-gem-ports.txt || true
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt resources ALLOC_ID > \$LOG_FOLDER/${stack_ns}/\$bbsim-flows-alloc-ids.txt || true
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl olt pons > \$LOG_FOLDER/${stack_ns}/\$bbsim-pon-resources.txt || true
+ done
+ """
+ }
+ }
+ // get ONOS debug infos
+ sh '''
+
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt || true
+
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 netcfg > $LOG_FOLDER/onos-netcfg.txt || true
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg get > $LOG_FOLDER/onos-cfg.txt || true
+
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt || true
+
+ if [ ${withFlows} = true ] ; then
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt || true
+ fi
+
+ if [ ${provisionSubscribers} = true ]; then
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt || true
+ fi
+
+ if [ ${withEapol} = true ] ; then
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt || true
+ fi
+
+ if [ ${withDhcp} = true ] ; then
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt || true
+ fi
+ '''
+ // collect etcd metrics
+ sh '''
+ mkdir -p $WORKSPACE/etcd-metrics
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
+ '''
+ // get VOLTHA debug infos
+ script {
+ for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
+ stack_ns="voltha"+i
+ voltcfg="~/.volt/config-voltha"+i
+ try {
+ sh """
+
+ # _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555& > /dev/null 2>&1
+ _TAG="voltha-port-forward" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1; done"&
+
+ voltctl -m 8MB device list -o json > $LOG_FOLDER/${stack_ns}/device-list.json || true
+ python -m json.tool $LOG_FOLDER/${stack_ns}/device-list.json > $LOG_FOLDER/${stack_ns}/voltha-devices-list.json || true
+ rm $LOG_FOLDER/${stack_ns}/device-list.json || true
+ voltctl -m 8MB device list > $LOG_FOLDER/${stack_ns}/voltha-devices-list.txt || true
+
+ DEVICE_LIST=
+ printf '%s\n' \$(voltctl -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/${stack_ns}/voltha-device-flows-#.txt" || true
+ printf '%s\n' \$(voltctl -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/${stack_ns}/voltha-device-ports-#.txt" || true
+
+ printf '%s\n' \$(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-flows-#.txt" || true
+ printf '%s\n' \$(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice port list # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-ports-#.txt" || true
+
+ # remove VOLTHA port-forward
+ ps aux | grep port-forw | grep voltha-api | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
+ """
+ } catch(e) {
+ println e
+ sh '''
+ echo "Can't get device list from voltctl"
+ '''
+ }
+ }
+ }
+ // get cpu usage by container
+ sh '''
+ if [ ${withMonitoring} = true ] ; then
+ cd $WORKSPACE/voltha-system-tests
+ source ./vst_venv/bin/activate
+ sleep 60 # we have to wait for prometheus to collect all the information
+ python scripts/sizing.py -o $WORKSPACE/plots || true
+ fi
+ '''
+ archiveArtifacts artifacts: 'kind-voltha/install-*.log,execution-time-*.txt,logs/**/*.txt,logs/**/*.tar.gz,logs/**/*.tgz,RobotLogs/**/*,plots/*,etcd-metrics/*'
+ }
+ }
+}
+
+def deploy_voltha_stacks(numberOfStacks) {
+ for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
+ timeout(time: 5, unit: 'MINUTES') {
+ stage("Deploy VOLTHA stack " + i) {
+
+ def localCharts = false
+ if (volthaHelmChartsChange != "" || release != "master") {
+ localCharts = true
+ }
+
+ def volthaHelmFlags =
+ "--set global.log_level=${logLevel} " +
+ "--set enablePerf=true,onu=${onus},pon=${pons} " +
+ "--set securityContext.enabled=false " +
+ params.extraHelmFlags
+
+ volthaStackDeploy([
+ bbsimReplica: olts.toInteger(),
+ infraNamespace: "infra",
+ volthaNamespace: "voltha${i}",
+ stackName: "voltha${i}",
+ stackId: i,
+ workflow: workflow,
+ extraHelmFlags: volthaHelmFlags,
+ localCharts: localCharts,
+ onosReplica: onosReplicas,
+ ])
+ }
+ }
+ }
+}
+
+def test_voltha_stacks(numberOfStacks) {
+ for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
+ stage("Test VOLTHA stack " + i) {
+ timeout(time: 15, unit: 'MINUTES') {
+ sh """
+
+ # we are restarting the voltha-api port-forward for each stack, no need to have a different voltconfig file
+ voltctl -s 127.0.0.1:55555 config > $HOME/.volt/config
+ export VOLTCONFIG=$HOME/.volt/config
+
+ # _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555& > /dev/null 2>&1
+ _TAG="voltha-port-forward" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1; done"&
+
+
+ ROBOT_PARAMS="-v stackId:${i} \
+ -v olt:${olts} \
+ -v pon:${pons} \
+ -v onu:${onus} \
+ -v workflow:${workflow} \
+ -v withEapol:${withEapol} \
+ -v withDhcp:${withDhcp} \
+ -v withIgmp:${withIgmp} \
+ --noncritical non-critical \
+ -e igmp \
+ -e teardown "
+
+ if [ ${withEapol} = false ] ; then
+ ROBOT_PARAMS+="-e authentication "
+ fi
+
+ if [ ${withDhcp} = false ] ; then
+ ROBOT_PARAMS+="-e dhcp "
+ fi
+
+ if [ ${provisionSubscribers} = false ] ; then
+ # if we're not considering subscribers then we don't care about authentication and dhcp
+ ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
+ fi
+
+ if [ ${withFlows} = false ] ; then
+ ROBOT_PARAMS+="-i setup -i activation "
+ fi
+
+ cd $WORKSPACE/voltha-system-tests
+ source ./vst_venv/bin/activate
+ robot -d $WORKSPACE/RobotLogs/voltha${i} \
+ \$ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
+
+ # collect results
+ python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/voltha${i}/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time-voltha${i}.txt || true
+ cat $WORKSPACE/execution-time-voltha${i}.txt
+ """
+ sh """
+ # remove VOLTHA port-forward
+ ps aux | grep port-forw | grep voltha-api | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 2>&1 > /dev/null || true
+ """
+ }
+ }
+ }
+}
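Both helpers above, deploy_voltha_stacks and test_voltha_stacks, repeat the same per-stack skeleton: derive the voltha<N> namespace and release name from the loop index and run each stack inside its own stage and timeout. A minimal Groovy sketch of that skeleton, with the helper name, timings, and closure body as illustrative assumptions (not part of the change itself):

    def forEachVolthaStack(numberOfStacks, Closure body) {
        for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
            stage("VOLTHA stack " + i) {
                timeout(time: 15, unit: 'MINUTES') {
                    // namespace/stack name and stack id, as used by the helpers above
                    body("voltha${i}", i)
                }
            }
        }
    }
    // usage: forEachVolthaStack(params.volthaStacks) { ns, id -> echo "working on ${ns}" }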
diff --git a/jjb/pipeline/voltha/voltha-2.7/voltha-scale-test.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-scale-test.groovy
similarity index 80%
rename from jjb/pipeline/voltha/voltha-2.7/voltha-scale-test.groovy
rename to jjb/pipeline/voltha/voltha-2.8/voltha-scale-test.groovy
index 11bb1c4..0f010c4 100644
--- a/jjb/pipeline/voltha/voltha-2.7/voltha-scale-test.groovy
+++ b/jjb/pipeline/voltha/voltha-2.8/voltha-scale-test.groovy
@@ -14,8 +14,14 @@
// deploy VOLTHA and performs a scale test
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
// this function generates the correct parameters for ofAgent
-// to connect to multple ONOS instances
+// to connect to multiple ONOS instances
def ofAgentConnections(numOfOnos, releaseName, namespace) {
def params = " "
numOfOnos.times {
@@ -36,14 +42,14 @@
environment {
JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
KUBECONFIG="$HOME/.kube/config"
- VOLTCONFIG="$HOME/.volt/config-2.7" // voltha-2.7 does not have ingress and still relies on port-forwarding
+ VOLTCONFIG="$HOME/.volt/config"
SSHPASS="karaf"
VOLTHA_LOG_LEVEL="${logLevel}"
NUM_OF_BBSIM="${olts}"
NUM_OF_OPENONU="${openonuAdapterReplicas}"
NUM_OF_ONOS="${onosReplicas}"
NUM_OF_ATOMIX="${atomixReplicas}"
- EXTRA_HELM_FLAGS="${extraHelmFlags} " // note that the trailing space is required to separate the parameters from appends done later
+ EXTRA_HELM_FLAGS=" "
APPS_TO_LOG="etcd kafka onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server onos-config-loader"
LOG_FOLDER="$WORKSPACE/logs"
@@ -55,27 +61,19 @@
stage ('Cleanup') {
steps {
timeout(time: 11, unit: 'MINUTES') {
+ script {
+ helmTeardown(["default"])
+ }
sh returnStdout: false, script: '''
helm repo add onf https://charts.opencord.org
helm repo update
- NAMESPACES="voltha1 voltha2 infra default"
- for NS in $NAMESPACES
- do
- for hchart in $(helm list -n $NS -q | grep -E -v 'docker-registry|kafkacat');
- do
- echo "Purging chart: ${hchart}"
- helm delete -n $NS "${hchart}"
- done
- done
-
- # wait for pods to be removed
- echo -ne "\nWaiting for PODs to be removed..."
- PODS=$(kubectl get pods --all-namespaces --no-headers | grep -v -E "kube|cattle|registry|fleet|ingress-nginx" | wc -l)
- while [[ $PODS != 0 ]]; do
+ # remove all persistent volume claims
+ kubectl delete pvc --all-namespaces --all
+ PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
+ while [[ \$PVCS != 0 ]]; do
sleep 5
- echo -ne "."
- PODS=$(kubectl get pods --all-namespaces --no-headers | grep -v -E "kube|cattle|registry|fleet|ingress-nginx" | wc -l)
+ PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
done
# remove orphaned port-forward from different namespaces
@@ -87,54 +85,13 @@
}
}
}
- stage('Clone voltha-system-tests') {
+ stage('Download Code') {
steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${release}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
+ getVolthaCode([
+ branch: "${release}",
+ volthaSystemTestsChange: "${volthaSystemTestsChange}",
+ volthaHelmChartsChange: "${volthaHelmChartsChange}",
])
- script {
- sh(script:"""
- if [ '${volthaSystemTestsChange}' != '' ] ; then
- cd $WORKSPACE/voltha-system-tests;
- git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
- fi
- """)
- }
- }
- }
- stage('Clone voltha-helm-charts') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-helm-charts",
- refspec: "${volthaHelmChartsChange}"
- ]],
- branches: [[ name: "${release}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-helm-charts"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- script {
- sh(script:"""
- if [ '${volthaHelmChartsChange}' != '' ] ; then
- cd $WORKSPACE/voltha-helm-charts;
- git fetch https://gerrit.opencord.org/voltha-helm-charts ${volthaHelmChartsChange} && git checkout FETCH_HEAD
- fi
- """)
- }
}
}
stage('Build patch') {
@@ -155,19 +112,8 @@
}
}
stage('Deploy common infrastructure') {
- // includes monitoring, kafka, etcd
steps {
sh '''
- helm install kafka $HOME/teone/helm-charts/kafka --set replicaCount=${kafkaReplicas},replicas=${kafkaReplicas} --set persistence.enabled=false \
- --set zookeeper.replicaCount=${kafkaReplicas} --set zookeeper.persistence.enabled=false \
- --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default
-
- # the ETCD chart use "auth" for resons different than BBsim, so strip that away
- ETCD_FLAGS=$(echo ${extraHelmFlags} | sed -e 's/--set auth=false / /g') | sed -e 's/--set auth=true / /g'
- ETCD_FLAGS+=" --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=${etcdReplicas}"
- ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
- helm install --set replicas=${etcdReplicas} etcd $HOME/teone/helm-charts/etcd $ETCD_FLAGS
-
if [ ${withMonitoring} = true ] ; then
helm install nem-monitoring onf/nem-monitoring \
-f $HOME/voltha-scale/grafana.yaml \
@@ -193,7 +139,7 @@
_TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
done
'''
- sh returnStdout: false, script: """
+ def returned_flags = sh (returnStdout: true, script: """
export EXTRA_HELM_FLAGS+=' '
@@ -248,11 +194,6 @@
# No persistent-volume-claims in Atomix
EXTRA_HELM_FLAGS+="--set onos-classic.atomix.persistence.enabled=false "
- echo "Installing with the following extra arguments:"
- echo $EXTRA_HELM_FLAGS
-
-
-
# Use custom built images
if [ '\$GERRIT_PROJECT' == 'voltha-go' ]; then
@@ -282,43 +223,49 @@
if [ '\$GERRIT_PROJECT' == 'bbsim' ]; then
EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
fi
+ echo \$EXTRA_HELM_FLAGS
- helm upgrade --install voltha-infra onf/voltha-infra \$EXTRA_HELM_FLAGS \
- --set onos-classic.replicas=${onosReplicas},onos-classic.atomix.replicas=${atomixReplicas} \
- --set etcd.enabled=false,kafka.enabled=false \
- --set global.log_level=${logLevel} \
- -f $WORKSPACE/voltha-helm-charts/examples/${workflow}-values.yaml \
- --set onos-classic.onosSshPort=30115 --set onos-classic.onosApiPort=30120 \
- --version 0.1.13
+ """).trim()
- helm upgrade --install voltha1 onf/voltha-stack \$EXTRA_HELM_FLAGS \
- --set global.stack_name=voltha1 \
- --set global.voltha_infra_name=voltha-infra \
- --set global.voltha_infra_namespace=default \
- --set global.log_level=${logLevel} \
- ${ofAgentConnections(onosReplicas.toInteger(), "voltha-infra", "default")} \
- --set voltha.services.kafka.adapter.address=kafka.default.svc:9092 \
- --set voltha.services.kafka.cluster.address=kafka.default.svc:9092 \
- --set voltha.services.etcd.address=etcd.default.svc:2379 \
- --set voltha-adapter-openolt.services.kafka.adapter.address=kafka.default.svc:9092 \
- --set voltha-adapter-openolt.services.kafka.cluster.address=kafka.default.svc:9092 \
- --set voltha-adapter-openolt.services.etcd.address=etcd.default.svc:2379 \
- --set voltha-adapter-openonu.services.kafka.adapter.address=kafka.default.svc:9092 \
- --set voltha-adapter-openonu.services.kafka.cluster.address=kafka.default.svc:9092 \
- --set voltha-adapter-openonu.services.etcd.address=etcd.default.svc:2379 \
- --version 0.1.17
+ def extraHelmFlags = returned_flags
+ // The added space before params.extraHelmFlags is required due to the .trim() above
+ def infraHelmFlags =
+ "--set global.log_level=${logLevel} " +
+ "--set radius.enabled=${withEapol} " +
+ "--set onos-classic.onosSshPort=30115 " +
+ "--set onos-classic.onosApiPort=30120 " +
+ extraHelmFlags + " " + params.extraHelmFlags
+ println "Passing the following parameters to the VOLTHA infra deploy: ${infraHelmFlags}."
- for i in {0..${olts.toInteger() - 1}}; do
- stackId=1
- helm upgrade --install bbsim\$i onf/bbsim \$EXTRA_HELM_FLAGS \
- --set olt_id="\${stackId}\${i}" \
- --set onu=${onus},pon=${pons} \
- --set global.log_level=${logLevel.toLowerCase()} \
- -f $WORKSPACE/voltha-helm-charts/examples/${workflow}-values.yaml \
- --version 4.2.0
- done
- """
+ // in a released version we always want to use the local version of the helm-charts
+ def localCharts = true
+
+ volthaInfraDeploy([
+ workflow: workflow,
+ infraNamespace: "default",
+ extraHelmFlags: infraHelmFlags,
+ localCharts: localCharts,
+ onosReplica: onosReplicas,
+ atomixReplica: atomixReplicas,
+ kafkaReplica: kafkaReplicas,
+ etcdReplica: etcdReplicas,
+ ])
+
+ stackHelmFlags = " --set onu=${onus},pon=${pons} --set global.log_level=${logLevel.toLowerCase()} "
+ stackHelmFlags += " --set voltha.ingress.enabled=true --set voltha.ingress.enableVirtualHosts=true --set voltha.fullHostnameOverride=voltha.scale1.dev "
+ stackHelmFlags += extraHelmFlags + " " + params.extraHelmFlags
+
+ volthaStackDeploy([
+ bbsimReplica: olts.toInteger(),
+ infraNamespace: "default",
+ volthaNamespace: "default",
+ stackName: "voltha1", // TODO support custom charts
+ workflow: workflow,
+ extraHelmFlags: stackHelmFlags,
+ localCharts: localCharts,
+ ])
sh """
set +x
@@ -365,24 +312,28 @@
# sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.onosproject.mcast
# sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.igmpproxy
# sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.olt
+ # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager
kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
- # Set Flows/Ports/Meters poll frequency
+ # Set Flows/Ports/Meters/Groups poll frequency
sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.group.impl.OpenFlowGroupProvider groupPollInterval ${onosGroupInterval}
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.FlowObjectiveManager numThreads ${flowObjWorkerThreads}
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager objectiveTimeoutMs 300000
if [ ${withFlows} = false ]; then
sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 app deactivate org.opencord.olt
fi
if [ '${workflow}' = 'tt' ]; then
- etcd_container=\$(kubectl get pods --all-namespaces | grep etcd | awk 'NR==1{print \$2}')
+ etcd_container=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$2}')
kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64')
kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65')
- kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST.json \$etcd_container:/tmp/mcast.json
+ kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast.json
put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66')
fi
@@ -423,7 +374,7 @@
sh """
# load MIB template
wget https://raw.githubusercontent.com/opencord/voltha-openonu-adapter-go/master/templates/BBSM-12345123451234512345-00000000000001-v1.json
- cat BBSM-12345123451234512345-00000000000001-v1.json | kubectl exec -it \$(kubectl get pods |grep etcd | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/12345123451234512345/00000000000001
+ cat BBSM-12345123451234512345-00000000000001-v1.json | kubectl exec -it \$(kubectl get pods |grep etcd-0 | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/12345123451234512345/00000000000001
"""
}
}
@@ -477,12 +428,12 @@
-v olt:${olts} \
-v pon:${pons} \
-v onu:${onus} \
+ -v ONOS_SSH_PORT:30115 \
+ -v ONOS_REST_PORT:30120 \
-v workflow:${workflow} \
-v withEapol:${withEapol} \
-v withDhcp:${withDhcp} \
-v withIgmp:${withIgmp} \
- -v ONOS_SSH_PORT:30115 \
- -v ONOS_REST_PORT:30120 \
--noncritical non-critical \
-e igmp -e teardown "
@@ -511,45 +462,6 @@
}
}
}
- stage('Run Igmp Tests') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/IgmpTests"
- }
- when {
- expression {
- return params.withIgmp
- }
- }
- steps {
- sh '''
- set +e
- mkdir -p $ROBOT_LOGS_DIR
- cd $WORKSPACE/voltha-system-tests
- make vst_venv
- '''
- timeout(time: 11, unit: 'MINUTES') {
- sh '''
- ROBOT_PARAMS="--exitonfailure \
- -v olt:${olts} \
- -v pon:${pons} \
- -v onu:${onus} \
- -v workflow:${workflow} \
- -v withEapol:${withEapol} \
- -v withDhcp:${withDhcp} \
- -v withIgmp:${withIgmp} \
- --noncritical non-critical \
- -i igmp \
- -e setup -e activation -e flow-before \
- -e authentication -e provision -e flow-after \
- -e dhcp -e teardown "
- cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate
- robot -d $ROBOT_LOGS_DIR \
- $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
- '''
- }
- }
- }
}
post {
always {
@@ -610,7 +522,7 @@
fi
cd voltha-system-tests
- source ./vst_venv/bin/activate
+ source ./vst_venv/bin/activate || true
python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
cat $WORKSPACE/execution-time.txt
'''
@@ -650,6 +562,7 @@
outputPath: 'RobotLogs',
passThreshold: 100,
reportFileName: '**/report*.html',
+ onlyCritical: true,
unstableThreshold: 0]);
// get all the logs from kubernetes PODs
sh returnStdout: false, script: '''
@@ -666,8 +579,13 @@
printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
# get ONOS cfg from the 3 nodes
- printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl exec -it # -- ${karafHome}/bin/client cfg get > $LOG_FOLDER/#.cfg" || true
+ # kubectl exec -t voltha-infra-onos-classic-0 -- sh /root/onos/apache-karaf-4.2.9/bin/client cfg get > ~/voltha-infra-onos-classic-0-cfg.txt || true
+ # kubectl exec -t voltha-infra-onos-classic-1 -- sh /root/onos/apache-karaf-4.2.9/bin/client cfg get > ~/voltha-infra-onos-classic-1-cfg.txt || true
+ # kubectl exec -t voltha-infra-onos-classic-2 -- sh /root/onos/apache-karaf-4.2.9/bin/client cfg get > ~/voltha-infra-onos-classic-2-cfg.txt || true
+ # kubectl exec -t voltha-infra-onos-classic-0 -- sh /root/onos/apache-karaf-4.2.9/bin/client obj-next-ids > ~/voltha-infra-onos-classic-0-next-objs.txt || true
+ # kubectl exec -t voltha-infra-onos-classic-1 -- sh /root/onos/apache-karaf-4.2.9/bin/client obj-next-ids > ~/voltha-infra-onos-classic-1-next-objs.txt || true
+ # kubectl exec -t voltha-infra-onos-classic-2 -- sh /root/onos/apache-karaf-4.2.9/bin/client obj-next-ids > ~/voltha-infra-onos-classic-2-next-objs.txt || true
# get radius logs out of the container
kubectl cp $(kubectl get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER/radius.log || true
@@ -751,6 +669,10 @@
curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-bucket.json || true
curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_wal_fsync_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-wal-fsync-time-bucket.json || true
curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_network_peer_round_trip_time_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-network-peer-round-trip-time-seconds.json || true
+ etcd_namespace=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$1}')
+ etcd_container=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$2}')
+ kubectl exec -it -n \$etcd_namespace \$etcd_container -- etcdctl defrag --cluster || true
+ kubectl exec -it -n \$etcd_namespace \$etcd_container -- etcdctl endpoint status -w table > $WORKSPACE/etcd-metrics/etcd-status-table.txt || true
'''
// get VOLTHA debug infos
@@ -778,9 +700,9 @@
sh '''
if [ ${withMonitoring} = true ] ; then
cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate
+ source ./vst_venv/bin/activate || true
sleep 60 # we have to wait for prometheus to collect all the information
- python tests/scale/sizing.py -o $WORKSPACE/plots || true
+ python scripts/sizing.py -o $WORKSPACE/plots || true
fi
'''
archiveArtifacts artifacts: 'execution-time.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
@@ -790,8 +712,6 @@
def start_port_forward(olts) {
sh """
- daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/voltha1-voltha-api 55555:55555
-
bbsimRestPortFwd=50071
for i in {0..${olts.toInteger() - 1}}; do
daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/bbsim\${i} \${bbsimRestPortFwd}:50071
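A structural change worth noting in this file: the shell-side EXTRA_HELM_FLAGS assembly is now captured with sh(returnStdout: true) and handed to volthaInfraDeploy/volthaStackDeploy, instead of being consumed by inline helm commands. A minimal sketch of that capture pattern (the flag value is illustrative); the .trim() strips the trailing space, which is why one is re-added before params.extraHelmFlags:

    def shellFlags = sh(returnStdout: true, script: '''
        EXTRA_HELM_FLAGS=""
        EXTRA_HELM_FLAGS+="--set onos-classic.atomix.persistence.enabled=false "
        echo $EXTRA_HELM_FLAGS
    ''').trim()
    // re-add the separator that .trim() removed before appending the job parameter
    def infraHelmFlags = shellFlags + " " + params.extraHelmFlags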
diff --git a/jjb/pipeline/voltha/voltha-2.8/voltha-tt-physical-functional-tests.groovy b/jjb/pipeline/voltha/voltha-2.8/voltha-tt-physical-functional-tests.groovy
new file mode 100644
index 0000000..c6cd0d2
--- /dev/null
+++ b/jjb/pipeline/voltha/voltha-2.8/voltha-tt-physical-functional-tests.groovy
@@ -0,0 +1,236 @@
+// Copyright 2017-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
+node {
+ // Need this so that deployment_config has global scope when it's read later
+ deployment_config = null
+}
+
+pipeline {
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: "${timeout}", unit: 'MINUTES')
+ }
+
+ environment {
+ KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
+ VOLTCONFIG="$HOME/.volt/config-minimal"
+ PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ }
+
+ stages {
+ stage('Clone voltha-system-tests') {
+ steps {
+ step([$class: 'WsCleanup'])
+ checkout([
+ $class: 'GitSCM',
+ userRemoteConfigs: [[
+ url: "https://gerrit.opencord.org/voltha-system-tests",
+ refspec: "${volthaSystemTestsChange}"
+ ]],
+ branches: [[ name: "${branch}", ]],
+ extensions: [
+ [$class: 'WipeWorkspace'],
+ [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
+ [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
+ ],
+ ])
+ script {
+ sh(script:"""
+ if [ '${volthaSystemTestsChange}' != '' ] ; then
+ cd $WORKSPACE/voltha-system-tests;
+ git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
+ fi
+ """)
+ }
+ }
+ }
+ // This checkout allows us to show changes in Jenkins
+ // we only do this on master as we don't branch all the repos for all the releases
+ // (we should compute the difference by tracking the container version, not the code)
+ stage('Download All the VOLTHA repos') {
+ when {
+ expression {
+ return "${branch}" == 'master';
+ }
+ }
+ steps {
+ checkout(changelog: true,
+ poll: false,
+ scm: [$class: 'RepoScm',
+ manifestRepositoryUrl: "${params.manifestUrl}",
+ manifestBranch: "${params.branch}",
+ currentBranch: true,
+ destinationDir: 'voltha',
+ forceSync: true,
+ resetFirst: true,
+ quiet: true,
+ jobs: 4,
+ showAllChanges: true]
+ )
+ }
+ }
+ stage ('Initialize') {
+ steps {
+ sh returnStdout: false, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
+ script {
+ deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
+ }
+ sh returnStdout: false, script: """
+ mkdir -p $WORKSPACE/bin
+ bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
+ cd $WORKSPACE
+ if [ "${params.branch}" == "voltha-2.8" ]; then
+ VOLTCTL_VERSION=1.6.11
+ else
+ VOLTCTL_VERSION=\$(curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')
+ fi
+
+ HOSTOS=\$(uname -s | tr "[:upper:]" "[:lower:]")
+ HOSTARCH=\$(uname -m | tr "[:upper:]" "[:lower:]")
+ if [ \$HOSTARCH == "x86_64" ]; then
+ HOSTARCH="amd64"
+ fi
+ curl -o $WORKSPACE/bin/voltctl -sSL https://github.com/opencord/voltctl/releases/download/v\${VOLTCTL_VERSION}/voltctl-\${VOLTCTL_VERSION}-\${HOSTOS}-\${HOSTARCH}
+ chmod 755 $WORKSPACE/bin/voltctl
+ voltctl version --clientonly
+
+
+ # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
+ # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
+ # We should change this. In the meantime here is a workaround.
+ if [ "${params.branch}" == "master" ]; then
+ set +e
+
+
+ # Remove noise from voltha-core logs
+ voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
+ voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
+ # Remove noise from openolt logs
+ voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
+ voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
+ voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
+ fi
+ """
+ }
+ }
+
+ stage('Functional Tests') {
+ environment {
+ ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
+ ROBOT_FILE="Voltha_TT_PODTests.robot"
+ ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/FunctionalTests"
+ }
+ steps {
+ sh """
+ mkdir -p $ROBOT_LOGS_DIR
+ if ( ${powerSwitch} ); then
+ export ROBOT_MISC_ARGS="--removekeywords wuks -i functionalTT -i PowerSwitch -i sanityTT -i sanityTT-MCAST -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
+ else
+ export ROBOT_MISC_ARGS="--removekeywords wuks -i functionalTT -e PowerSwitch -i sanityTT -i sanityTT-MCAST -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
+ fi
+ make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
+ """
+ }
+ }
+
+ stage('Failure/Recovery Tests') {
+ environment {
+ ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
+ ROBOT_FILE="Voltha_TT_FailureScenarios.robot"
+ ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/FailureScenarios"
+ }
+ steps {
+ sh """
+ mkdir -p $ROBOT_LOGS_DIR
+ if [ ${params.enableMultiUni} = false ]; then
+ if ( ${powerSwitch} ); then
+ export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
+ else
+ export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
+ fi
+ make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
+ fi
+ """
+ }
+ }
+
+ stage('Multi-Tcont Tests') {
+ environment {
+ ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
+ ROBOT_FILE="Voltha_TT_MultiTcontTests.robot"
+ ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/MultiTcontScenarios"
+ ROBOT_TEST_INPUT_FILE="$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-TT-multi-tcont-tests-input.yaml"
+ }
+ steps {
+ sh """
+ mkdir -p $ROBOT_LOGS_DIR
+ if [ ${params.enableMultiUni} = false ]; then
+ if ( ${powerSwitch} ); then
+ export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
+ else
+ export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel} -V $ROBOT_TEST_INPUT_FILE"
+ fi
+ make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
+ fi
+ """
+ }
+ }
+
+ }
+ post {
+ always {
+ getPodsInfo("$WORKSPACE/pods")
+ sh returnStdout: false, script: '''
+ set +e
+
+ # collect logs collected in the Robot Framework StartLogging keyword
+ cd $WORKSPACE
+ gzip *-combined.log || true
+ rm *-combined.log || true
+ '''
+ script {
+ deployment_config.olts.each { olt ->
+ if (olt.type == null || olt.type == "" || olt.type == "openolt") {
+ sh returnStdout: false, script: """
+ sshpass -p ${olt.pass} scp ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
+ sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log # Remove escape sequences
+ """
+ }
+ }
+ }
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: '**/log*.html',
+ otherFiles: '',
+ outputFileName: '**/output*.xml',
+ outputPath: 'RobotLogs',
+ passThreshold: 100,
+ reportFileName: '**/report*.html',
+ unstableThreshold: 0,
+ onlyCritical: true
+ ]);
+ archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.tgz,*.txt,pods/*.txt'
+ }
+ }
+}
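The three test stages above differ only in which Robot Framework tags they include or exclude; in particular the powerSwitch parameter flips between -i PowerSwitch and -e PowerSwitch while the rest of ROBOT_MISC_ARGS stays the same. A hedged Groovy sketch of that selection (the helper name is illustrative, not part of the pipeline):

    // include power-switch scenarios only when the pod has a managed power switch
    def powerSwitchTag(boolean powerSwitch) {
        return powerSwitch ? '-i PowerSwitch' : '-e PowerSwitch'
    }
    // e.g. "--removekeywords wuks -i functionalTT ${powerSwitchTag(params.powerSwitch)} -e bbsim -e notready ..."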
diff --git a/jjb/pipeline/xos-integration-tests.groovy b/jjb/pipeline/xos-integration-tests.groovy
index 5874c43..42eb8ad 100644
--- a/jjb/pipeline/xos-integration-tests.groovy
+++ b/jjb/pipeline/xos-integration-tests.groovy
@@ -201,7 +201,6 @@
reportFileName: 'RobotLogs/report*.html',
unstableThreshold: 0]);
archiveArtifacts artifacts: '*.log'
- step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${params.notificationEmail}", sendToIndividuals: false])
}
}
diff --git a/jjb/shell/tagcollisionreject.sh b/jjb/shell/tagcollisionreject.sh
index c1ab545..01772c5 100755
--- a/jjb/shell/tagcollisionreject.sh
+++ b/jjb/shell/tagcollisionreject.sh
@@ -126,9 +126,10 @@
found_parent=true
fi
done
+ fi
# if patch == 0, check that there was a release with MAJOR.MINOR-1.X
- elif [[ "$PATCH" == 0 ]]; then
+ if [[ "$PATCH" == 0 ]]; then
new_minor=$(( $MINOR - 1 ))
parent_version="$MAJOR.$new_minor.x"
for existing_tag in $existing_tags
@@ -138,9 +139,10 @@
found_parent=true
fi
done
+ fi
# if patch != 0 check that there was a release with MAJOR.MINOR.PATCH-1
- elif [[ "$PATCH" != 0 ]]; then
+ if [[ "$PATCH" != 0 ]]; then
new_patch=$(( $PATCH - 1 ))
parent_version="$MAJOR.$MINOR.$new_patch"
for existing_tag in $existing_tags
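The tagcollisionreject.sh change above converts the elif chain into independent if blocks, so each parent-version check is evaluated on its own. For the two checks visible in this hunk, the expected parent of a new MAJOR.MINOR.PATCH tag works out as follows; a hedged Groovy reconstruction of just those two checks (illustrative, not the shell script itself):

    // MAJOR.MINOR.0 expects a MAJOR.(MINOR-1).x release; MAJOR.MINOR.PATCH with PATCH > 0 expects MAJOR.MINOR.(PATCH-1)
    def expectedParents(int major, int minor, int patch) {
        def parents = []
        if (patch == 0) { parents << "${major}.${minor - 1}.x" }
        if (patch != 0) { parents << "${major}.${minor}.${patch - 1}" }
        return parents
    }
    // expectedParents(2, 8, 0) -> [2.7.x]; expectedParents(2, 8, 3) -> [2.8.2]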
diff --git a/jjb/software-upgrades.yaml b/jjb/software-upgrades.yaml
index a1110cf..61a4711 100644
--- a/jjb/software-upgrades.yaml
+++ b/jjb/software-upgrades.yaml
@@ -11,59 +11,61 @@
pipeline-script: 'voltha/master/software-upgrades.groovy'
build-node: 'ubuntu18.04-basebuild-8c-15g'
code-branch: 'master'
- aaa-version: '2.4.0.SNAPSHOT'
- aaa-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/aaa-app/2.4.0-SNAPSHOT/aaa-app-2.4.0-20210504.145538-2.oar'
- olt-version: '4.5.0.SNAPSHOT'
- olt-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/olt-app/4.5.0-SNAPSHOT/olt-app-4.5.0-20210514.121228-5.oar'
- dhcpl2relay-version: '2.5.0.SNAPSHOT'
- dhcpl2relay-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/dhcpl2relay-app/2.5.0-SNAPSHOT/dhcpl2relay-app-2.5.0-20210511.123715-4.oar'
- igmpproxy-version: '2.3.0.SNAPSHOT'
- igmpproxy-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/onos-app-igmpproxy-app/2.3.0-SNAPSHOT/onos-app-igmpproxy-app-2.3.0-20210511.123731-3.oar'
- sadis-version: '5.4.0.SNAPSHOT'
- sadis-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/sadis-app/5.4.0-SNAPSHOT/sadis-app-5.4.0-20210504.124302-5.oar'
- mcast-version: '2.4.0.SNAPSHOT'
- mcast-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/mcast-app/2.4.0-SNAPSHOT/mcast-app-2.4.0-20210511.123716-4.oar'
- kafka-version: '2.7.0.SNAPSHOT'
- kafka-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/kafka/2.7.0-SNAPSHOT/kafka-2.7.0-20210504.153949-3.oar'
- adapter-open-olt-image: 'voltha/voltha-openolt-adapter:3.3.3'
- adapter-open-onu-image: 'voltha/voltha-openonu-adapter-go:1.3.0'
- rw-core-image: 'voltha/voltha-rw-core:2.9.1'
- ofagent-image: 'voltha/voltha-ofagent-go:1.6.1'
- onu-image-name: 'software-image.img'
- onu-image-url: 'http://bbsim0:50074/images'
- onu-image-version: 'v1.0.0'
+ aaa-version: '2.4.0'
+ aaa-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/aaa-app/2.4.0/aaa-app-2.4.0.oar'
+ olt-version: '4.5.0'
+ olt-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/olt-app/4.5.0/olt-app-4.5.0.oar'
+ dhcpl2relay-version: '2.5.0'
+ dhcpl2relay-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/dhcpl2relay-app/2.5.0/dhcpl2relay-app-2.5.0.oar'
+ igmpproxy-version: '2.3.0'
+ igmpproxy-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/onos-app-igmpproxy-app/2.3.0/onos-app-igmpproxy-app-2.3.0.oar'
+ sadis-version: '5.4.0'
+ sadis-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/sadis-app/5.4.0/sadis-app-5.4.0.oar'
+ mcast-version: '2.4.0'
+ mcast-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/mcast-app/2.4.0/mcast-app-2.4.0.oar'
+ kafka-version: '2.7.0'
+ kafka-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/kafka/2.7.0/kafka-2.7.0.oar'
+ adapter-open-olt-image: 'voltha/voltha-openolt-adapter:3.6.2'
+ adapter-open-onu-image: 'voltha/voltha-openonu-adapter-go:1.4.0'
+ rw-core-image: 'voltha/voltha-rw-core:2.9.3'
+ ofagent-image: 'voltha/voltha-ofagent-go:1.6.5'
+ onu-image-version: 'BBSM_IMG_00002'
+ onu-image-url: 'http://bbsim0:50074/images/software-image.img'
+ onu-image-vendor: 'BBSM'
+ onu-image-activate-on-success: 'false'
+ onu-image-commit-on-success: 'false'
onu-image-crc: '0'
- onu-image-local-dir: '/tmp'
time-trigger: "H H/23 * * *"
- 'software-upgrades-test':
- name: 'periodic-software-upgrade-test-bbsim-2.7'
- pipeline-script: 'voltha/voltha-2.7/software-upgrades.groovy'
+ name: 'periodic-software-upgrade-test-bbsim-2.8'
+ pipeline-script: 'voltha/voltha-2.8/software-upgrades.groovy'
build-node: 'ubuntu18.04-basebuild-8c-15g'
- code-branch: 'voltha-2.7'
- aaa-version: '2.3.0'
- aaa-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/aaa-app/2.3.0/aaa-app-2.3.0.oar'
- olt-version: '4.4.0'
- olt-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/olt-app/4.4.0/olt-app-4.4.0.oar'
- dhcpl2relay-version: '2.4.0'
- dhcpl2relay-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/dhcpl2relay-app/2.4.0/dhcpl2relay-app-2.4.0.oar'
- igmpproxy-version: '2.2.0'
- igmpproxy-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/onos-app-igmpproxy-app/2.2.0/onos-app-igmpproxy-app-2.2.0.oar'
- sadis-version: '5.3.0'
- sadis-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/sadis-app/5.3.0/sadis-app-5.3.0.oar'
- mcast-version: '2.3.2'
- mcast-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/mcast-app/2.3.2/mcast-app-2.3.2.oar'
- kafka-version: '2.6.0'
- kafka-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/kafka/2.6.0/kafka-2.6.0.oar'
- adapter-open-olt-image: 'voltha/voltha-openolt-adapter:3.1.8'
- adapter-open-onu-image: 'voltha/voltha-openonu-adapter-go:1.2.11'
- rw-core-image: 'voltha/voltha-rw-core:2.7.0'
- ofagent-image: 'voltha/voltha-ofagent-go:1.5.2'
- onu-image-name: 'software-image.img'
- onu-image-url: 'http://bbsim0:50074/images'
- onu-image-version: 'v1.0.0'
+ code-branch: 'voltha-2.8'
+ aaa-version: '2.4.0'
+ aaa-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/aaa-app/2.4.0/aaa-app-2.4.0.oar'
+ olt-version: '4.5.0'
+ olt-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/olt-app/4.5.0/olt-app-4.5.0.oar'
+ dhcpl2relay-version: '2.5.0'
+ dhcpl2relay-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/dhcpl2relay-app/2.5.0/dhcpl2relay-app-2.5.0.oar'
+ igmpproxy-version: '2.3.0'
+ igmpproxy-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/onos-app-igmpproxy-app/2.3.0/onos-app-igmpproxy-app-2.3.0.oar'
+ sadis-version: '5.4.0'
+ sadis-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/sadis-app/5.4.0/sadis-app-5.4.0.oar'
+ mcast-version: '2.4.0'
+ mcast-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/mcast-app/2.4.0/mcast-app-2.4.0.oar'
+ kafka-version: '2.7.0'
+ kafka-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/kafka/2.7.0/kafka-2.7.0.oar'
+ adapter-open-olt-image: 'voltha/voltha-openolt-adapter:3.5.5'
+ adapter-open-onu-image: 'voltha/voltha-openonu-adapter-go:1.3.12'
+ rw-core-image: 'voltha/voltha-rw-core:2.9.3'
+ ofagent-image: 'voltha/voltha-ofagent-go:1.6.5'
+ onu-image-version: 'BBSM_IMG_00002'
+ onu-image-url: 'http://bbsim0:50074/images/software-image.img'
+ onu-image-vendor: 'BBSM'
+ onu-image-activate-on-success: 'false'
+ onu-image-commit-on-success: 'false'
onu-image-crc: '0'
- onu-image-local-dir: '/tmp'
time-trigger: "H H/23 * * *"
- job-template:
@@ -216,9 +218,9 @@
description: 'Voltha Ofagent Component Image'
- string:
- name: onuImageName
- default: '{onu-image-name}'
- description: 'Name of ONU Image to Upgrade'
+ name: onuImageVersion
+ default: '{onu-image-version}'
+ description: 'Version of ONU Image to Upgrade'
- string:
name: onuImageUrl
@@ -226,20 +228,25 @@
description: 'Url of ONU Image to Upgrade'
- string:
- name: onuImageVersion
- default: '{onu-image-version}'
- description: 'Version of ONU Image to Upgrade'
+ name: onuImageVendor
+ default: '{onu-image-vendor}'
+ description: 'Vendor of ONU Image to Upgrade'
+
+ - string:
+ name: onuImageActivateOnSuccess
+ default: '{onu-image-activate-on-success}'
+ description: 'Activate ONU Image'
+
+ - string:
+ name: onuImageCommitOnSuccess
+ default: '{onu-image-commit-on-success}'
+ description: 'Commit ONU Image'
- string:
name: onuImageCrc
default: '{onu-image-crc}'
description: 'CRC of ONU Image to Upgrade'
- - string:
- name: onuImageLocalDir
- default: '{onu-image-local-dir}'
- description: 'Local Dir of ONU Image to Upgrade'
-
project-type: pipeline
concurrent: true
diff --git a/jjb/verify/abstract-olt.yaml b/jjb/verify/abstract-olt.yaml
deleted file mode 100644
index fcd9bff..0000000
--- a/jjb/verify/abstract-olt.yaml
+++ /dev/null
@@ -1,100 +0,0 @@
----
-# verification jobs for 'abstract-olt' repo
-
-- project:
- name: abstract-olt
- project: '{name}'
-
- jobs:
- - 'verify-abstract-olt-jobs':
- branch-regexp: '{supported-branches-regexp}'
-
-- job-group:
- name: 'verify-abstract-olt-jobs'
- jobs:
- - 'verify-licensed'
- - 'abstract-olt-tests':
- dependency-jobs: 'verify_abstract-olt_licensed'
-
-- job-template:
- id: 'abstract-olt-tests'
- name: 'verify_{project}_tests'
-
- description: |
- Created by {id} job-template from ci-management/jjb/verify/abstract-olt.yaml
-
- triggers:
- - cord-infra-gerrit-trigger-patchset:
- gerrit-server-name: '{gerrit-server-name}'
- project-regexp: '^{project}$'
- branch-regexp: '{branch-regexp}'
- dependency-jobs: '{dependency-jobs}'
- file-include-regexp: '{all-files-regexp}'
-
- properties:
- - cord-infra-properties:
- build-days-to-keep: '{build-days-to-keep}'
- artifact-num-to-keep: '{artifact-num-to-keep}'
-
- wrappers:
- - lf-infra-wrappers:
- build-timeout: 20
- jenkins-ssh-credential: '{jenkins-ssh-credential}'
-
- scm:
- - cord-infra-gerrit-scm:
- git-url: '$GIT_URL/$GERRIT_PROJECT'
- refspec: '$GERRIT_REFSPEC'
- branch: '$GERRIT_BRANCH'
- submodule-recursive: 'false'
- choosing-strategy: 'gerrit'
- jenkins-ssh-credential: '{jenkins-ssh-credential}'
- basedir: '{project}'
-
- node: 'ubuntu18.04-basebuild-1c-2g'
- project-type: freestyle
- concurrent: true
-
- builders:
- - shell: |
- #!/usr/bin/env bash
- set -eux -o pipefail
-
- export GOPATH=~/go
- export PATH=$PATH:/usr/lib/go-1.12/bin:/usr/local/go/bin:~/go/bin
-
- # move code the proper location
- mkdir -p $GOPATH/src/gerrit.opencord.org
- mv abstract-olt $GOPATH/src/gerrit.opencord.org/abstract-olt
-
- # get prereqs
- go get -v github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
- go get -v github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
- go get -v github.com/golang/protobuf/protoc-gen-go
-
- pushd $GOPATH/src/gerrit.opencord.org/abstract-olt
-
- make test
-
- go test -v ./... 2>&1 | go-junit-report > $WORKSPACE/junit-report.xml
-
- go test -coverprofile=coverage.txt -covermode=count ./...
- gocover-cobertura < coverage.txt > $WORKSPACE/coverage.xml
-
- popd
-
-
- publishers:
- - junit:
- results: "junit-report.xml"
- - cobertura:
- report-file: "coverage.xml"
- targets:
- - files:
- healthy: 80
- unhealthy: 0
- failing: 0
- - method:
- healthy: 50
- unhealthy: 0
- failing: 0
diff --git a/jjb/verify/bbsim-sadis-server.yaml b/jjb/verify/bbsim-sadis-server.yaml
index 2ce60d3..fea3a4b 100644
--- a/jjb/verify/bbsim-sadis-server.yaml
+++ b/jjb/verify/bbsim-sadis-server.yaml
@@ -8,9 +8,9 @@
jobs:
- 'verify-bbsim-sadis-server-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-bbsim-sadis-server-jobs-voltha-2.7':
- name-extension: '-voltha-2.7'
- branch-regexp: '{kind-voltha-regexp}'
+ - 'verify-bbsim-sadis-server-jobs-voltha-2.8':
+ name-extension: '-voltha-2.8'
+ branch-regexp: '^voltha-2.8$'
- 'verify-bbsim-sadis-server-jobs-master':
branch-regexp: '^master$'
- 'publish-bbsim-sadis-server-jobs':
@@ -29,16 +29,15 @@
unit-test-keep-going: 'true'
- job-group:
- name: 'verify-bbsim-sadis-server-jobs-voltha-2.7'
+ name: 'verify-bbsim-sadis-server-jobs-voltha-2.8'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
- job-group:
name: 'verify-bbsim-sadis-server-jobs-master'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/master/bbsim-tests.groovy'
extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- job-group:
diff --git a/jjb/verify/bbsim.yaml b/jjb/verify/bbsim.yaml
index 37dfbae..3b64b88 100644
--- a/jjb/verify/bbsim.yaml
+++ b/jjb/verify/bbsim.yaml
@@ -8,10 +8,10 @@
jobs:
- 'verify-bbsim-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-bbsim-jobs-voltha-2.7':
- name-extension: '-voltha-2.7'
- override-branch: 'voltha-2.7'
- branch-regexp: '{kind-voltha-regexp}'
+ - 'verify-bbsim-jobs-voltha-2.8':
+ name-extension: '-voltha-2.8'
+ override-branch: 'voltha-2.8'
+ branch-regexp: '^voltha-2.8$'
- 'verify-bbsim-jobs-master':
branch-regexp: '^master$'
- 'publish-bbsim-jobs':
@@ -30,17 +30,42 @@
unit-test-keep-going: 'true'
- job-group:
- name: 'verify-bbsim-jobs-voltha-2.7'
+ name: 'verify-bbsim-jobs-voltha-2.8'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
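+          # testTargets entries: Makefile target to run, workflow (att/dt/tt),
+          # optional extra flags and a teardown toggle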
+ testTargets: |
+ - target: sanity-bbsim-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: sanity-bbsim-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: sanity-bbsim-tt
+ workflow: tt
+ flags: ""
+ teardown: true
- job-group:
name: 'verify-bbsim-jobs-master'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/master/bbsim-tests.groovy'
extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
+ testTargets: |
+ - target: sanity-bbsim-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: sanity-bbsim-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: sanity-bbsim-tt
+ workflow: tt
+ flags: ""
+ teardown: true
- job-group:
name: 'publish-bbsim-jobs'
diff --git a/jjb/verify/kind-voltha.yaml b/jjb/verify/kind-voltha.yaml
deleted file mode 100644
index 8b43798..0000000
--- a/jjb/verify/kind-voltha.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-# verification jobs for 'kind-voltha' repo
-
-- project:
- name: kind-voltha
- project: '{name}'
-
- jobs:
- - 'verify-kind-voltha-jobs':
- branch-regexp: '{all-branches-regexp}'
-
-- job-group:
- name: 'verify-kind-voltha-jobs'
- jobs:
- - 'verify-licensed'
- - 'tag-collision-reject'
- - 'make-unit-test':
- unit-test-targets: 'test'
- junit-allow-empty-results: true
- - 'voltha-patch-test':
- pipeline-script: 'voltha/voltha-2.7/voltha-bbsim-tests.groovy'
- name-extension: '-2.7'
- override-branch: 'voltha-2.7'
- kindVolthaChange: '$GERRIT_REFSPEC'
diff --git a/jjb/verify/ofagent-go.yaml b/jjb/verify/ofagent-go.yaml
index 9483d6e..c629c6b 100644
--- a/jjb/verify/ofagent-go.yaml
+++ b/jjb/verify/ofagent-go.yaml
@@ -8,9 +8,9 @@
jobs:
- 'verify-ofagent-go-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-ofagent-jobs-voltha-2.7':
- name-extension: '-voltha-2.7'
- override-branch: 'voltha-2.7'
+ - 'verify-ofagent-jobs-voltha-2.8':
+ name-extension: '-voltha-2.8'
+ override-branch: 'voltha-2.8'
branch-regexp: '{kind-voltha-regexp}'
- 'verify-ofagent-jobs-master':
branch-regexp: '^master$'
@@ -31,16 +31,15 @@
junit-allow-empty-results: true
- job-group:
- name: 'verify-ofagent-jobs-voltha-2.7'
+ name: 'verify-ofagent-jobs-voltha-2.8'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
- job-group:
name: 'verify-ofagent-jobs-master'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/master/bbsim-tests.groovy'
extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- job-group:
diff --git a/jjb/verify/ofagent-py.yaml b/jjb/verify/ofagent-py.yaml
deleted file mode 100644
index 59a231a..0000000
--- a/jjb/verify/ofagent-py.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-# verification jobs for 'ofagent-py' repo
-
-# NOTE ofagent-py is unmaintained, should we keep running tests?
-
-- project:
- name: ofagent-py
- project: '{name}'
-
- jobs:
- - 'verify-ofagent-py-jobs':
- branch-regexp: '{all-branches-regexp}'
- - 'publish-ofagent-py-jobs':
- branch-regexp: '{all-branches-regexp}'
-
-- job-group:
- name: 'verify-ofagent-py-jobs'
- jobs:
- - 'verify-licensed'
- - 'tag-collision-reject'
- - 'make-unit-test':
- unit-test-keep-going: 'true'
- junit-allow-empty-results: true
- - 'voltha-patch-test':
- pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
-
-- job-group:
- name: 'publish-ofagent-py-jobs'
- jobs:
- - 'docker-publish':
- docker-repo: 'voltha'
- dependency-jobs: 'version-tag'
diff --git a/jjb/verify/omci-sim.yaml b/jjb/verify/omci-sim.yaml
deleted file mode 100644
index 7d60783..0000000
--- a/jjb/verify/omci-sim.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-# verification jobs for 'omci-sim' repo
-
-- project:
- name: omci-sim
- project: '{name}'
-
- jobs:
- - 'verify-omci-sim-jobs':
- branch-regexp: '{all-branches-regexp}'
-
-- job-group:
- name: 'verify-omci-sim-jobs'
- jobs:
- - 'verify-licensed'
- - 'tag-collision-reject':
- dependency-jobs: 'verify_omci-sim_licensed'
-# - 'make-unit-test':
-# dest-gopath: "github.com/opencord"
diff --git a/jjb/verify/onos-classic-helm-utils.yaml b/jjb/verify/onos-classic-helm-utils.yaml
new file mode 100644
index 0000000..6fdeaa1
--- /dev/null
+++ b/jjb/verify/onos-classic-helm-utils.yaml
@@ -0,0 +1,30 @@
+---
+# verification jobs for 'onos-classic-helm-utils' repo
+
+- project:
+ name: onos-classic-helm-utils
+ project: '{name}'
+
+ jobs:
+ - 'verify-onos-classic-helm-utils-jobs':
+ branch-regexp: '{all-branches-regexp}'
+ - 'publish-onos-classic-helm-utils-jobs'
+
+- job-group:
+ name: 'verify-onos-classic-helm-utils-jobs'
+ jobs:
+ - 'verify-licensed'
+ - 'tag-collision-reject':
+ dependency-jobs: 'verify_onos-classic-helm-utils_licensed'
+ - 'make-unit-test':
+ build-timeout: 30
+ unit-test-targets: 'docker-build'
+ junit-allow-empty-results: true
+
+- job-group:
+ name: 'publish-onos-classic-helm-utils-jobs'
+ jobs:
+ - 'docker-publish':
+ build-timeout: 30
+ docker-repo: 'opencord'
+ dependency-jobs: 'version-tag'
diff --git a/jjb/verify/openolt.yaml b/jjb/verify/openolt.yaml
index 04ae7b4..c2fd142 100644
--- a/jjb/verify/openolt.yaml
+++ b/jjb/verify/openolt.yaml
@@ -41,12 +41,6 @@
build-days-to-keep: '{build-days-to-keep}'
artifact-num-to-keep: '{artifact-num-to-keep}'
- parameters:
- - string:
- name: notificationEmail
- default: '$GERRIT_EVENT_ACCOUNT_EMAIL'
- description: 'Verification failure of patch $GERRIT_CHANGE_NUMBER to {project} repo'
-
node: 'openolt_deb_onf_agent'
project-type: pipeline
concurrent: true
diff --git a/jjb/verify/ponsim.yaml b/jjb/verify/ponsim.yaml
deleted file mode 100644
index 527c0cd..0000000
--- a/jjb/verify/ponsim.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-# verification jobs for 'ponsim' repo
-
-- project:
- name: ponsim
- project: '{name}'
-
- jobs:
- - 'verify-ponsim-jobs':
- branch-regexp: '{all-branches-regexp}'
- - 'publish-ponsim-jobs':
- branch-regexp: '{all-branches-regexp}'
-
-- job-group:
- name: 'verify-ponsim-jobs'
- jobs:
- - 'verify-licensed'
- - 'tag-collision-reject':
- dependency-jobs: 'verify_ponsim_licensed'
- - 'make-unit-test':
- build-node: 'ubuntu18.04-basebuild-1c-2g'
- dest-gopath: "github.com/opencord"
- unit-test-targets: 'test docker-build'
- unit-test-keep-going: 'true'
-
-- job-group:
- name: 'publish-ponsim-jobs'
- jobs:
- - 'docker-publish':
- docker-repo: 'voltha'
- dependency-jobs: 'version-tag'
diff --git a/jjb/verify/pyvoltha.yaml b/jjb/verify/pyvoltha.yaml
deleted file mode 100644
index 70cfc52..0000000
--- a/jjb/verify/pyvoltha.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-# verification jobs for 'pyvoltha' repo
-
-- project:
- name: pyvoltha
- project: '{name}'
-
- jobs:
- - 'verify-pyvoltha-jobs':
- branch-regexp: '{all-branches-regexp}'
- - 'publish-pyvoltha-jobs':
- branch-regexp: '{all-branches-regexp}'
- pypi-index: 'pypi'
-
-- job-group:
- name: 'verify-pyvoltha-jobs'
- jobs:
- - 'verify-licensed'
- - 'tag-collision-reject':
- dependency-jobs: 'verify_pyvoltha_licensed'
- - 'python-unit-test':
- dependency-jobs: 'verify_pyvoltha_tag-collision'
-
-
-- job-group:
- name: 'publish-pyvoltha-jobs'
- jobs:
- - 'pypi-publish':
- project-regexp: '^{name}$'
- dependency-jobs: 'version-tag'
diff --git a/jjb/verify/voltctl.yaml b/jjb/verify/voltctl.yaml
index 220e0ce..e4821de 100644
--- a/jjb/verify/voltctl.yaml
+++ b/jjb/verify/voltctl.yaml
@@ -25,7 +25,6 @@
unit-test-keep-going: 'true'
dependency-jobs: 'verify_voltctl_tag-collision'
- 'voltha-patch-test':
- pipeline-script: 'voltha/master/bbsim-tests.groovy'
extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- job-group:
diff --git a/jjb/verify/voltha-api-server.yaml b/jjb/verify/voltha-api-server.yaml
index fe82286..0dd61e6 100644
--- a/jjb/verify/voltha-api-server.yaml
+++ b/jjb/verify/voltha-api-server.yaml
@@ -26,7 +26,7 @@
unit-test-keep-going: 'true'
junit-allow-empty-results: true
- 'voltha-patch-test':
- pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
- job-group:
name: 'publish-voltha-api-server-jobs'
diff --git a/jjb/verify/voltha-bbsim.yaml b/jjb/verify/voltha-bbsim.yaml
deleted file mode 100644
index 5cdf7ad..0000000
--- a/jjb/verify/voltha-bbsim.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-# verification jobs for 'voltha-bbsim' repo
-
-- project:
- name: voltha-bbsim
- project: '{name}'
-
- jobs:
- - 'verify-voltha-bbsim-jobs':
- branch-regexp: '{all-branches-regexp}'
- - 'publish-voltha-bbsim-jobs':
- branch-regexp: '{all-branches-regexp}'
-
-- job-group:
- name: 'verify-voltha-bbsim-jobs'
- jobs:
- - 'verify-licensed'
- - 'tag-collision-reject':
- dependency-jobs: 'verify_voltha-bbsim_licensed'
- - 'make-unit-test':
- unit-test-targets: 'test'
- dest-gopath: "github.com/opencord"
- junit-allow-empty-results: true
- build-node: 'ubuntu18.04-basebuild-1c-2g'
-
-- job-group:
- name: 'publish-voltha-bbsim-jobs'
- jobs:
- - 'docker-publish':
- maintainers: "teo@opennetworking.org"
- build-timeout: 30
- docker-repo: 'voltha'
- dependency-jobs: 'version-tag'
diff --git a/jjb/verify/voltha-docs.yaml b/jjb/verify/voltha-docs.yaml
index 904ff98..b2d3187 100644
--- a/jjb/verify/voltha-docs.yaml
+++ b/jjb/verify/voltha-docs.yaml
@@ -28,3 +28,4 @@
build-output-path: '_build/multiversion/'
sync-target-server: 'guide.opencord.org'
sync-target-path: '/var/www/voltha-docs/'
+ build-timeout: 60
diff --git a/jjb/verify/voltha-go.yaml b/jjb/verify/voltha-go.yaml
index e4a4576..05125b7 100644
--- a/jjb/verify/voltha-go.yaml
+++ b/jjb/verify/voltha-go.yaml
@@ -8,10 +8,10 @@
jobs:
- 'verify-voltha-go-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-voltha-go-jobs-voltha-2.7':
- name-extension: '-voltha-2.7'
- override-branch: 'voltha-2.7'
- branch-regexp: '{kind-voltha-regexp}'
+ - 'verify-voltha-go-jobs-voltha-2.8':
+ name-extension: '-voltha-2.8'
+ override-branch: 'voltha-2.8'
+ branch-regexp: '^voltha-2.8$'
- 'verify-voltha-go-jobs-master':
branch-regexp: '^master$'
- 'publish-voltha-go-jobs':
@@ -39,16 +39,15 @@
unit-test-keep-going: 'true'
- job-group:
- name: 'verify-voltha-go-jobs-voltha-2.7'
+ name: 'verify-voltha-go-jobs-voltha-2.8'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
- job-group:
name: 'verify-voltha-go-jobs-master'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/master/bbsim-tests.groovy'
extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- job-group:
diff --git a/jjb/verify/voltha-helm-charts.yaml b/jjb/verify/voltha-helm-charts.yaml
index ebe7c50..0f3a58c 100644
--- a/jjb/verify/voltha-helm-charts.yaml
+++ b/jjb/verify/voltha-helm-charts.yaml
@@ -8,10 +8,10 @@
jobs:
- 'verify-voltha-helm-charts-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-voltha-helm-charts-jobs-voltha-2.7':
- name-extension: '-voltha-2.7'
- override-branch: 'voltha-2.7'
- branch-regexp: '{kind-voltha-regexp}'
+ - 'verify-voltha-helm-charts-jobs-voltha-2.8':
+ name-extension: '-voltha-2.8'
+ override-branch: 'voltha-2.8'
+ branch-regexp: '^voltha-2.8$'
- 'verify-voltha-helm-charts-jobs-master':
branch-regexp: '^master$'
@@ -25,13 +25,12 @@
dependency-jobs: 'verify_voltha-helm-charts_tag-collision'
- job-group:
- name: 'verify-voltha-helm-charts-jobs-voltha-2.7'
+ name: 'verify-voltha-helm-charts-jobs-voltha-2.8'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
- job-group:
name: 'verify-voltha-helm-charts-jobs-master'
jobs:
- - 'voltha-patch-test':
- pipeline-script: 'voltha/master/bbsim-tests.groovy'
+ - 'voltha-patch-test'
diff --git a/jjb/verify/voltha-onos.yaml b/jjb/verify/voltha-onos.yaml
index d708a43..aef2900 100644
--- a/jjb/verify/voltha-onos.yaml
+++ b/jjb/verify/voltha-onos.yaml
@@ -8,10 +8,10 @@
jobs:
- 'verify-voltha-onos-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-voltha-onos-jobs-voltha-2.7':
- name-extension: '-voltha-2.7'
- override-branch: 'voltha-2.7'
- branch-regexp: '{kind-voltha-regexp}'
+ - 'verify-voltha-onos-jobs-voltha-2.8':
+ name-extension: '-voltha-2.8'
+ override-branch: 'voltha-2.8'
+ branch-regexp: '^voltha-2.8$'
- 'verify-voltha-onos-jobs-master':
branch-regexp: '^master$'
- 'publish-voltha-onos-jobs':
@@ -25,17 +25,21 @@
dependency-jobs: 'verify_voltha-onos_licensed'
- job-group:
- name: 'verify-voltha-onos-jobs-voltha-2.7'
+ name: 'verify-voltha-onos-jobs-voltha-2.8'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
- job-group:
name: 'verify-voltha-onos-jobs-master'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/master/bbsim-tests.groovy'
extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
+      # the unit test job will fail on all branches other than "master", as the
+      # "make test" target is not available there
+ - 'make-unit-test':
+ unit-test-targets: 'test'
+ junit-allow-empty-results: true
- job-group:
name: 'publish-voltha-onos-jobs'
diff --git a/jjb/verify/voltha-openolt-adapter.yaml b/jjb/verify/voltha-openolt-adapter.yaml
index ff3587b..0f98077 100644
--- a/jjb/verify/voltha-openolt-adapter.yaml
+++ b/jjb/verify/voltha-openolt-adapter.yaml
@@ -8,10 +8,10 @@
jobs:
- 'verify-voltha-openolt-adapter-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-voltha-openolt-adapter-jobs-voltha-2.7':
- name-extension: '-voltha-2.7'
- override-branch: 'voltha-2.7'
- branch-regexp: '{kind-voltha-regexp}'
+ - 'verify-voltha-openolt-adapter-jobs-voltha-2.8':
+ name-extension: '-voltha-2.8'
+ override-branch: 'voltha-2.8'
+ branch-regexp: '^voltha-2.8$'
- 'verify-voltha-openolt-adapter-jobs-master':
branch-regexp: '^master$'
- 'publish-voltha-openolt-adapter-jobs':
@@ -39,16 +39,15 @@
build-node: 'ubuntu18.04-basebuild-2c-4g'
- job-group:
- name: 'verify-voltha-openolt-adapter-jobs-voltha-2.7'
+ name: 'verify-voltha-openolt-adapter-jobs-voltha-2.8'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
- job-group:
name: 'verify-voltha-openolt-adapter-jobs-master'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/master/bbsim-tests.groovy'
extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- job-group:
diff --git a/jjb/verify/voltha-openonu-adapter-go.yaml b/jjb/verify/voltha-openonu-adapter-go.yaml
index e4e67e3..efcef9f 100644
--- a/jjb/verify/voltha-openonu-adapter-go.yaml
+++ b/jjb/verify/voltha-openonu-adapter-go.yaml
@@ -8,10 +8,10 @@
jobs:
- 'verify-voltha-openonu-adapter-go-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-voltha-openonu-adapter-go-jobs-voltha-2.7':
- name-extension: '-voltha-2.7'
- override-branch: 'voltha-2.7'
- branch-regexp: '{kind-voltha-regexp}'
+ - 'verify-voltha-openonu-adapter-go-jobs-voltha-2.8':
+ name-extension: '-voltha-2.8'
+ override-branch: 'voltha-2.8'
+ branch-regexp: '^voltha-2.8$'
- 'verify-voltha-openonu-adapter-go-jobs-master':
branch-regexp: '^master$'
- 'publish-voltha-openonu-adapter-go-jobs':
@@ -38,17 +38,33 @@
build-node: 'ubuntu18.04-basebuild-2c-4g'
- job-group:
- name: 'verify-voltha-openonu-adapter-go-jobs-voltha-2.7'
+ name: 'verify-voltha-openonu-adapter-go-jobs-voltha-2.8'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
- job-group:
name: 'verify-voltha-openonu-adapter-go-jobs-master'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/master/bbsim-tests.groovy'
extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
+ testTargets: |
+ - target: sanity-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: sanity-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: sanity-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ - target: 1t4gem-openonu-go-adapter-test
+ workflow: att
+ flags: ""
+ teardown: true
- job-group:
name: 'publish-voltha-openonu-adapter-go-jobs'
diff --git a/jjb/verify/voltha-openonu-adapter.yaml b/jjb/verify/voltha-openonu-adapter.yaml
deleted file mode 100644
index 21a1968..0000000
--- a/jjb/verify/voltha-openonu-adapter.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-# verification jobs for 'voltha-openonu-adapter' repo
-# NOTE this component is deprecated, should we keep running the tests?
-
-- project:
- name: voltha-openonu-adapter
- project: '{name}'
-
- jobs:
- - 'verify-voltha-openonu-adapter-jobs':
- branch-regexp: '{all-branches-regexp}'
- - 'verify-voltha-openonu-adapter-jobs-voltha-2.7':
- name-extension: '-voltha-2.7'
- override-branch: 'voltha-2.7'
- branch-regexp: '{kind-voltha-regexp}'
- - 'verify-voltha-openonu-adapter-jobs-master':
- branch-regexp: '^master$'
- - 'publish-voltha-openonu-adapter-jobs':
- branch-regexp: '{all-branches-regexp}'
-
-- job-group:
- name: 'verify-voltha-openonu-adapter-jobs'
- jobs:
- - 'verify-licensed'
- - 'tag-collision-reject':
- dependency-jobs: 'verify_voltha-openonu-adapter_licensed'
- - 'make-unit-test':
- unit-test-targets: 'test'
- unit-test-keep-going: 'true'
- junit-allow-empty-results: true
- build-timeout: 15
- - 'make-sca':
- unit-test-targets: 'sca'
- unit-test-keep-going: 'true'
- junit-allow-empty-results: true
- build-timeout: 15
-
-- job-group:
- name: 'verify-voltha-openonu-adapter-jobs-voltha-2.7'
- jobs:
- - 'voltha-patch-test':
- pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
-
-- job-group:
- name: 'verify-voltha-openonu-adapter-jobs-master'
- jobs:
- - 'voltha-patch-test':
- pipeline-script: 'voltha/master/bbsim-tests.groovy'
- extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
-
-- job-group:
- name: 'publish-voltha-openonu-adapter-jobs'
- jobs:
- - 'docker-publish':
- build-timeout: 30
- docker-repo: 'voltha'
- dependency-jobs: 'version-tag'
diff --git a/jjb/verify/voltha-ponsimolt-adapter.yaml b/jjb/verify/voltha-ponsimolt-adapter.yaml
deleted file mode 100644
index 052107d..0000000
--- a/jjb/verify/voltha-ponsimolt-adapter.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-# verification jobs for 'voltha-ponsimolt-adapter' repo
-
-- project:
- name: voltha-ponsimolt-adapter
- project: '{name}'
-
- jobs:
- - 'verify-voltha-ponsimolt-adapter-jobs':
- branch-regexp: '{all-branches-regexp}'
- - 'publish-voltha-ponsimolt-adapter-jobs':
- branch-regexp: '{all-branches-regexp}'
-
-- job-group:
- name: 'verify-voltha-ponsimolt-adapter-jobs'
- jobs:
- - 'verify-licensed'
- - 'tag-collision-reject':
- dependency-jobs: 'verify_voltha-ponsimolt-adapter_licensed'
- - 'make-unit-test':
- dest-gopath: "github.com/opencord"
- unit-test-keep-going: 'true'
-
-- job-group:
- name: 'publish-voltha-ponsimolt-adapter-jobs'
- jobs:
- - 'docker-publish':
- docker-repo: 'voltha'
- dependency-jobs: 'version-tag'
diff --git a/jjb/verify/voltha-ponsimonu-adapter.yaml b/jjb/verify/voltha-ponsimonu-adapter.yaml
deleted file mode 100644
index 3f83d09..0000000
--- a/jjb/verify/voltha-ponsimonu-adapter.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-# verification jobs for 'voltha-ponsimonu-adapter' repo
-
-- project:
- name: voltha-ponsimonu-adapter
- project: '{name}'
-
- jobs:
- - 'verify-voltha-ponsimonu-adapter-jobs':
- branch-regexp: '{all-branches-regexp}'
- - 'publish-voltha-ponsimonu-adapter-jobs':
- branch-regexp: '{all-branches-regexp}'
-
-- job-group:
- name: 'verify-voltha-ponsimonu-adapter-jobs'
- jobs:
- - 'verify-licensed'
- - 'tag-collision-reject':
- dependency-jobs: 'verify_voltha-ponsimonu-adapter_licensed'
- - 'make-unit-test':
- dest-gopath: "github.com/opencord"
- unit-test-keep-going: 'true'
-
-- job-group:
- name: 'publish-voltha-ponsimonu-adapter-jobs'
- jobs:
- - 'docker-publish':
- docker-repo: 'voltha'
- dependency-jobs: 'version-tag'
diff --git a/jjb/verify/voltha-python-base.yaml b/jjb/verify/voltha-python-base.yaml
deleted file mode 100644
index ce03fb3..0000000
--- a/jjb/verify/voltha-python-base.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-# verification jobs for 'voltha-python-base' repo
-
-- project:
- name: voltha-python-base
- project: '{name}'
-
- jobs:
- - 'verify-voltha-python-base-jobs':
- branch-regexp: '{all-branches-regexp}'
- - 'publish-voltha-python-base-jobs'
-
-- job-group:
- name: 'verify-voltha-python-base-jobs'
- jobs:
- - 'verify-licensed'
- - 'tag-collision-reject':
- dependency-jobs: 'verify_voltha-python-base_licensed'
- - 'make-unit-test':
- build-timeout: 30
- unit-test-targets: 'docker-build'
- junit-allow-empty-results: true
-
-- job-group:
- name: 'publish-voltha-python-base-jobs'
- jobs:
- - 'docker-publish':
- build-timeout: 30
- docker-repo: 'voltha'
- dependency-jobs: 'version-tag'
diff --git a/jjb/verify/voltha-simolt-adapter.yaml b/jjb/verify/voltha-simolt-adapter.yaml
deleted file mode 100644
index 41afcac..0000000
--- a/jjb/verify/voltha-simolt-adapter.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-# verification jobs for 'voltha-simolt-adapter' repo
-
-- project:
- name: voltha-simolt-adapter
- project: '{name}'
-
- jobs:
- - 'verify-voltha-simolt-adapter-jobs':
- branch-regexp: '{all-branches-regexp}'
- - 'publish-voltha-simolt-adapter-jobs':
- branch-regexp: '{all-branches-regexp}'
-
-- job-group:
- name: 'verify-voltha-simolt-adapter-jobs'
- jobs:
- - 'verify-licensed'
- - 'tag-collision-reject':
- dependency-jobs: 'verify_voltha-simolt-adapter_licensed'
- - 'make-unit-test':
- dest-gopath: "github.com/opencord"
- unit-test-keep-going: 'true'
-
-- job-group:
- name: 'publish-voltha-simolt-adapter-jobs'
- jobs:
- - 'docker-publish':
- docker-repo: 'voltha'
- dependency-jobs: 'version-tag'
diff --git a/jjb/verify/voltha-simonu-adapter.yaml b/jjb/verify/voltha-simonu-adapter.yaml
deleted file mode 100644
index 29aa572..0000000
--- a/jjb/verify/voltha-simonu-adapter.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-# verification jobs for 'voltha-simonu-adapter' repo
-
-- project:
- name: voltha-simonu-adapter
- project: '{name}'
-
- jobs:
- - 'verify-voltha-simonu-adapter-jobs':
- branch-regexp: '{all-branches-regexp}'
- - 'publish-voltha-simonu-adapter-jobs':
- branch-regexp: '{all-branches-regexp}'
-
-- job-group:
- name: 'verify-voltha-simonu-adapter-jobs'
- jobs:
- - 'verify-licensed'
- - 'tag-collision-reject':
- dependency-jobs: 'verify_voltha-simonu-adapter_licensed'
- - 'make-unit-test':
- dest-gopath: "github.com/opencord"
- unit-test-keep-going: 'true'
-
-- job-group:
- name: 'publish-voltha-simonu-adapter-jobs'
- jobs:
- - 'docker-publish':
- docker-repo: 'voltha'
- dependency-jobs: 'version-tag'
diff --git a/jjb/verify/voltha-system-tests.yaml b/jjb/verify/voltha-system-tests.yaml
index b385e2f..003d28e 100644
--- a/jjb/verify/voltha-system-tests.yaml
+++ b/jjb/verify/voltha-system-tests.yaml
@@ -9,11 +9,11 @@
- 'verify-voltha-system-tests-jobs':
build-node: 'ubuntu18.04-basebuild-4c-8g'
branch-regexp: '{all-branches-regexp}'
- - 'verify-voltha-system-tests-jobs-voltha-2.7':
+ - 'verify-voltha-system-tests-jobs-voltha-2.8':
build-node: 'ubuntu18.04-basebuild-4c-8g'
- name-extension: '-voltha-2.7'
- override-branch: 'voltha-2.7'
- branch-regexp: '{kind-voltha-regexp}'
+ name-extension: '-voltha-2.8'
+ override-branch: 'voltha-2.8'
+ branch-regexp: '^voltha-2.8$'
- 'verify-voltha-system-tests-jobs-master':
build-node: 'ubuntu18.04-basebuild-4c-8g'
branch-regexp: '^master$'
@@ -30,14 +30,13 @@
junit-allow-empty-results: true
- job-group:
- name: 'verify-voltha-system-tests-jobs-voltha-2.7'
+ name: 'verify-voltha-system-tests-jobs-voltha-2.8'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
- job-group:
name: 'verify-voltha-system-tests-jobs-master'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha/master/bbsim-tests.groovy'
extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
diff --git a/jjb/verify/voltha.yaml b/jjb/verify/voltha.yaml
deleted file mode 100644
index 6eede8d..0000000
--- a/jjb/verify/voltha.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-# verification jobs for 'xos' repo
-
-- project:
- name: voltha
- project: '{name}'
-
- jobs:
- - 'verify-voltha-jobs':
- branch-regexp: '^(master|voltha-.*)$'
-
-- job-group:
- name: 'verify-voltha-jobs'
- jobs:
- - 'verify-licensed'
- - 'voltha-unit-test':
- dependency-jobs: 'verify_voltha_licensed'
-
diff --git a/jjb/voltha-e2e.yaml b/jjb/voltha-e2e.yaml
index 958cd22..68a7b3f 100755
--- a/jjb/voltha-e2e.yaml
+++ b/jjb/voltha-e2e.yaml
@@ -45,6 +45,44 @@
teardown: false
- 'voltha-periodic-test':
+ name: 'periodic-voltha-test-bbsim-grpc'
+ code-branch: 'master'
+ volthaHelmChartsChange: refs/changes/69/26569/7
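+      # NOTE: the folded block (>) below collapses into a single space-separated
+      # string of helm --set overrides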
+ extraHelmFlags: >
+ --set global.image_tag=master
+ --set onos-classic.image.repository=andreacampanella/voltha-onos --set onos-classic.image.tag=port-remove
+ --set voltha.images.rw_core.repository=dbainbriciena/voltha-rw-core
+ --set voltha.images.rw_core.tag=grpc009
+ --set voltha-adapter-openonu.images.adapter_open_onu_go.repository=dbainbriciena/voltha-openonu-adapter-go
+ --set voltha-adapter-openonu.images.adapter_open_onu_go.tag=grpc009
+ --set voltha-adapter-openolt.images.adapter_open_olt.repository=dbainbriciena/voltha-openolt-adapter
+ --set voltha-adapter-openolt.images.adapter_open_olt.tag=grpc009
+ --set voltha-adapter-openolt.adapter_open_olt.endpoints.core_endpoint=voltha-voltha-core.voltha.svc:55558
+ --set voltha-adapter-openolt.adapter_open_olt.endpoints.adapter_endpoint=voltha-voltha-adapter-openolt-api.voltha.svc:50060
+ --set voltha-adapter-openonu.adapter_open_onu.endpoints.core_endpoint=voltha-voltha-core.voltha.svc:55558
+ --set voltha-adapter-openonu.adapter_open_onu.endpoints.adapter_endpoint=voltha-voltha-adapter-openonu-api.voltha.svc:50060
+ --set services.kafka.cluster.address=voltha-infra-kafka.default.svc:9092
+ --set services.etcd.address=voltha-infra-etcd.default.svc:2379
+ time-trigger: "H H/23 * * *"
+ testTargets: |
+ - target: functional-single-kind
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: bbsim-alarms-kind
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: bbsim-failurescenarios
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: bbsim-errorscenarios
+ workflow: att
+ flags: ""
+ teardown: false
+
+ - 'voltha-periodic-test':
name: 'periodic-voltha-multiple-olts-test-bbsim'
code-branch: 'master'
olts: 2
@@ -68,23 +106,221 @@
flags: ""
teardown: false
- - 'voltha-periodic-test-kind-voltha-based':
- name: 'periodic-voltha-multiple-olts-test-bbsim-2.7'
- pipeline-script: 'voltha/voltha-2.7/voltha-nightly-tests-bbsim.groovy'
- build-node: 'qct-pod4-node2'
- make-target: functional-multi-olt
- make-target-failtest: bbsim-multiolt-failurescenarios
- make-target-errortest: bbsim-multiolt-errorscenarios
- make-target-alarmtest: bbsim-alarms-kind
- make-target-multipleolt: bbsim-multiolt-kind
- withAlarms: false
- code-branch: 'voltha-2.7'
+ - 'voltha-periodic-test':
+ name: 'periodic-voltha-test-bbsim-2.8'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
+ code-branch: 'voltha-2.8'
+ time-trigger: "H H/23 * * *"
+ testTargets: |
+ - target: functional-single-kind
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: bbsim-alarms-kind
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: bbsim-failurescenarios
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: bbsim-errorscenarios
+ workflow: att
+ flags: ""
+ teardown: false
+
+ - 'voltha-periodic-test':
+ name: 'periodic-voltha-multiple-olts-test-bbsim-2.8'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
+ code-branch: 'voltha-2.8'
olts: 2
- onus: 2
- pons: 2
- time-trigger: "H H * * *"
+ extraHelmFlags: '--set onu=2,pon=2'
+ time-trigger: "H H/23 * * *"
+ testTargets: |
+ - target: functional-multi-olt
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: bbsim-multiolt-failurescenarios
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: bbsim-multiolt-errorscenarios
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: bbsim-multiolt-kind
+ workflow: att
+ flags: ""
+ teardown: false
+
+ - 'voltha-periodic-test':
+ name: 'periodic-voltha-multi-uni-test-bbsim'
+ code-branch: 'master'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set voltha-adapter-openonu.adapter_open_onu.uni_port_mask=0x00FF'
+ time-trigger: "H H/23 * * *"
+ testTargets: |
+ - target: functional-single-kind-multiuni-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: bbsim-multiuni-failurescenarios-att
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: bbsim-multiuni-errorscenarios-att
+ workflow: att
+ flags: ""
+ teardown: false
+
+ - 'voltha-periodic-test':
+ name: 'periodic-voltha-multi-uni-multiple-olts-test-bbsim'
+ code-branch: 'master'
+ olts: 2
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set onu=2,pon=2 --set voltha-adapter-openonu.adapter_open_onu.uni_port_mask=0x00FF'
+ time-trigger: "H H/23 * * *"
+ testTargets: |
+ - target: functional-multiuni-multiolt-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: bbsim-multiuni-multiolt-failurescenarios-att
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: bbsim-multiuni-multiolt-errorscenarios-att
+ workflow: att
+ flags: ""
+ teardown: false
+ timeout: 180
+
+ - 'voltha-periodic-test':
+ name: 'periodic-voltha-multi-uni-test-bbsim-2.8'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
+ code-branch: 'voltha-2.8'
+ extraHelmFlags: '--set voltha-adapter-openonu.adapter_open_onu.uni_port_mask=0x00FF'
+ time-trigger: "H H/23 * * *"
+ testTargets: |
+ - target: functional-single-kind-multiuni-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: bbsim-multiuni-failurescenarios-att
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: bbsim-multiuni-errorscenarios-att
+ workflow: att
+ flags: ""
+ teardown: false
+
+ - 'voltha-periodic-test':
+ name: 'periodic-voltha-multi-uni-multiple-olts-test-bbsim-2.8'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
+ code-branch: 'voltha-2.8'
+ olts: 2
+ extraHelmFlags: '--set onu=2,pon=2 --set voltha-adapter-openonu.adapter_open_onu.uni_port_mask=0x00FF'
+ time-trigger: "H H/23 * * *"
+ testTargets: |
+ - target: functional-multiuni-multiolt-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: bbsim-multiuni-multiolt-failurescenarios-att
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: bbsim-multiuni-multiolt-errorscenarios-att
+ workflow: att
+ flags: ""
+ teardown: false
+ timeout: 180
# openonu Go periodic tests
+
+ - 'voltha-periodic-test':
+ name: 'periodic-voltha-openonu-go-test-bbsim-grpc'
+ code-branch: 'master'
+ volthaHelmChartsChange: refs/changes/69/26569/7
+ extraHelmFlags: >
+ --set global.image_tag=master
+ --set onos-classic.image.repository=andreacampanella/voltha-onos --set onos-classic.image.tag=port-remove
+ --set voltha.images.rw_core.repository=dbainbriciena/voltha-rw-core
+ --set voltha.images.rw_core.tag=grpc009
+ --set voltha-adapter-openonu.images.adapter_open_onu_go.repository=dbainbriciena/voltha-openonu-adapter-go
+ --set voltha-adapter-openonu.images.adapter_open_onu_go.tag=grpc009
+ --set voltha-adapter-openolt.images.adapter_open_olt.repository=dbainbriciena/voltha-openolt-adapter
+ --set voltha-adapter-openolt.images.adapter_open_olt.tag=grpc009
+ --set voltha-adapter-openolt.adapter_open_olt.endpoints.core_endpoint=voltha-voltha-core.voltha.svc:55558
+ --set voltha-adapter-openolt.adapter_open_olt.endpoints.adapter_endpoint=voltha-voltha-adapter-openolt-api.voltha.svc:50060
+ --set voltha-adapter-openonu.adapter_open_onu.endpoints.core_endpoint=voltha-voltha-core.voltha.svc:55558
+ --set voltha-adapter-openonu.adapter_open_onu.endpoints.adapter_endpoint=voltha-voltha-adapter-openonu-api.voltha.svc:50060
+ --set services.kafka.cluster.address=voltha-infra-kafka.default.svc:9092
+ --set services.etcd.address=voltha-infra-etcd.default.svc:2379
+ time-trigger: "H H/12 * * *"
+ logLevel: 'DEBUG'
+ testTargets: |
+ - target: 1t1gem-openonu-go-adapter-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: 1t4gem-openonu-go-adapter-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: 1t8gem-openonu-go-adapter-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: mib-upload-templating-openonu-go-adapter-test
+ workflow: att
+ flags: "--set pon=2,onu=2,controlledActivation=only-onu"
+ teardown: true
+ - target: reconcile-openonu-go-adapter-test-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-test-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-test-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ - target: openonu-go-adapter-omci-hardening-passed-test
+ workflow: att
+ flags: "--set omci_response_rate=9 --set voltha-adapter-openonu.adapter_open_onu.omci_timeout=1s"
+ teardown: true
+ - target: openonu-go-adapter-omci-hardening-failed-test
+ workflow: att
+ flags: "--set omci_response_rate=7"
+ teardown: true
+ - target: voltha-onu-omci-get-single-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-onu-omci-get-single-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-omci-get-single-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-flows-check-single-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-onu-flows-check-single-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-flows-check-single-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+
- 'voltha-periodic-test':
name: 'periodic-voltha-openonu-go-test-bbsim'
code-branch: 'master'
@@ -122,12 +358,92 @@
teardown: true
- target: openonu-go-adapter-omci-hardening-passed-test
workflow: att
- flags: "--set omci_response_rate=9 --set omci_timeout=1s"
+ flags: "--set omci_response_rate=9 --set voltha-adapter-openonu.adapter_open_onu.omci_timeout=1s"
teardown: true
- target: openonu-go-adapter-omci-hardening-failed-test
workflow: att
flags: "--set omci_response_rate=7"
teardown: true
+ - target: voltha-onu-omci-get-single-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-onu-omci-get-single-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-omci-get-single-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-flows-check-single-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-onu-flows-check-single-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-flows-check-single-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+
+ - 'voltha-periodic-test':
+ name: 'periodic-voltha-openonu-go-test-bbsim-2.8'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
+ code-branch: 'voltha-2.8'
+ time-trigger: "H H/23 * * *"
+ logLevel: 'DEBUG'
+ testTargets: |
+ - target: 1t1gem-openonu-go-adapter-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: 1t4gem-openonu-go-adapter-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: 1t8gem-openonu-go-adapter-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: mib-upload-templating-openonu-go-adapter-test
+ workflow: att
+ flags: "--set pon=2,onu=2,controlledActivation=only-onu"
+ teardown: true
+ - target: reconcile-openonu-go-adapter-test-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-test-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-test-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ - target: openonu-go-adapter-omci-hardening-passed-test
+ workflow: att
+ flags: "--set omci_response_rate=9 --set voltha-adapter-openonu.adapter_open_onu.omci_timeout=1s"
+ teardown: true
+ - target: openonu-go-adapter-omci-hardening-failed-test
+ workflow: att
+ flags: "--set omci_response_rate=7"
+ teardown: true
+ - target: voltha-onu-omci-get-single-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-onu-omci-get-single-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-omci-get-single-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
- 'voltha-periodic-test':
name: 'patchset-voltha-openonu-go-test-bbsim'
@@ -168,26 +484,36 @@
teardown: true
- target: openonu-go-adapter-omci-hardening-passed-test
workflow: att
- flags: "--set omci_response_rate=9 --set omci_timeout=1s"
+ flags: "--set omci_response_rate=9 --set voltha-adapter-openonu.adapter_open_onu.omci_timeout=1s"
teardown: true
- target: openonu-go-adapter-omci-hardening-failed-test
workflow: att
flags: "--set omci_response_rate=7"
teardown: true
-
- - 'voltha-periodic-test-kind-voltha-based':
- name: 'periodic-voltha-openonu-go-test-bbsim-2.7'
- pipeline-script: 'voltha/voltha-2.7/voltha-openonu-go-test-bbsim.groovy'
- build-node: 'ubuntu18.04-basebuild-8c-15g'
- make-target: openonu-go-adapter-test
- make-target-1t4gemtest: 1t4gem-openonu-go-adapter-test
- make-target-1t8gemtest: 1t8gem-openonu-go-adapter-test
- make-target-reconciletest: reconcile-openonu-go-adapter-test
- make-target-reconciledttest: reconcile-openonu-go-adapter-test-dt
- make-target-reconciletttest: reconcile-openonu-go-adapter-test-tt
- withAlarms: false
- code-branch: 'voltha-2.7'
- time-trigger: "H H/23 * * *"
+ - target: voltha-onu-omci-get-single-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-onu-omci-get-single-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-omci-get-single-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-flows-check-single-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-onu-flows-check-single-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-flows-check-single-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
- 'voltha-periodic-test':
name: 'periodic-voltha-multiple-olts-openonu-go-test-bbsim'
@@ -220,9 +546,79 @@
workflow: tt
flags: ""
teardown: true
+ - target: voltha-onu-omci-get-multiolt-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-onu-omci-get-multiolt-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-omci-get-multiolt-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-flows-check-multiolt-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-onu-flows-check-multiolt-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-flows-check-multiolt-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
time-trigger: "H H/12 * * *"
- 'voltha-periodic-test':
+ name: 'periodic-voltha-multiple-olts-openonu-go-test-bbsim-2.8'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
+ code-branch: 'voltha-2.8'
+ extraHelmFlags: '--set onu=2,pon=2'
+ olts: 2
+ logLevel: 'DEBUG'
+ testTargets: |
+ - target: 1t1gem-openonu-go-adapter-multi-olt-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: 1t4gem-openonu-go-adapter-multi-olt-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: 1t8gem-openonu-go-adapter-multi-olt-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-multi-olt-test-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-multi-olt-test-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-multi-olt-test-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-omci-get-multiolt-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-onu-omci-get-multiolt-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-omci-get-multiolt-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ time-trigger: "H H/23 * * *"
+
+ - 'voltha-periodic-test':
name: 'patchset-voltha-multiple-olts-openonu-go-test-bbsim'
trigger-comment: "voltha test openonu multiolt"
code-branch: '$GERRIT_BRANCH'
@@ -255,12 +651,36 @@
workflow: tt
flags: ""
teardown: true
+ - target: voltha-onu-omci-get-multiolt-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-onu-omci-get-multiolt-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-omci-get-multiolt-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-flows-check-multiolt-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-onu-flows-check-multiolt-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-onu-flows-check-multiolt-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
olts: 2
- 'voltha-periodic-test':
name: 'periodic-voltha-pm-data-test-bbsim'
code-branch: 'master'
- extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set externalAccess.enabled=true,externalAccess.service.type=NodePort,externalAccess.service.nodePorts[0]=30201,externalAccess.service.domain=127.0.0.1'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set kafka.externalAccess.enabled=true,kafka.externalAccess.service.type=NodePort,kafka.externalAccess.service.nodePorts[0]=30201,kafka.externalAccess.service.domain=127.0.0.1'
time-trigger: "H H/23 * * *"
logLevel: 'DEBUG'
testTargets: |
@@ -281,8 +701,7 @@
- 'voltha-periodic-test':
name: 'patchset-voltha-pm-data-test-bbsim'
trigger-comment: "voltha test pm data singleolt"
- code-branch: '$GERRIT_BRANCH'
- extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set externalAccess.enabled=true,externalAccess.service.type=NodePort,externalAccess.service.nodePorts[0]=30201,externalAccess.service.domain=127.0.0.1'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set kafka.externalAccess.enabled=true,kafka.externalAccess.service.type=NodePort,kafka.externalAccess.service.nodePorts[0]=30201,kafka.externalAccess.service.domain=127.0.0.1'
code-branch: '$GERRIT_BRANCH'
gerrit-project: '$GERRIT_PROJECT'
gerritRefspec: '$GERRIT_REFSPEC'
@@ -303,9 +722,30 @@
timeout: 140
- 'voltha-periodic-test':
+ name: 'periodic-voltha-pm-data-test-bbsim-2.8'
+ code-branch: 'voltha-2.8'
+ extraHelmFlags: '--set kafka.externalAccess.enabled=true,kafka.externalAccess.service.type=NodePort,kafka.externalAccess.service.nodePorts[0]=30201,kafka.externalAccess.service.domain=127.0.0.1'
+ time-trigger: "H H/23 * * *"
+ logLevel: 'DEBUG'
+ testTargets: |
+ - target: voltha-pm-data-single-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-pm-data-single-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-pm-data-single-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ timeout: 140
+
+ - 'voltha-periodic-test':
name: 'periodic-voltha-multiple-olts-pm-data-test-bbsim'
code-branch: 'master'
- extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set onu=2,pon=2 --set externalAccess.enabled=true,externalAccess.service.type=NodePort,externalAccess.service.nodePorts[0]=30201,externalAccess.service.domain=127.0.0.1'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set onu=2,pon=2 --set kafka.externalAccess.enabled=true,kafka.externalAccess.service.type=NodePort,kafka.externalAccess.service.nodePorts[0]=30201,kafka.externalAccess.service.domain=127.0.0.1'
olts: 2
timeout: 180
logLevel: 'DEBUG'
@@ -328,7 +768,7 @@
name: 'patchset-voltha-multiple-olts-pm-data-test-bbsim'
trigger-comment: "voltha test pm data multiolt"
code-branch: '$GERRIT_BRANCH'
- extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set onu=2,pon=2 --set externalAccess.enabled=true,externalAccess.service.type=NodePort,externalAccess.service.nodePorts[0]=30201,externalAccess.service.domain=127.0.0.1'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set onu=2,pon=2 --set kafka.externalAccess.enabled=true,kafka.externalAccess.service.type=NodePort,kafka.externalAccess.service.nodePorts[0]=30201,kafka.externalAccess.service.domain=127.0.0.1'
gerrit-project: '$GERRIT_PROJECT'
gerritRefspec: '$GERRIT_REFSPEC'
logLevel: 'DEBUG'
@@ -348,21 +788,26 @@
olts: 2
timeout: 180
- - 'voltha-periodic-test-kind-voltha-based':
- name: 'periodic-voltha-multiple-olts-openonu-go-test-bbsim-2.7'
- pipeline-script: 'voltha/voltha-2.7/voltha-openonu-go-test-bbsim.groovy'
- build-node: 'ubuntu18.04-basebuild-8c-15g'
- make-target: openonu-go-adapter-multi-olt-test
- make-target-1t4gemtest: 1t4gem-openonu-go-adapter-multi-olt-test
- make-target-1t8gemtest: 1t8gem-openonu-go-adapter-multi-olt-test
- make-target-reconciletest: reconcile-openonu-go-adapter-multi-olt-test
- make-target-reconciledttest: reconcile-openonu-go-adapter-multi-olt-test-dt
- make-target-reconciletttest: reconcile-openonu-go-adapter-multi-olt-test-tt
- withAlarms: false
- code-branch: 'voltha-2.7'
+ - 'voltha-periodic-test':
+ name: 'periodic-voltha-multiple-olts-pm-data-test-bbsim-2.8'
+ code-branch: 'voltha-2.8'
+ extraHelmFlags: '--set onu=2,pon=2 --set kafka.externalAccess.enabled=true,kafka.externalAccess.service.type=NodePort,kafka.externalAccess.service.nodePorts[0]=30201,kafka.externalAccess.service.domain=127.0.0.1'
olts: 2
- onus: 2
- pons: 2
+ timeout: 180
+ logLevel: 'DEBUG'
+ testTargets: |
+ - target: voltha-pm-data-multiolt-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-pm-data-multiolt-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-pm-data-multiolt-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
time-trigger: "H H/23 * * *"
- 'voltha-periodic-test':
@@ -376,26 +821,16 @@
flags: ""
teardown: true
- - 'voltha-periodic-test-kind-voltha-based':
- name: 'periodic-voltha-test-DMI-2.7'
- pipeline-script: 'voltha/voltha-2.7/voltha-DMI-bbsim-tests.groovy'
- build-node: 'qct-pod4-node2'
- make-target: bbsim-dmi-hw-management-test
- withAlarms: false
- code-branch: 'voltha-2.7'
+ - 'voltha-periodic-test':
+ name: 'periodic-voltha-test-DMI-2.8'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
+ code-branch: 'voltha-2.8'
time-trigger: "H H/23 * * *"
-
- - 'voltha-periodic-test-kind-voltha-based':
- name: 'periodic-voltha-test-bbsim-2.7'
- pipeline-script: 'voltha/voltha-2.7/voltha-nightly-tests-bbsim.groovy'
- build-node: 'qct-pod4-node2'
- make-target: functional-single-kind
- make-target-failtest: bbsim-failurescenarios
- make-target-errortest: bbsim-errorscenarios
- make-target-alarmtest: bbsim-alarms-kind
- withAlarms: true
- code-branch: 'voltha-2.7'
- time-trigger: "H H * * *"
+ testTargets: |
+ - target: bbsim-dmi-hw-management-test
+ workflow: att
+ flags: ""
+ teardown: true
- 'voltha-periodic-test':
name: 'periodic-voltha-etcd-test'
@@ -409,15 +844,18 @@
flags: ""
teardown: true
- - 'voltha-periodic-test-kind-voltha-based':
- name: 'periodic-voltha-etcd-test-2.7'
- pipeline-script: 'voltha/voltha-2.7/voltha-system-test-bbsim.groovy'
+ - 'voltha-periodic-test':
+ name: 'periodic-voltha-etcd-test-2.8'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
build-node: 'ubuntu18.04-basebuild-4c-8g'
- code-branch: 'voltha-2.7'
- make-target: sanity-multi-kind
- onus: 2
- pons: 2
- time-trigger: "H H/12 * * *"
+ code-branch: 'voltha-2.8'
+ extraHelmFlags: '--set onu=2,pon=2'
+ time-trigger: "H H/23 * * *"
+ testTargets: |
+ - target: sanity-multi-kind
+ workflow: att
+ flags: ""
+ teardown: true
- 'voltha-periodic-test':
name: 'periodic-voltha-sanity-test-multi-runs'
@@ -446,16 +884,32 @@
flags: ""
teardown: false
- - 'voltha-periodic-test-kind-voltha-based':
- name: 'periodic-voltha-sanity-test-multi-runs-2.7'
- pipeline-script: 'voltha/voltha-2.7/voltha-go-multi-tests.groovy'
- build-node: 'qct-pod4-node2'
- code-branch: 'voltha-2.7'
- make-target: sanity-kind
- onus: 1
- pons: 1
- test-runs: 5
+ - 'voltha-periodic-test':
+ name: 'periodic-voltha-sanity-test-multi-runs-2.8'
+ pipeline-script: 'voltha/voltha-2.8/bbsim-tests.groovy'
+ code-branch: 'voltha-2.8'
time-trigger: "H H/23 * * *"
+ testTargets: |
+ - target: sanity-kind
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: sanity-kind
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: sanity-kind
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: sanity-kind
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: sanity-kind
+ workflow: att
+ flags: ""
+ teardown: false
- 'voltha-periodic-test':
name: 'nightly-voltha-DTflow-sanity-test'
@@ -472,34 +926,34 @@
# ATT Per-patchset Pod builds on Tucson pod (master)
- 'verify_physical_voltha_patchset_auto':
name: 'verify_physical_voltha_patchset_auto'
- extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set global.log_level=debug'
workflow: 'att'
+ branch-pattern: master
- # ATT Per-patchset Pod builds on Tucson pod (voltha-2.7)
+
+ # ATT Per-patchset Pod builds on Tucson pod (voltha-2.8)
- 'verify_physical_voltha_patchset_auto':
- name: 'verify_physical_voltha_patchset_auto_voltha-2.7'
- oltDebVersionMaster: 'openolt_asfvolt16-3.4.1-dev-c5dcc7f2b27df1c42868b3a79a5416808511bb1d-40G-NNI.deb'
- oltDebVersionVoltha23: 'openolt_asfvolt16-3.3.3-1a5d68b50d8bcc5ba6cb1630d3294c30c37cd2f5-40G-NNI.deb'
- pipeline-script: 'voltha/voltha-2.7/voltha-physical-build-and-tests.groovy'
- branch-pattern: voltha-2.7
+ name: 'verify_physical_voltha_patchset_auto-2.8'
+ pipeline-script: 'voltha/voltha-2.8/tucson-build-and-test.groovy'
+ extraHelmFlags: '--set global.log_level=debug'
workflow: 'att'
+ branch-pattern: voltha-2.8
# ATT Manual Pod builds on Tucson pod (master)
- 'verify_physical_voltha_patchset_manual':
name: 'verify_physical_voltha_patchset_manual'
trigger-string: 'hardware test'
branch-pattern: master
- extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set global.log_level=debug'
workflow: 'att'
- # ATT Manual Pod builds on Tucson pod (voltha-2.7)
+ # ATT Manual Pod builds on Tucson pod (voltha-2.8)
- 'verify_physical_voltha_patchset_manual':
- name: 'verify_physical_voltha_patchset_manual_voltha-2.7'
- oltDebVersionMaster: 'openolt_asfvolt16-3.4.1-dev-c5dcc7f2b27df1c42868b3a79a5416808511bb1d-40G-NNI.deb'
- oltDebVersionVoltha23: 'openolt_asfvolt16-3.3.3-1a5d68b50d8bcc5ba6cb1630d3294c30c37cd2f5-40G-NNI.deb'
- pipeline-script: 'voltha/voltha-2.7/voltha-physical-build-and-tests.groovy'
+ name: 'verify_physical_voltha_patchset_manual-2.8'
+ pipeline-script: 'voltha/voltha-2.8/tucson-build-and-test.groovy'
trigger-string: 'hardware test'
- branch-pattern: voltha-2.7
+ branch-pattern: voltha-2.8
+ extraHelmFlags: '--set global.log_level=debug'
workflow: 'att'
# DT Manual Pod builds on Tucson pod (master)
@@ -509,23 +963,22 @@
trigger-string: 'DT hardware test'
default-test-args: '-i sanityDt -i PowerSwitch -X'
branch-pattern: master
- extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set global.log_level=debug'
- # DT Per-patchset Pod builds on Tucson pod (voltha-2.7)
+ # DT Manual Pod builds on Tucson pod (voltha-2.8)
- 'verify_physical_voltha_patchset_manual':
- name: 'verify_physical_voltha_patchset_manual_DT_voltha-2.7'
+ name: 'verify_physical_voltha_patchset_manual_DT-2.8'
+ pipeline-script: 'voltha/voltha-2.8/tucson-build-and-test.groovy'
workflow: 'dt'
- oltDebVersionMaster: 'openolt_asfvolt16-3.4.1-dev-c5dcc7f2b27df1c42868b3a79a5416808511bb1d-40G-NNI.deb'
- oltDebVersionVoltha23: 'openolt_asfvolt16-3.3.3-1a5d68b50d8bcc5ba6cb1630d3294c30c37cd2f5-40G-NNI.deb'
- pipeline-script: 'voltha/voltha-2.7/voltha-physical-build-and-tests.groovy'
trigger-string: 'DT hardware test'
default-test-args: '-i sanityDt -i PowerSwitch -X'
- branch-pattern: voltha-2.7
+ branch-pattern: voltha-2.8
+ extraHelmFlags: '--set global.log_level=debug'
- job-template:
id: 'voltha-periodic-test'
name: '{name}'
- pipeline-script: 'voltha/master/periodic-bbsim-tests.groovy'
+ pipeline-script: 'voltha/master/bbsim-tests.groovy'
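+  # default pipeline script; the voltha-2.8 jobs above override it with
+  # 'voltha/voltha-2.8/bbsim-tests.groovy'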
build-node: 'ubuntu18.04-basebuild-8c-15g'
robot-args: ''
gerrit-project: ''
@@ -660,184 +1113,34 @@
branch-pattern: '{all-branches-regexp}'
- job-template:
- id: 'voltha-periodic-test-kind-voltha-based'
- name: '{name}'
- pipeline-script: 'voltha/voltha-2.7/voltha-go-tests.groovy'
- test-runs: 1
- robot-args: ''
- gerrit-project: ''
- work-flow: ''
- volthaSystemTestsChange: ''
- volthaHelmChartsChange: ''
- kindVolthaChange: ''
- extraHelmFlags: ''
- sandbox: true
- olts: 1
- withAlarms: false
-
- description: |
- <!-- Managed by Jenkins Job Builder -->
- Created by {id} job-template from ci-management/jjb/voltha-e2e.yaml <br /><br />
- E2E Validation for Voltha 2.X
-
- properties:
- - cord-infra-properties:
- build-days-to-keep: '{big-build-days-to-keep}'
- artifact-num-to-keep: '{big-artifact-num-to-keep}'
-
- wrappers:
- - lf-infra-wrappers:
- build-timeout: '{build-timeout}'
- jenkins-ssh-credential: '{jenkins-ssh-credential}'
-
- parameters:
- - string:
- name: buildNode
- default: '{build-node}'
- description: 'Name of the Jenkins node to run the job on'
-
- - string:
- name: extraHelmFlags
- default: '--set onu={onus},pon={pons} {extraHelmFlags}'
- description: 'Helm flags to pass to ./voltha up'
-
- - bool:
- name: withAlarms
- default: '{withAlarms}'
- description: "Run alarm based tests when true"
-
- - string:
- name: makeTarget
- default: '{make-target}'
- description: 'Makefile target to invoke during test'
-
- - string:
- name: makeFailtestTarget
- default: '{make-target-failtest}'
- description: 'Makefile target to invoke during failure/based test'
-
- - string:
- name: makeMultiOltTarget
- default: '{make-target-multipleolt}'
- description: 'Makefile target to invoke during multiple olt test'
-
- - string:
- name: makeErrortestTarget
- default: '{make-target-errortest}'
- description: 'Makefile target to invoke during error test'
-
- - string:
- name: makeAlarmtestTarget
- default: '{make-target-alarmtest}'
- description: 'Makefile target to invoke during alarm test'
-
- - string:
- name: make1t4gemTestTarget
- default: '{make-target-1t4gemtest}'
- description: 'Makefile target to invoke during 1t4gem test'
-
- - string:
- name: make1t8gemTestTarget
- default: '{make-target-1t8gemtest}'
- description: 'Makefile target to invoke during 1t8gem test'
-
- - string:
- name: makeReconcileTestTarget
- default: '{make-target-reconciletest}'
- description: 'Makefile target to invoke during reconcile test'
-
- - string:
- name: makeReconcileDtTestTarget
- default: '{make-target-reconciledttest}'
- description: 'Makefile target to invoke during reconcile dt test'
-
- - string:
- name: makeReconcileTtTestTarget
- default: '{make-target-reconciletttest}'
- description: 'Makefile target to invoke during reconcile tt test'
-
- - string:
- name: manifestUrl
- default: '{gerrit-server-url}/{voltha-test-manifest-repo}'
- description: 'Repo manifest URL for code checkout'
-
- - string:
- name: branch
- default: '{code-branch}'
- description: 'Repo manifest branch for code checkout'
-
- - string:
- name: gerritProject
- default: '{gerrit-project}'
- description: 'Name of the Gerrit project'
-
- - string:
- name: gerritChangeNumber
- default: ''
- description: 'Changeset number in Gerrit'
-
- - string:
- name: gerritPatchsetNumber
- default: ''
- description: 'PatchSet number in Gerrit'
-
- - string:
- name: testRuns
- default: '{test-runs}'
- description: 'How many times to repeat the tests'
-
- - string:
- name: extraRobotArgs
- default: '{robot-args}'
- description: 'Arguments to pass to robot'
-
- - string:
- name: workFlow
- default: '{work-flow}'
- description: 'Workflow for testcase'
-
- - string:
- name: karafHome
- default: '{karaf-home}'
- description: 'Karaf home'
-
- - string:
- name: volthaSystemTestsChange
- default: '{volthaSystemTestsChange}'
- description: 'Download a change for gerrit in the voltha-system-tests repo, example value: "refs/changes/79/18779/13"'
-
- - string:
- name: kindVolthaChange
- default: '{kindVolthaChange}'
- description: 'Download a change for gerrit in the kind-voltha repo, example value: "refs/changes/32/19132/1"'
-
- - string:
- name: olts
- default: '{olts}'
- description: 'How many BBSim instances to run'
-
- project-type: pipeline
- concurrent: true
-
- dsl: !include-raw-escape: pipeline/{pipeline-script}
-
- triggers:
- - timed: |
- TZ=America/Los_Angeles
- {time-trigger}
-
-- job-template:
id: 'voltha-patch-test'
name: 'verify_{project}_sanity-test{name-extension}'
build-node: 'ubuntu18.04-basebuild-4c-8g'
+ pipeline-script: 'voltha/master/bbsim-tests.groovy'
override-branch: '$GERRIT_BRANCH'
sandbox: true
build-timeout: 20
+ timeout: 50
+ olts: 1
+ registry: mirror.registry.opennetworking.org
+ logLevel: 'INFO'
volthaSystemTestsChange: ''
volthaHelmChartsChange: ''
extraHelmFlags: ''
branch-regexp: '{all-branches-regexp}'
- kindVolthaChange: '' # this is only needed to test kind-voltha patches
+ testTargets: |
+ - target: sanity-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: sanity-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: sanity-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
description: |
<!-- Managed by Jenkins Job Builder -->
@@ -890,11 +1193,39 @@
default: '{override-branch}'
description: 'Name of the branch to use'
- # Used in the 2.7 based pipeline, can be removed after 2.8
+ # test configuration
+ # this is a parameter to drive the test execution, VOLTHA is redeployed each time with
+ # the provided configuration and then the make target is invoked,
+ # example value (has to be valid YAML):
+ # testTargets: |
+ # - target: 1t1gem-openonu-go-adapter-test
+ # workflow: att
+ # flags: ""
+ # teardown: true
+ - text:
+ name: testTargets
+ default: '{testTargets}'
+ description: 'Test configuration, see the ci-management job definition for more info'
+
- string:
- name: kindVolthaChange
- default: '{kindVolthaChange}'
- description: 'Download a change for gerrit in the kind-voltha repo, example value: "refs/changes/32/19132/1" (only used to test kind-voltha changes in 2.7)'
+ name: timeout
+ default: '{timeout}'
+ description: 'Timeout of pipeline job [minutes]'
+
+ - string:
+ name: olts
+ default: '{olts}'
+ description: 'How many BBSim instances to run'
+
+ - string:
+ name: registry
+ default: '{registry}'
+ description: 'Which registry to use (amazon vs menlo)'
+
+ - string:
+ name: logLevel
+ default: '{logLevel}'
+ description: 'Log level for all the components'
project-type: pipeline
concurrent: true
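The testTargets text parameter introduced above accepts any valid YAML list of target/workflow entries, as described in the comment block earlier in this hunk. As a hedged illustration (the instance name is hypothetical and the target name is taken from the defaults above, not re-verified against the Makefile), a project entry could narrow the job to the DT sanity run only:

  - 'voltha-patch-test':
      name-extension: '-dt-only'        # hypothetical instance
      testTargets: |
        - target: sanity-kind-dt
          workflow: dt
          flags: ""
          teardown: true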
@@ -1003,11 +1334,6 @@
default: '{profile}'
description: 'Technology Profile pushed to the ETCD'
- - string:
- name: notificationEmail
- default: 'andy@opennetworking.org'
- description: ''
-
- bool:
name: reinstallOlt
default: true
@@ -1091,12 +1417,11 @@
sandbox: true
pipeline-script: 'voltha/master/tucson-build-and-test.groovy'
default-test-args: '-i sanityORDeleteOLT -i PowerSwitch -X'
- branch-pattern: '{all-branches-regexp}'
build-node: 'tucson-pod'
config-pod: 'tucson-pod'
profile: 'Default'
- oltDebVersionMaster: 'openolt_asfvolt16-3.4.1-dev-c5dcc7f2b27df1c42868b3a79a5416808511bb1d-40G-NNI.deb'
- oltDebVersionVoltha23: 'openolt_asfvolt16-3.3.3-1a5d68b50d8bcc5ba6cb1630d3294c30c37cd2f5-40G-NNI.deb'
+ oltDebVersionMaster: 'openolt_asfvolt16-3.5.1-b8c09facd3da5d93b7c2815e176c6682de737437-40G-NNI.deb'
+ oltDebVersionVoltha23: 'openolt_asfvolt16-3.4.9-e2a9597f3d690fe3a0ea0df244571dfc9e8c2833-40G-NNI.deb'
volthaSystemTestsChange: ''
volthaHelmChartsChange: ''
extraHelmFlags: ''
@@ -1144,12 +1469,11 @@
sandbox: true
build-node: 'tucson-pod'
config-pod: 'tucson-pod'
- oltDebVersionMaster: 'openolt_asfvolt16-3.4.1-dev-c5dcc7f2b27df1c42868b3a79a5416808511bb1d-40G-NNI.deb'
- oltDebVersionVoltha23: 'openolt_asfvolt16-3.3.3-1a5d68b50d8bcc5ba6cb1630d3294c30c37cd2f5-40G-NNI.deb'
+ oltDebVersionMaster: 'openolt_asfvolt16-3.5.1-b8c09facd3da5d93b7c2815e176c6682de737437-40G-NNI.deb'
+ oltDebVersionVoltha23: 'openolt_asfvolt16-3.4.9-e2a9597f3d690fe3a0ea0df244571dfc9e8c2833-40G-NNI.deb'
pipeline-script: 'voltha/master/tucson-build-and-test.groovy'
trigger-string: 'hardware test'
default-test-args: '-i sanityORDeleteOLT -i PowerSwitch -X'
- branch-pattern: '{all-branches-regexp}'
volthaSystemTestsChange: ''
volthaHelmChartsChange: ''
profile: 'Default'
diff --git a/jjb/voltha-scale.yaml b/jjb/voltha-scale.yaml
index 606c3bc..ce66cd0 100644
--- a/jjb/voltha-scale.yaml
+++ b/jjb/voltha-scale.yaml
@@ -25,28 +25,143 @@
withEapol: true
withDhcp: true
withIgmp: false
- extraHelmFlags: '--set authRetry=true,dhcpRetry=true'
+ extraHelmFlags: '--set authRetry=false,dhcpRetry=false'
withPcap: false
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-master-experimental-multi-stack'
build-node: 'voltha-scale-2'
pipeline-script: 'voltha/master/voltha-scale-multi-stack.groovy'
- 'disable-job': true
+ 'disable-job': false
# trigger on Feb 29th (i.e., only trigger it manually)
time-trigger: "H 0 29 2 *"
withMonitoring: true
logLevel: WARN
- volthaStacks: 2
+ volthaStacks: 10
olts: 2
- pons: 2
- onus: 2
+ pons: 16
+ onus: 32
withFlows: true
provisionSubscribers: true
workflow: dt
withEapol: false
withDhcp: false
withIgmp: false
+ # extraHelmFlags: " -f /home/jenkins/voltha-scale/voltha-values.yaml --set etcd.persistence.enabled=true,etcd.persistence.storageClass=longhorn "
+
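For scale context, the numbers above work out as follows:

  per stack: 2 OLTs x 16 PONs x 32 ONUs = 1,024 ONUs
  total:     10 stacks x 1,024 ONUs     = 10,240 simulated ONUs per run

which is consistent with the "1024 ONUs" wording used for the single-stack jobs further down.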
+ # GRPC jobs
+ - 'voltha-scale-measurements':
+ name: 'voltha-scale-measurements-master-2-16-32-att-subscribers-grpc'
+ 'disable-job': false
+ build-node: 'voltha-scale-1'
+ # trigger on Feb 29th (i.e., only trigger it manually)
+ time-trigger: "H 0 29 2 *"
+ logLevel: DEBUG
+ olts: 2
+ pons: 16
+ onus: 32
+ withFlows: true
+ provisionSubscribers: true
+ withEapol: true
+ withDhcp: true
+ withIgmp: false
+ inMemoryEtcdStorage: false
+ extraHelmFlags: >
+ --set authRetry=false,dhcpRetry=false -f /home/jenkins/voltha-scale/voltha-values.yaml --set etcd.persistence.enabled=true,etcd.persistence.storageClass=longhorn
+ --set voltha-adapter-openolt.adapter_open_olt.endpoints.core_endpoint=voltha1-voltha-core.default.svc:55558
+ --set voltha-adapter-openolt.adapter_open_olt.endpoints.adapter_endpoint=voltha1-voltha-adapter-openolt-api.default.svc:50060
+ --set voltha-adapter-openonu.adapter_open_onu.endpoints.core_endpoint=voltha1-voltha-core.default.svc:55558
+ --set voltha-adapter-openonu.adapter_open_onu.endpoints.adapter_endpoint=voltha1-voltha-adapter-openonu-api.default.svc:50060
+ --set services.kafka.cluster.address=voltha-infra-kafka.default.svc:9092
+ --set services.etcd.address=voltha-infra-etcd.default.svc:2379
+ --set global.tracing.enabled=true
+ volthaHelmChartsChange: refs/changes/69/26569/7
+ rwCoreImg: 'dbainbriciena/voltha-rw-core:grpc009'
+ openoltAdapterImg: 'dbainbriciena/voltha-openolt-adapter:grpc009'
+ openonuAdapterGoImg: 'dbainbriciena/voltha-openonu-adapter-go:grpc009'
+ - 'voltha-scale-measurements':
+ name: 'voltha-scale-measurements-master-2-16-32-dt-subscribers-grpc'
+ 'disable-job': false
+ build-node: 'voltha-scale-1'
+ # trigger on Feb 29th (i.e., only trigger it manually)
+ time-trigger: "H 0 29 2 *"
+ logLevel: DEBUG
+ olts: 2
+ pons: 16
+ onus: 32
+ withFlows: true
+ provisionSubscribers: true
+ withEapol: false
+ withDhcp: false
+ withIgmp: false
+ workflow: dt
+ inMemoryEtcdStorage: false
+ extraHelmFlags: >
+ -f /home/jenkins/voltha-scale/voltha-values.yaml --set etcd.persistence.enabled=true,etcd.persistence.storageClass=longhorn
+ --set voltha-adapter-openolt.adapter_open_olt.endpoints.core_endpoint=voltha1-voltha-api.default.svc:55555
+ --set voltha-adapter-openolt.adapter_open_olt.endpoints.adapter_endpoint=voltha1-voltha-adapter-openolt-api.default.svc:50060
+ --set voltha-adapter-openonu.adapter_open_onu.endpoints.core_endpoint=voltha1-voltha-api.default.svc:55555
+ --set voltha-adapter-openonu.adapter_open_onu.endpoints.adapter_endpoint=voltha1-voltha-adapter-openonu-api.default.svc:50060
+ --set services.kafka.cluster.address=voltha-infra-kafka.default.svc:9092
+ --set services.etcd.address=voltha-infra-etcd.default.svc:2379
+ --set global.tracing.enabled=true
+ volthaHelmChartsChange: refs/changes/69/26569/3
+ rwCoreImg: 'dbainbriciena/voltha-rw-core:grpc009'
+ openoltAdapterImg: 'dbainbriciena/voltha-openolt-adapter:grpc009'
+ openonuAdapterGoImg: 'dbainbriciena/voltha-openonu-adapter-go:grpc009'
+ - 'voltha-scale-measurements':
+ name: 'voltha-scale-measurements-master-2-16-32-tt-subscribers-grpc'
+ 'disable-job': false
+ build-node: 'voltha-scale-1'
+ # trigger on Feb 29th (i.e., only trigger it manually)
+ time-trigger: "H 0 29 2 *"
+ logLevel: DEBUG
+ olts: 2
+ pons: 16
+ onus: 32
+ withFlows: true
+ provisionSubscribers: true
+ withEapol: false
+ withDhcp: true
+ withIgmp: false
+ workflow: tt
+ inMemoryEtcdStorage: false
+ extraHelmFlags: >
+ --set dhcpRetry=false -f /home/jenkins/voltha-scale/voltha-values.yaml --set etcd.persistence.enabled=true,etcd.persistence.storageClass=longhorn
+ --set voltha-adapter-openolt.adapter_open_olt.endpoints.core_endpoint=voltha1-voltha-api.default.svc:55555
+ --set voltha-adapter-openolt.adapter_open_olt.endpoints.adapter_endpoint=voltha1-voltha-adapter-openolt-api.default.svc:50060
+ --set voltha-adapter-openonu.adapter_open_onu.endpoints.core_endpoint=voltha1-voltha-api.default.svc:55555
+ --set voltha-adapter-openonu.adapter_open_onu.endpoints.adapter_endpoint=voltha1-voltha-adapter-openonu-api.default.svc:50060
+ --set services.kafka.cluster.address=voltha-infra-kafka.default.svc:9092
+ --set services.etcd.address=voltha-infra-etcd.default.svc:2379
+ --set global.tracing.enabled=true
+ volthaHelmChartsChange: refs/changes/69/26569/3
+ rwCoreImg: 'dbainbriciena/voltha-rw-core:grpc009'
+ openoltAdapterImg: 'dbainbriciena/voltha-openolt-adapter:grpc009'
+ openonuAdapterGoImg: 'dbainbriciena/voltha-openonu-adapter-go:grpc009'
+
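The long extraHelmFlags blocks above chain several helm --set overrides. Assuming standard Helm --set semantics (dots denote nesting), the endpoint overrides are roughly equivalent to the following values-file fragment; this is shown only to make the structure readable, the jobs themselves keep passing the flags on the command line:

  voltha-adapter-openolt:
    adapter_open_olt:
      endpoints:
        core_endpoint: "voltha1-voltha-api.default.svc:55555"
        adapter_endpoint: "voltha1-voltha-adapter-openolt-api.default.svc:50060"
  services:
    kafka:
      cluster:
        address: "voltha-infra-kafka.default.svc:9092"
    etcd:
      address: "voltha-infra-etcd.default.svc:2379"
  global:
    tracing:
      enabled: true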
+ # OLT app rewrite jobs
+ - 'voltha-scale-measurements':
+ name: 'voltha-scale-measurements-master-2-16-32-att-subscribers-new-olt-app'
+ 'disable-job': false
+ build-node: 'voltha-scale-1'
+ # trigger on Feb 29th (i.e., only trigger it manually)
+ time-trigger: "H 0 29 2 *"
+ logLevel: INFO
+ olts: 2
+ pons: 16
+ onus: 32
+ withFlows: true
+ provisionSubscribers: true
+ withEapol: true
+ withDhcp: true
+ withIgmp: false
+ inMemoryEtcdStorage: false
+ extraHelmFlags: '--set authRetry=false,dhcpRetry=false -f /home/jenkins/voltha-scale/voltha-values.yaml'
+ rwCoreImg: 'matteoscandolo/voltha-rw-core:sync-delete'
+ ofAgentImg: 'matteoscandolo/voltha-ofagent-go:test'
+ onosImg: 'matteoscandolo/voltha-onos:oltapp'
+ openonuAdapterGoImg: 'gcgirish/voltha-openonu-adapter-go:syncFlow'
# jobs for 1024 ONUs with openonu-go and clustered ONOS (2 OLTs)
- 'voltha-scale-measurements':
@@ -62,8 +177,7 @@
withEapol: true
withDhcp: true
withIgmp: false
- extraHelmFlags: '--set authRetry=true,dhcpRetry=true --set global.log_correlation.enabled=true -f /home/jenkins/voltha-scale/voltha-values.yaml'
- onosImg: andreacampanella/voltha-onos:olt-group-pkt-req-mcast-fix
+ extraHelmFlags: '--set authRetry=false,dhcpRetry=false -f /home/jenkins/voltha-scale/voltha-values.yaml --set etcd.persistence.enabled=true,etcd.persistence.storageClass=longhorn'
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-master-2-16-32-dt-subscribers'
@@ -79,7 +193,7 @@
withEapol: false
withDhcp: false
withIgmp: false
- extraHelmFlags: '--set authRetry=true,dhcpRetry=true --set global.log_correlation.enabled=true -f /home/jenkins/voltha-scale/voltha-values.yaml '
+ extraHelmFlags: '-f /home/jenkins/voltha-scale/voltha-values.yaml --set etcd.persistence.enabled=true,etcd.persistence.storageClass=longhorn'
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-master-2-16-32-tt-subscribers'
@@ -97,8 +211,7 @@
withIgmp: true
onosReplicas: 3
atomixReplicas: 3
- extraHelmFlags: '--set authRetry=true,dhcpRetry=true --set global.rw_core.core_timeout=60s --set global.log_correlation.enabled=true -f /home/jenkins/voltha-scale/voltha-values.yaml '
- onosImg: andreacampanella/voltha-onos:olt-group-pkt-req-mcast-fix
+ extraHelmFlags: '--set dhcpRetry=false -f /home/jenkins/voltha-scale/voltha-values.yaml --set etcd.persistence.enabled=true,etcd.persistence.storageClass=longhorn'
# multi-stack jobs
- 'voltha-scale-measurements':
@@ -107,7 +220,6 @@
'disable-job': false
pipeline-script: 'voltha/master/voltha-scale-multi-stack.groovy'
time-trigger: "H H/4 * * *"
- withMonitoring: false
logLevel: WARN
volthaStacks: 10
olts: 2
@@ -127,7 +239,6 @@
'disable-job': false
pipeline-script: 'voltha/master/voltha-scale-multi-stack.groovy'
time-trigger: "H H/4 * * *"
- withMonitoring: false
logLevel: WARN
volthaStacks: 10
olts: 2
@@ -139,7 +250,7 @@
withEapol: false
withDhcp: false
withIgmp: false
- extraHelmFlags: "-f /home/jenkins/voltha-scale/voltha-values.yaml "
+ extraHelmFlags: "-f /home/jenkins/voltha-scale/voltha-values.yaml --set etcd.persistence.enabled=true,etcd.persistence.storageClass=longhorn"
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-master-10-stacks-2-16-32-tt-subscribers'
@@ -147,7 +258,6 @@
'disable-job': false
pipeline-script: 'voltha/master/voltha-scale-multi-stack.groovy'
time-trigger: "H H/4 * * *"
- withMonitoring: false
logLevel: WARN
volthaStacks: 10
olts: 2
@@ -161,11 +271,11 @@
withIgmp: true
extraHelmFlags: "-f /home/jenkins/voltha-scale/voltha-values.yaml "
- # voltha-2.7 Jobs
+ # voltha-2.8 Jobs
- 'voltha-scale-measurements':
- name: 'voltha-scale-measurements-voltha-2.7-2-16-32-att-subscribers'
+ name: 'voltha-scale-measurements-voltha-2.8-2-16-32-att-subscribers'
'disable-job': false
- pipeline-script: 'voltha/voltha-2.7/voltha-scale-test.groovy'
+ pipeline-script: 'voltha/voltha-2.8/voltha-scale-test.groovy'
build-node: 'voltha-scale-1'
time-trigger: "H H/4 * * *"
olts: 2
@@ -176,8 +286,7 @@
withEapol: true
withDhcp: true
withIgmp: false
- extraHelmFlags: '--set defaults.rw_core.timeout=30s '
- release: voltha-2.7
+ release: voltha-2.8
bbsimImg: ''
rwCoreImg: ''
ofAgentImg: ''
@@ -185,11 +294,12 @@
openonuAdapterImg: ''
openonuAdapterGoImg: ''
onosImg: ''
+ extraHelmFlags: "--set authRetry=false,dhcpRetry=false -f /home/jenkins/voltha-scale/voltha-values.yaml --set etcd.persistence.enabled=true,etcd.persistence.storageClass=longhorn"
- 'voltha-scale-measurements':
- name: 'voltha-scale-measurements-voltha-2.7-2-16-32-dt-subscribers'
+ name: 'voltha-scale-measurements-voltha-2.8-2-16-32-dt-subscribers'
'disable-job': false
- pipeline-script: 'voltha/voltha-2.7/voltha-scale-test.groovy'
+ pipeline-script: 'voltha/voltha-2.8/voltha-scale-test.groovy'
build-node: 'voltha-scale-1'
time-trigger: "H H/4 * * *"
olts: 2
@@ -201,8 +311,7 @@
withEapol: false
withDhcp: false
withIgmp: false
- extraHelmFlags: '--set defaults.rw_core.timeout=30s '
- release: voltha-2.7
+ release: voltha-2.8
bbsimImg: ''
rwCoreImg: ''
ofAgentImg: ''
@@ -210,11 +319,12 @@
openonuAdapterImg: ''
openonuAdapterGoImg: ''
onosImg: ''
+ extraHelmFlags: "-f /home/jenkins/voltha-scale/voltha-values.yaml --set etcd.persistence.enabled=true,etcd.persistence.storageClass=longhorn"
- 'voltha-scale-measurements':
- name: 'voltha-scale-measurements-voltha-2.7-2-16-32-tt-subscribers'
+ name: 'voltha-scale-measurements-voltha-2.8-2-16-32-tt-subscribers'
'disable-job': false
- pipeline-script: 'voltha/voltha-2.7/voltha-scale-test.groovy'
+ pipeline-script: 'voltha/voltha-2.8/voltha-scale-test.groovy'
build-node: 'voltha-scale-1'
time-trigger: "H H/4 * * *"
olts: 2
@@ -226,8 +336,7 @@
withEapol: false
withDhcp: true
withIgmp: true
- extraHelmFlags: '--set defaults.rw_core.timeout=30s '
- release: voltha-2.7
+ release: voltha-2.8
bbsimImg: ''
rwCoreImg: ''
ofAgentImg: ''
@@ -235,13 +344,14 @@
openonuAdapterImg: ''
openonuAdapterGoImg: ''
onosImg: ''
+ extraHelmFlags: "--set dhcpRetry=false -f /home/jenkins/voltha-scale/voltha-values.yaml --set etcd.persistence.enabled=true,etcd.persistence.storageClass=longhorn"
- # 2.6 multi-stack jobs
+ # 2.8 multi-stack jobs
- 'voltha-scale-measurements':
- name: 'voltha-scale-measurements-2.6-10-stacks-2-16-32-att-subscribers'
+ name: 'voltha-scale-measurements-2.8-10-stacks-2-16-32-att-subscribers'
build-node: 'voltha-scale-2'
- 'disable-job': true
- pipeline-script: 'voltha-scale-multi-stack.groovy'
+ 'disable-job': false
+ pipeline-script: 'voltha/voltha-2.8/voltha-scale-multi-stack.groovy'
time-trigger: "H H/4 * * *"
withMonitoring: false
logLevel: WARN
@@ -251,7 +361,7 @@
onus: 32
withFlows: true
provisionSubscribers: true
- release: voltha-2.6
+ release: voltha-2.8
workflow: att
withEapol: true
withDhcp: true
@@ -263,12 +373,13 @@
openonuAdapterImg: ''
openonuAdapterGoImg: ''
onosImg: ''
+ extraHelmFlags: "-f /home/jenkins/voltha-scale/voltha-values.yaml "
- 'voltha-scale-measurements':
- name: 'voltha-scale-measurements-2.6-10-stacks-2-16-32-dt-subscribers'
+ name: 'voltha-scale-measurements-2.8-10-stacks-2-16-32-dt-subscribers'
build-node: 'voltha-scale-2'
- pipeline-script: 'voltha-scale-multi-stack.groovy'
- 'disable-job': true
+ pipeline-script: 'voltha/voltha-2.8/voltha-scale-multi-stack.groovy'
+ 'disable-job': false
time-trigger: "H H/4 * * *"
withMonitoring: false
logLevel: WARN
@@ -278,7 +389,7 @@
onus: 32
withFlows: true
provisionSubscribers: true
- release: voltha-2.6
+ release: voltha-2.8
workflow: dt
withEapol: false
withDhcp: false
@@ -290,12 +401,13 @@
openonuAdapterImg: ''
openonuAdapterGoImg: ''
onosImg: ''
+ extraHelmFlags: "-f /home/jenkins/voltha-scale/voltha-values.yaml "
- 'voltha-scale-measurements':
- name: 'voltha-scale-measurements-2.6-10-stacks-2-16-32-tt-subscribers'
+ name: 'voltha-scale-measurements-2.8-10-stacks-2-16-32-tt-subscribers'
build-node: 'voltha-scale-2'
- pipeline-script: 'voltha-scale-multi-stack.groovy'
- 'disable-job': true
+ pipeline-script: 'voltha/voltha-2.8/voltha-scale-multi-stack.groovy'
+ 'disable-job': false
time-trigger: "H H/4 * * *"
withMonitoring: false
logLevel: WARN
@@ -305,7 +417,7 @@
onus: 32
withFlows: true
provisionSubscribers: true
- release: voltha-2.6
+ release: voltha-2.8
workflow: tt
withEapol: false
withDhcp: true
@@ -317,6 +429,7 @@
openonuAdapterImg: ''
openonuAdapterGoImg: ''
onosImg: ''
+ extraHelmFlags: "-f /home/jenkins/voltha-scale/voltha-values.yaml "
# per patchset job
- 'voltha-scale-measurements':
@@ -367,23 +480,7 @@
- 'voltha-scale-measurements-dev':
name: 'voltha-scale-measurements-master-dev'
build-node: 'voltha-scale'
- extraHelmFlags: '--set defaults.rw_core.timeout=30s '
- # development matrix
- - 'voltha-scale-matrix':
- name: 'voltha-scale-matrix-voltha-master-dev'
- build-node: 'voltha-scale'
-
- # development matrix
- - 'voltha-scale-matrix':
- name: 'voltha-scale-matrix-voltha-master'
- build-node: 'voltha-scale-1'
- onosReplicas: 3
- atomixReplicas: 3
- kafkaReplicas: 3
- etcdReplicas: 3
- topologies: 1-16-16, 1-16-32, 2-16-32
- time-trigger: H 0 * * *
# list of parameters for the VOLTHA Jobs,
# used as a YAML anchor so that it can be shared across multiple jobs
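The shared parameter list mentioned here relies on standard YAML anchors and merge keys, the same mechanism used by the *-boiler-plate blocks elsewhere in these files. A minimal sketch of the pattern (names are illustrative, not taken from the repo):

  - common-params: &common-params
      parameters:
        - string:
            name: buildNode
            default: '{build-node}'
            description: 'Name of the Jenkins node to run the job on'

  - job-template:
      id: 'example-job'          # hypothetical template
      name: '{name}'
      <<: *common-params         # merges the anchored mapping into this template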
@@ -799,98 +896,3 @@
branches:
- branch-compare-type: REG_EXP
branch-pattern: '{all-branches-regexp}'
-- job-template:
- id: 'voltha-scale-matrix'
- name: '{name}'
- pipeline-script: 'voltha-scale-matrix.groovy'
-
- description: |
- <!-- Managed by Jenkins Job Builder -->
- Created by {id} job-template from ci-management/jjb/voltha-scale.yaml <br /><br />
- Using pipeline {pipeline-script} <br/><br/>
- Scale measurements for VOLTHA 2.x
-
- properties:
- - cord-infra-properties:
- build-days-to-keep: '{big-build-days-to-keep}'
- artifact-num-to-keep: '{big-artifact-num-to-keep}'
-
- wrappers:
- - lf-infra-wrappers:
- build-timeout: '{build-timeout}'
- jenkins-ssh-credential: '{jenkins-ssh-credential}'
-
- # default values
- time-trigger: 0 0 29 2 *
- release: master
- build-node: 'voltha-scale'
- volthaSystemTestsChange: ''
- volthaHelmChartsChange: ''
- kindVolthaChange: ''
- onosReplicas: 1
- atomixReplicas: 0
- kafkaReplicas: 1
- etcdReplicas: 1
- openonuAdapterReplicas: 1
- topologies: 1-1-1, 1-2-2, 2-2-2
-
- project-type: pipeline
- concurrent: false
-
- dsl: !include-raw-escape: pipeline/{pipeline-script}
-
- parameters:
- - string:
- name: release
- default: '{release}'
- description: 'Version of the code to test (matches a branch in kind-voltha and voltha-system-tests repos)'
-
- - string:
- name: buildNode
- default: '{build-node}'
- description: 'Name of the Jenkins node to run the job on'
-
- - string:
- name: volthaSystemTestsChange
- default: '{volthaSystemTestsChange}'
- description: 'Download a change for gerrit in the voltha-system-tests repo, example value: "refs/changes/79/18779/13"'
-
- - string:
- name: volthaHelmChartsChange
- default: '{volthaHelmChartsChange}'
- description: 'Download a change for gerrit in the voltha-helm-charts repo, example value: "refs/changes/32/19132/1"'
-
- - string:
- name: onosReplicas
- default: '{onosReplicas}'
- description: 'How many ONOSes instances to run'
-
- - string:
- name: atomixReplicas
- default: '{atomixReplicas}'
- description: 'How many Atomix instances to run'
-
- - string:
- name: kafkaReplicas
- default: '{kafkaReplicas}'
- description: 'How many Kafka instances to run'
-
- - string:
- name: etcdReplicas
- default: '{etcdReplicas}'
- description: 'How many ETCD instances to run'
-
- - string:
- name: openonuAdapterReplicas
- default: '{openonuAdapterReplicas}'
- description: 'How many OpenONU adapter instances to run'
-
- - string:
- name: topologies
- default: '{topologies}'
- description: 'Topologies configuration, comma separate list of "olt-pon-onu" eg: "1-16-16, 1-16-32"'
-
- triggers:
- - timed: |
- TZ=America/Los_Angeles
- {time-trigger}
diff --git a/jjb/voltha-test/voltha-certification.yaml b/jjb/voltha-test/voltha-certification.yaml
new file mode 100644
index 0000000..90b6d25
--- /dev/null
+++ b/jjb/voltha-test/voltha-certification.yaml
@@ -0,0 +1,233 @@
+---
+# POD Build Pipeline Jobs for VOLTHA Certification
+
+- project:
+ name: voltha-certification-jobs
+
+ project-name: '{name}'
+
+ build-timeout: '300'
+
+ with-kind: false
+ power-switch: False
+ work-flow: 'ATT'
+ in-band-management: false
+ num-of-openonu: '1'
+ num-of-onos: '3'
+ num-of-atomix: '3'
+ reinstall-olt: true
+ test-type: ''
+ volthaSystemTestsChange: ''
+ kindVolthaChange: ''
+ cordTesterChange: ''
+ oltAdapterAppLabel: 'adapter-open-olt'
+ num-of-onus: ''
+ num-of-ponports: ''
+
+ jobs:
+
+ # Certification (Radisys 1600G) pod with olt/onu - master versions timer based job
+ - 'build_voltha_pod_release_timer':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-1600g'
+ disable-job: false
+ reinstall-olt: false
+ name-extension: '_DT'
+ work-flow: 'DT'
+ profile: '1T8GEM'
+ time: '21'
+
+ # Certification (Radisys 1600G) POD test job - master versions: uses 1T8GEM tech profile on voltha branch
+ - 'build_voltha_pod_test':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-1600g'
+ disable-job: false
+ release: 'master'
+ branch: 'master'
+ name-extension: '_DT'
+ work-flow: 'DT'
+ test-repo: 'voltha-system-tests'
+ profile: '1T8GEM'
+ power-switch: True
+ pipeline-script: 'voltha/master/voltha-dt-physical-functional-tests.groovy'
+
+ # Certification (Radisys 1600X) pod with olt/onu - master versions timer based job
+ - 'build_voltha_pod_release_timer':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-1600x'
+ disable-job: false
+ reinstall-olt: false
+ name-extension: '_DT'
+ work-flow: 'DT'
+ profile: '1T8GEM'
+ time: '1'
+
+ # Certification (Radisys 1600X) POD test job - master versions: uses 1T8GEM tech profile on voltha branch
+ - 'build_voltha_pod_test':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-1600x'
+ disable-job: false
+ release: 'master'
+ branch: 'master'
+ name-extension: '_DT'
+ work-flow: 'DT'
+ test-repo: 'voltha-system-tests'
+ profile: '1T8GEM'
+ power-switch: True
+ pipeline-script: 'voltha/master/voltha-dt-physical-functional-tests.groovy'
+
+ # Certification (Radisys) pod with olt/onu - master versions timer based job
+ - 'build_voltha_pod_release_timer':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-3200g'
+ disable-job: false
+ reinstall-olt: false
+ name-extension: '_DT'
+ work-flow: 'DT'
+ profile: '1T8GEM'
+ time: '5'
+
+ # Certification (Radisys) POD test job - master versions: uses 1T8GEM tech profile on voltha branch
+ - 'build_voltha_pod_test':
+ timeout: 270
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-3200g'
+ disable-job: false
+ release: 'master'
+ branch: 'master'
+ name-extension: '_DT'
+ work-flow: 'DT'
+ test-repo: 'voltha-system-tests'
+ profile: '1T8GEM'
+ power-switch: True
+ pipeline-script: 'voltha/master/voltha-dt-physical-functional-tests.groovy'
+
+ # Certification (Radisys) pod with olt/onu - 2.8 version timer based job
+ - 'build_voltha_pod_release_timer':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-3200g'
+ disable-job: false
+ reinstall-olt: false
+ release: '2.8'
+ branch: 'voltha-2.8'
+ name-extension: '_DT'
+ work-flow: 'DT'
+ profile: '1T8GEM'
+ time: '9'
+ pipeline-script: 'voltha/voltha-2.8/physical-build.groovy'
+
+ # Certification (Radisys) POD test job - 2.8 version: uses 1T8GEM tech profile on voltha branch
+ - 'build_voltha_pod_test':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-3200g'
+ disable-job: false
+ release: '2.8'
+ branch: 'voltha-2.8'
+ name-extension: '_DT'
+ work-flow: 'DT'
+ test-repo: 'voltha-system-tests'
+ profile: '1T8GEM'
+ power-switch: True
+ pipeline-script: 'voltha/voltha-2.8/voltha-dt-physical-functional-tests.groovy'
+
+ # Certification (Radisys) pod with olt/onu - 2.8 version timer based job
+ - 'build_voltha_pod_release_timer':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-1600g'
+ disable-job: false
+ reinstall-olt: false
+ release: '2.8'
+ branch: 'voltha-2.8'
+ name-extension: '_DT'
+ work-flow: 'DT'
+ profile: '1T8GEM'
+ time: '13'
+ pipeline-script: 'voltha/voltha-2.8/physical-build.groovy'
+
+ # Certification (Radisys) POD test job - 2.8 version: uses 1T8GEM tech profile on voltha branch
+ - 'build_voltha_pod_test':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-1600g'
+ disable-job: false
+ release: '2.8'
+ branch: 'voltha-2.8'
+ name-extension: '_DT'
+ work-flow: 'DT'
+ test-repo: 'voltha-system-tests'
+ profile: '1T8GEM'
+ power-switch: True
+ pipeline-script: 'voltha/voltha-2.8/voltha-dt-physical-functional-tests.groovy'
+
+ # Certification (Radisys) pod with olt/onu - 2.8 version timer based job
+ - 'build_voltha_pod_release_timer':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-1600x'
+ disable-job: false
+ reinstall-olt: false
+ release: '2.8'
+ branch: 'voltha-2.8'
+ name-extension: '_DT'
+ work-flow: 'DT'
+ profile: '1T8GEM'
+ time: '17'
+ pipeline-script: 'voltha/voltha-2.8/physical-build.groovy'
+
+ # Certification (Radisys) POD test job - 2.8 version: uses 1T8GEM tech profile on voltha branch
+ - 'build_voltha_pod_test':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-1600x'
+ disable-job: false
+ release: '2.8'
+ branch: 'voltha-2.8'
+ name-extension: '_DT'
+ work-flow: 'DT'
+ test-repo: 'voltha-system-tests'
+ profile: '1T8GEM'
+ power-switch: True
+ pipeline-script: 'voltha/voltha-2.8/voltha-dt-physical-functional-tests.groovy'
+
+ # Certification (Radisys) pod with olt/onu - master versions timer based job, two OLTs
+ - 'build_voltha_pod_release_timer':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-3200g'
+ disable-job: true
+ reinstall-olt: false
+ work-flow: 'ATT'
+ profile: '1T4GEM'
+ time: '13'
+
+ # Certification (Radisys) POD test job - master versions: uses tech profile on voltha branch
+ - 'build_voltha_pod_test':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-3200g'
+ disable-job: true
+ release: 'master'
+ branch: 'master'
+ work-flow: 'ATT'
+ test-repo: 'voltha-system-tests'
+ profile: '1T4GEM'
+ power-switch: True
+ pipeline-script: 'voltha/master/voltha-physical-functional-tests.groovy'
+
+ # Certification (Radisys) pod with olt/onu - master versions timer based job, two OLTs
+ - 'build_voltha_pod_release_timer':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-3200g'
+ disable-job: true
+ reinstall-olt: false
+ work-flow: 'ATT'
+ profile: '1T4GEM-unencrypted'
+ time: '17'
+
+ # Certification (Radisys) POD test job - master versions: uses tech profile on voltha branch
+ - 'build_voltha_pod_test':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-3200g'
+ disable-job: true
+ release: 'master'
+ branch: 'master'
+ work-flow: 'ATT'
+ test-repo: 'voltha-system-tests'
+ profile: '1T4GEM-unencrypted'
+ power-switch: True
+ pipeline-script: 'voltha/master/voltha-physical-functional-tests.groovy'
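For reference, the time: values in the certification project above feed the build_voltha_pod_release_timer template's cron trigger (defined later in voltha-nightly-jobs.yaml as "H {time} * * *" under TZ=America/Los_Angeles). So, roughly, time: '21' expands to:

  triggers:
    - timed: |
        TZ=America/Los_Angeles
        H 21 * * *        # once a day, at a hashed minute within the 21:00 hour, US Pacific time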
diff --git a/jjb/voltha-test/voltha-nightly-jobs.yaml b/jjb/voltha-test/voltha-nightly-jobs.yaml
index 7ba8dbb..bc70d30 100644
--- a/jjb/voltha-test/voltha-nightly-jobs.yaml
+++ b/jjb/voltha-test/voltha-nightly-jobs.yaml
@@ -7,6 +7,10 @@
openoltAdapterChart: onf/voltha-adapter-openolt
+ oltAdapterReleaseName: open-olt
+
+ waitTimerForOltUp: 360
+
parameters:
- string:
name: buildNode
@@ -68,15 +72,15 @@
default: '{branch}'
- string:
- name: notificationEmail
- default: 'you@opennetworking.org, suchitra@opennetworking.org'
- description: ''
-
- - string:
name: workFlow
default: '{work-flow}'
description: 'Installs and uses the specified work flow on the POD'
+ - string:
+ name: extraHelmFlags
+ default: '{extraHelmFlags}'
+ description: 'Helm flags (passed to each deployment)'
+
# openonu-go only supports a single replica, remove after 2.8
- string:
name: NumOfOpenOnu
@@ -89,9 +93,19 @@
description: 'Installs the specified Number of ONOS instances'
- bool:
- name: installBBSim
- default: '{installBBSim}'
- description: "Install the BBSim container"
+ name: enableMultiUni
+ default: '{enableMultiUni}'
+ description: "Enables the Multi UNI feature"
+
+ - string:
+ name: uniPortMask
+ default: '{uniPortMask}'
+ description: 'Open ONU adapter uni_port_mask, used when enableMultiUni is set to True, values: 0x0001-0x00FF'
+
+ - string:
+ name: bbsimReplicas
+ default: '{bbsimReplicas}'
+ description: 'Installs the specified Number of BBSim Instances'
- string:
name: onuNumber
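A note on the uniPortMask parameter added above: it is handed to the openonu adapter as uni_port_mask, and assuming it is interpreted as a bitmask of enabled UNI ports (which the 0x0001-0x00FF range in the description suggests, though that is not spelled out here), the multi-UNI pod configured later in voltha.yaml would enable the first two UNIs:

  enableMultiUni: true
  uniPortMask: '0x0003'    # assumption: bits 0 and 1 set -> UNI ports 0 and 1 enabled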
@@ -128,12 +142,6 @@
default: '{reinstall-olt}'
description: "Re-install olt software bringing up CORD"
- # withKind is not used in the master branch pipeline, remove after 2.8
- - bool:
- name: withKind
- default: '{with-kind}'
- description: "The pods uses kind and a physical fabric thus port forward to the management is needed"
-
- string:
name: VolthaEtcdPort
default: '{VolthaEtcdPort}'
@@ -154,12 +162,6 @@
default: '{volthaHelmChartsChange}'
description: 'Download a change for gerrit in the voltha-helm-charts repo, example value: "refs/changes/32/19132/1"'
- # kind-voltha is not used in the master branch pipeline, remove after 2.8
- - string:
- name: kindVolthaChange
- default: '{kindVolthaChange}'
- description: 'Download a change for gerrit in the kind-voltha repo, example value: "refs/changes/32/19132/1"'
-
# NOTE is this needed/used?
- string:
name: cordTesterChange
@@ -170,7 +172,18 @@
- string:
name: openoltAdapterChart
default: '{openoltAdapterChart}'
- description: 'OpenOLT chart name (or location on file system)'
+ description: 'Olt adapter chart name (or location on file system)'
+
+ # this is used in the Adtran DT job
+ - string:
+ name: oltAdapterReleaseName
+ default: '{oltAdapterReleaseName}'
+ description: 'Olt adapter release name'
+
+ - string:
+ name: waitTimerForOltUp
+ default: '{waitTimerForOltUp}'
+ description: 'Wait timer for the OLT to come up after reboot'
# default properties for the VOLTHA scale jobs
- voltha-pipe-job-boiler-plate: &voltha-pipe-job-boiler-plate
@@ -213,9 +226,12 @@
volthaHelmChartsChange: ''
profile: 'Default'
logLevel: 'DEBUG'
- installBBSim: false
+ enableMultiUni: false
+ uniPortMask: '0x0001'
+ bbsimReplicas: 0
num-of-onus: 0
num-of-ponports: 0
+ extraHelmFlags: ''
<<: *voltha-build-job-parameters
@@ -251,9 +267,12 @@
volthaHelmChartsChange: ''
profile: 'Default'
logLevel: 'DEBUG'
- installBBSim: false
+ enableMultiUni: false
+ uniPortMask: '0x0001'
+ bbsimReplicas: 0
num-of-onus: 0
num-of-ponports: 0
+ extraHelmFlags: ''
<<: *voltha-build-job-parameters
@@ -266,47 +285,6 @@
TZ=America/Los_Angeles
H {time} * * *
-# this job template is defined to support VOLTHA-2.7 builds, remove after 2.8
-- job-template:
- name: 'build_{config-pod}_{profile}{name-extension}_voltha_{release}'
- id: build_voltha_pod_release_legacy
- disabled: '{disable-job}'
- description: |
- Automatic Build on POD {config-pod}, using {Jenkinsfile} in {gerrit-server-url}/voltha-system-tests' <br /><br />
- Created from job-template {id} from ci-management/jjb/voltha-test/voltha-nightly-jobs.yaml <br />
- Created by QA (Suchitra Vemuri - suchitra@opennetworking.org ) <br />
- This job is triggered upon completion of a dependent _test job <br />
- Copyright (c) 2020 Open Networking Foundation (ONF)
-
- <<: *voltha-pipe-job-boiler-plate
- VolthaEtcdPort: '2379'
- release: '2.7'
- branch: 'voltha-2.7'
- volthaHelmChartsChange: '' # this is not supported in the VOLTHA-2.7 build, but the parameters are shared, ideally we should split them
- logLevel: 'DEBUG'
- installBBSim: false
- num-of-onus: 0
- num-of-ponports: 0
- num-of-kafka: 1
- num-of-etcd: 1
-
- <<: *voltha-build-job-parameters
-
- concurrent: true
-
- pipeline-scm:
- script-path: '{Jenkinsfile}'
- scm:
- - git:
- url: '{gerrit-server-url}/voltha-system-tests'
- branches:
- - '{branch}'
-
- triggers:
- - timed: |
- TZ=America/Los_Angeles
- H {time} * * *
-
# VOLTHA Test Job
# This job is automatically triggered after a build job has successfully completed
- job-template:
@@ -320,14 +298,21 @@
Copyright (c) 2017 Open Networking Foundation (ONF)
<<: *voltha-pipe-job-boiler-plate
- pipeline-script: 'voltha-physical-functional-tests.groovy'
+ pipeline-script: 'voltha/master/voltha-physical-functional-tests.groovy'
manifest-url: 'https://gerrit.opencord.org/voltha-test-manifest.git'
manifest-branch: 'master'
oltAdapterAppLabel: 'adapter-open-olt'
+ enableMultiUni: false
+ timeout: 240
parameters:
- string:
+ name: timeout
+ default: '{timeout}'
+ description: 'Job pipeline timeout value [minutes]'
+
+ - string:
name: buildNode
default: '{build-node}'
description: 'Name of the Jenkins executor node to run the job on'
@@ -393,6 +378,11 @@
description: 'Installs and uses the specified work flow on the POD'
- bool:
+ name: enableMultiUni
+ default: '{enableMultiUni}'
+ description: "Enables the Multi UNI feature"
+
+ - bool:
name: powerSwitch
default: '{power-switch}'
description: "Indicate whether POD has power switch to reboot ONUs/OLT remotely"
@@ -441,14 +431,20 @@
Copyright (c) 2017 Open Networking Foundation (ONF)
<<: *voltha-pipe-job-boiler-plate
- pipeline-script: 'voltha-physical-functional-tests.groovy'
+ pipeline-script: 'voltha/master/voltha-physical-functional-tests.groovy'
manifest-url: 'https://gerrit.opencord.org/voltha-test-manifest.git'
manifest-branch: 'master'
oltAdapterAppLabel: 'adapter-open-olt'
+ timeout: 240
parameters:
- string:
+ name: timeout
+ default: '{timeout}'
+ description: 'Job pipeline timeout value [minutes]'
+
+ - string:
name: buildNode
default: '{build-node}'
description: 'Name of the Jenkins executor node to run the job on'
@@ -551,3 +547,134 @@
- timed: |
TZ=America/Los_Angeles
{time-trigger}
+
+
+# VOLTHA Manual Test Job
+# This job is triggered manually
+- job-template:
+ name: 'build_{config-pod}_{profile}{name-extension}_voltha_{release}_manual_test'
+ id: build_voltha_pod_manual_test
+ disabled: '{disable-job}'
+ description: |
+ Post tests on {config-pod}, triggered manually, using {pipeline-script}<br /><br />
+ Created from job-template {id} from ci-management/jjb/voltha-test/voltha-nightly-jobs.yaml <br />
+ Created by Suchitra Vemuri, suchitra@opennetworking.org <br />
+ Copyright (c) 2017 Open Networking Foundation (ONF)
+ <<: *voltha-pipe-job-boiler-plate
+ pipeline-script: 'voltha/master/voltha-physical-functional-tests.groovy'
+ manifest-url: 'https://gerrit.opencord.org/voltha-test-manifest.git'
+ manifest-branch: 'master'
+
+ oltAdapterAppLabel: 'adapter-open-olt'
+ enableMultiUni: false
+ timeout: 240
+
+ parameters:
+ - string:
+ name: timeout
+ default: '{timeout}'
+ description: 'Job pipeline timeout value [minutes]'
+
+ - string:
+ name: buildNode
+ default: '{build-node}'
+ description: 'Name of the Jenkins executor node to run the job on'
+
+ - string:
+ name: TestNodeName
+ default: '{build-node}'
+ description: 'DEPRECATED - use buildNode instead'
+
+ - string:
+ name: cordRepoUrl
+ default: '{gerrit-server-url}'
+ description: 'The URL of the CORD Project repository'
+
+ - string:
+ name: configBaseDir
+ default: 'pod-configs'
+ description: 'The directory inside the POD configs repository'
+
+ - string:
+ name: configDeploymentDir
+ default: 'deployment-configs'
+ description: 'The deployment configs folder'
+
+ - string:
+ name: configKubernetesDir
+ default: 'kubernetes-configs'
+ description: 'The kubernetes config folder'
+
+ - string:
+ name: configToscaDir
+ default: 'tosca-configs'
+ description: 'The tosca config folder'
+
+ - string:
+ name: configFileName
+ default: '{config-pod}'
+ description: 'The config file'
+
+ - string:
+ name: profile
+ default: '{profile}'
+ description: 'Technology Profile pushed to the ETCD'
+
+ - string:
+ name: branch
+ default: '{branch}'
+ description: 'Branch of the test libraries to check out'
+
+ - string:
+ name: manifestUrl
+ default: '{manifest-url}'
+ description: 'Repo manifest URL for code checkout (so we can display changes in Jenkins)'
+
+ - string:
+ name: manifestBranch
+ default: '{manifest-branch}'
+ description: 'Repo manifest branch for code checkout (so we can display changes in Jenkins)'
+
+ - string:
+ name: workFlow
+ default: '{work-flow}'
+ description: 'Installs and uses the specified work flow on the POD'
+
+ - bool:
+ name: powerSwitch
+ default: '{power-switch}'
+ description: "Indicate whether POD has power switch to reboot ONUs/OLT remotely"
+
+ - bool:
+ name: enableMultiUni
+ default: '{enableMultiUni}'
+ description: "Enables the Multi UNI feature"
+
+ - string:
+ name: oltAdapterAppLabel
+ default: '{oltAdapterAppLabel}'
+ description: 'OLT adapter pod name'
+
+ - string:
+ name: testType
+ default: '{test-type}'
+ description: 'Passes the required test category to the groovy script'
+
+ - string:
+ name: volthaSystemTestsChange
+ default: '{volthaSystemTestsChange}'
+ description: 'Download a change for gerrit in the voltha-system-tests repo, example value: "refs/changes/79/18779/13"'
+
+ - string:
+ name: kindVolthaChange
+ default: '{kindVolthaChange}'
+ description: 'Download a change for gerrit in the kind-voltha repo, example value: "refs/changes/32/19132/1"'
+
+ - string:
+ name: cordTesterChange
+ default: '{cordTesterChange}'
+ description: 'Download a change for gerrit in the cord-tester repo, example value: "refs/changes/32/19132/1"'
+ concurrent: true
+
+ project-type: pipeline
+ dsl: !include-raw-escape: ../pipeline/{pipeline-script}
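Note the '../pipeline/' prefix in the dsl line above: include paths appear to be resolved relative to the defining YAML file, so templates under jjb/voltha-test/ need one extra level, while templates defined directly under jjb/ (for example in voltha-scale.yaml) reference the scripts without it. Side by side:

  # jjb/voltha-scale.yaml
  dsl: !include-raw-escape: pipeline/{pipeline-script}

  # jjb/voltha-test/voltha-nightly-jobs.yaml
  dsl: !include-raw-escape: ../pipeline/{pipeline-script}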
diff --git a/jjb/voltha-test/voltha.yaml b/jjb/voltha-test/voltha.yaml
index 308c6eb..04253bf 100644
--- a/jjb/voltha-test/voltha.yaml
+++ b/jjb/voltha-test/voltha.yaml
@@ -26,53 +26,38 @@
jobs:
# flex OCP pod with olt/onu - manual test job, voltha master build job
- - 'build_pod_manual':
+ - 'build_voltha_pod_manual':
build-node: 'qa-testvm-pod'
config-pod: 'flex-ocp-cord'
- release: 'master'
- branch: 'master'
- num-of-openonu: '1'
num-of-onos: '3'
num-of-atomix: '3'
+ num-of-kafka: '3'
+ num-of-etcd: '3'
VolthaEtcdPort: 9999
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- profile: '1T4GEM'
+ profile: 'TP'
+ name-extension: '_TT'
+ work-flow: 'TT'
# flex pod1 test job - using voltha branch
- - 'build_pod_test':
+ - 'build_voltha_pod_manual_test':
build-node: 'qa-testvm-pod'
config-pod: 'flex-ocp-cord'
- profile: '1T4GEM'
+ disable-job: false
+ profile: 'TP'
branch: 'master'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-test'
-
- # onlab pod1 OCP pod with olt/onu - Manual testing BAL3.1 release voltha master build job
- - 'build_pod_manual':
- build-node: 'onf-build'
- config-pod: 'onlab-pod1'
release: 'master'
- branch: 'master'
+ work-flow: 'TT'
+ name-extension: '_TT'
+ test-type: ''
test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- profile: '1T4GEM'
-
- # onlab pod1 test job - BAL3.1 tests using voltha branch
- - 'build_pod_test':
- build-node: 'onf-build'
- config-pod: 'onlab-pod1'
- profile: '1T4GEM'
- branch: 'master'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-test'
+ pipeline-script: 'voltha/master/voltha-tt-physical-functional-tests.groovy'
# flex OCP pod with olt/onu - Default tech profile and timer based job
- 'build_voltha_pod_release_timer':
build-node: 'qa-testvm-pod'
config-pod: 'flex-ocp-cord'
profile: 'Default'
- time: '4'
+ time: '5'
VolthaEtcdPort: 9999
num-of-onos: '3'
num-of-atomix: '3'
@@ -87,63 +72,29 @@
test-repo: 'voltha-system-tests'
profile: 'Default'
- # flex OCP pod with olt/onu - Released versions Default tech profile and timer based job
- - 'build_voltha_pod_release_legacy':
+ # flex OCP pod with olt/onu - 1T4GEM tech profile and timer based job
+ - 'build_voltha_pod_release_timer':
build-node: 'qa-testvm-pod'
config-pod: 'flex-ocp-cord'
- release: '2.7'
- branch: 'voltha-2.7'
- num-of-openonu: '1'
- num-of-onos: '3'
- num-of-atomix: '3'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
+ release: '2.8'
+ branch: 'voltha-2.8'
profile: '1T4GEM'
time: '1'
VolthaEtcdPort: 9999
+ num-of-onos: '3'
+ num-of-atomix: '3'
+ pipeline-script: 'voltha/voltha-2.8/physical-build.groovy'
# flex pod1 test job - released versions: uses tech profile on voltha branch
- 'build_voltha_pod_test':
build-node: 'qa-testvm-pod'
config-pod: 'flex-ocp-cord'
- release: '2.7'
- branch: 'voltha-2.7'
+ release: '2.8'
+ branch: 'voltha-2.8'
power-switch: True
test-repo: 'voltha-system-tests'
profile: '1T4GEM'
- # flex OCP pod with olt/onu - Released versions Default tech profile and timer based job
- - 'build_voltha_pod_release_legacy':
- build-node: 'qa-testvm-pod'
- config-pod: 'flex-ocp-cord'
- release: '2.7'
- branch: 'voltha-2.7'
- num-of-openonu: '1'
- num-of-onos: '3'
- num-of-atomix: '3'
- name-extension: '_TT'
- work-flow: 'TT'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
- profile: 'TP'
- time: '22'
- VolthaEtcdPort: 9999
-
- # flex pod1 test job - released versions: uses tech profile on voltha branch
- - 'build_voltha_pod_test':
- build-node: 'qa-testvm-pod'
- config-pod: 'flex-ocp-cord'
- release: '2.7'
- branch: 'voltha-2.7'
- name-extension: '_TT'
- work-flow: 'TT'
- power-switch: True
- pipeline-script: 'voltha-tt-physical-functional-tests.groovy'
- test-repo: 'voltha-system-tests'
- profile: 'TP'
-
# flex OCP pod with olt/onu - Released versions Default tech profile and timer based job
- 'build_voltha_pod_release_timer':
build-node: 'qa-testvm-pod'
@@ -153,7 +104,7 @@
name-extension: '_TT'
work-flow: 'TT'
profile: 'TP'
- time: '9'
+ time: '10'
VolthaEtcdPort: 9999
num-of-onos: '3'
num-of-atomix: '3'
@@ -167,21 +118,84 @@
name-extension: '_TT'
work-flow: 'TT'
power-switch: True
- pipeline-script: 'voltha-tt-physical-functional-tests.groovy'
+ pipeline-script: 'voltha/master/voltha-tt-physical-functional-tests.groovy'
test-repo: 'voltha-system-tests'
profile: 'TP'
+ # flex OCP pod with olt/onu - Released versions Default tech profile and timer based job
+ - 'build_voltha_pod_release_timer':
+ build-node: 'qa-testvm-pod'
+ config-pod: 'flex-ocp-cord'
+ release: '2.8'
+ branch: 'voltha-2.8'
+ name-extension: '_TT'
+ work-flow: 'TT'
+ profile: 'TP'
+ time: '20'
+ VolthaEtcdPort: 9999
+ num-of-onos: '3'
+ num-of-atomix: '3'
+ pipeline-script: 'voltha/voltha-2.8/physical-build.groovy'
+
+ # flex pod1 test job - released versions: uses tech profile on voltha branch
+ - 'build_voltha_pod_test':
+ build-node: 'qa-testvm-pod'
+ config-pod: 'flex-ocp-cord'
+ release: '2.8'
+ branch: 'voltha-2.8'
+ name-extension: '_TT'
+ work-flow: 'TT'
+ power-switch: True
+ pipeline-script: 'voltha/voltha-2.8/voltha-tt-physical-functional-tests.groovy'
+ test-repo: 'voltha-system-tests'
+ profile: 'TP'
+
+ # flex OCP pod with olt/onu - Released versions Default tech profile and timer based job
+ - 'build_voltha_pod_release_timer':
+ build-node: 'qa-testvm-pod'
+ config-pod: 'flex-ocp-cord-multi-uni'
+ release: 'master'
+ branch: 'master'
+ name-extension: '_TT'
+ work-flow: 'TT'
+ profile: 'TP'
+ time: '15'
+ VolthaEtcdPort: 9999
+ num-of-onos: '3'
+ num-of-atomix: '3'
+ reinstall-olt: true
+ enableMultiUni: true
+ uniPortMask: '0x0003'
+
+ # flex pod1 test job - released versions: uses tech profile on voltha branch
+ - 'build_voltha_pod_test':
+ build-node: 'qa-testvm-pod'
+ config-pod: 'flex-ocp-cord-multi-uni'
+ release: 'master'
+ branch: 'master'
+ name-extension: '_TT'
+ work-flow: 'TT'
+ power-switch: True
+ pipeline-script: 'voltha/master/voltha-tt-physical-functional-tests.groovy'
+ test-repo: 'voltha-system-tests'
+ profile: 'TP'
+ enableMultiUni: true
+
# Menlo pod with olt/onu - 1T4GEM tech profile and timer based job
- 'build_voltha_pod_release_timer':
build-node: 'menlo-demo-pod'
config-pod: 'onf-demo-pod'
profile: '1T4GEM'
+ num-of-onos: '3'
+ num-of-atomix: '3'
in-band-management: true
+ waitTimerForOltUp: 540
VolthaEtcdPort: 9999
time: '1'
# Menlo pod test job - master test job uses tech profile on voltha branch
- 'build_voltha_pod_test':
+ timeout: 300
build-node: 'menlo-demo-pod'
config-pod: 'onf-demo-pod'
release: 'master'
@@ -197,12 +211,16 @@
name-extension: '_DT'
work-flow: 'DT'
profile: '1T8GEM'
+ num-of-onos: '3'
+ num-of-atomix: '3'
VolthaEtcdPort: 9999
in-band-management: true
+ waitTimerForOltUp: 540
time: '4'
# Menlo pod test job - uses tech profile on voltha branch
- 'build_voltha_pod_test':
+ timeout: 300
build-node: 'menlo-demo-pod'
config-pod: 'onf-demo-pod'
release: 'master'
@@ -211,114 +229,56 @@
work-flow: 'DT'
test-repo: 'voltha-system-tests'
profile: '1T8GEM'
- pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
+ pipeline-script: 'voltha/master/voltha-dt-physical-functional-tests.groovy'
power-switch: True
# Menlo pod with olt/onu - released branch, Default tech profile and timer based job
- - 'build_voltha_pod_release_legacy':
+ - 'build_voltha_pod_release_timer':
build-node: 'menlo-demo-pod'
config-pod: 'onf-demo-pod'
- release: '2.7'
- branch: 'voltha-2.7'
+ release: '2.8'
+ branch: 'voltha-2.8'
name-extension: '_DT'
work-flow: 'DT'
- num-of-openonu: '1'
+ profile: '1T8GEM'
num-of-onos: '3'
num-of-atomix: '3'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
- profile: '1T8GEM'
VolthaEtcdPort: 9999
in-band-management: true
time: '7'
+ pipeline-script: 'voltha/voltha-2.8/physical-build.groovy'
# Menlo pod test job - uses tech profile on voltha branch
- 'build_voltha_pod_test':
+ timeout: 300
build-node: 'menlo-demo-pod'
config-pod: 'onf-demo-pod'
- release: '2.7'
- branch: 'voltha-2.7'
+ release: '2.8'
+ branch: 'voltha-2.8'
name-extension: '_DT'
work-flow: 'DT'
test-repo: 'voltha-system-tests'
profile: '1T8GEM'
- pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.8/voltha-dt-physical-functional-tests.groovy'
power-switch: True
- # Menlo DEMO-POD - 1 1TCONT 4 4GEMs TechProfile - Manual build and test job
- - 'build_pod_manual':
- build-node: 'menlo-demo-pod'
- config-pod: 'onf-demo-pod'
- release: 'master'
- branch: 'master'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
- profile: '1T4GEM'
- num-of-openonu: '1'
- num-of-onos: '3'
- num-of-atomix: '3'
- in-band-management: true
-
- - 'build_pod_test':
- build-node: 'menlo-demo-pod'
- config-pod: 'onf-demo-pod'
- branch: 'master'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-test'
- profile: '1T4GEM'
-
- # Menlo DEMO-POD - Default TechProfile - manual build job
- - 'build_pod_manual':
- build-node: 'menlo-demo-pod'
- config-pod: 'onf-demo-pod'
- release: 'master'
- branch: 'master'
- num-of-openonu: '1'
- num-of-onos: '3'
- num-of-atomix: '3'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
- profile: 'Default'
- in-band-management: true
-
- # ONF DEMO OCP test job - voltha-master branch
- - 'build_pod_test':
- build-node: 'menlo-demo-pod'
- config-pod: 'onf-demo-pod'
- profile: 'Default'
- branch: 'master'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-test'
-
# ONF Menlo Soak POD build job - voltha-master branch
- 'build_voltha_pod_manual':
build-node: 'menlo-soak-pod'
config-pod: 'onf-soak-pod'
- installBBSim: true
+ disable-job: true
+ bbsimReplicas: 1
profile: '1T8GEM'
name-extension: '_DT'
work-flow: 'DT'
reinstall-olt: true
- num-of-onus: 16
- num-of-ponports: 8
+ num-of-onus: 32
+ num-of-ponports: 16
+ logLevel: 'WARN'
# ONF Menlo Soak POD test job - voltha-master branch
- # FIXME once the soak-pod is back use 'build_voltha_pod_test'
- - 'build_pod_test':
- build-node: 'menlo-soak-pod'
- config-pod: 'onf-soak-pod'
- 'disable-job': true
- profile: 'Default'
- branch: 'master'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-test'
-
- # ONF Menlo Soak POD test job - voltha-master branch
- # Run tests every three days
- - 'build_voltha_pod_soak_test':
+ # Run tests manually triggering the job
+ - 'build_voltha_pod_manual_test':
build-node: 'menlo-soak-pod'
config-pod: 'onf-soak-pod'
disable-job: true
@@ -329,11 +289,10 @@
name-extension: '_DT_soak_Func'
test-type: 'Functional'
test-repo: 'voltha-system-tests'
- pipeline-script: 'voltha-physical-soak-dt-tests.groovy'
- time-trigger: "H H 2 * *"
+ pipeline-script: 'voltha/master/voltha-physical-soak-dt-tests.groovy'
# ONF Menlo Soak POD test job - voltha-master branch
- # Run tests every three days
+ # Run failure/recovery tests every Wednesday
- 'build_voltha_pod_soak_test':
build-node: 'menlo-soak-pod'
config-pod: 'onf-soak-pod'
@@ -345,11 +304,11 @@
name-extension: '_DT_soak_Fail'
test-type: 'Failure'
test-repo: 'voltha-system-tests'
- pipeline-script: 'voltha-physical-soak-dt-tests.groovy'
- time-trigger: "H H */2 * *"
+ pipeline-script: 'voltha/master/voltha-physical-soak-dt-tests.groovy'
+ time-trigger: "H H * * 3"
# ONF Menlo Soak POD test job - voltha-master branch
- # Run dataplane tests every three days
+ # Run dataplane tests every Saturday
- 'build_voltha_pod_soak_test':
build-node: 'menlo-soak-pod'
config-pod: 'onf-soak-pod'
@@ -361,33 +320,72 @@
name-extension: '_DT_soak_DP'
test-type: 'Dataplane'
test-repo: 'voltha-system-tests'
- pipeline-script: 'voltha-physical-soak-dt-tests.groovy'
- time-trigger: "H H */3 * *"
+ pipeline-script: 'voltha/master/voltha-physical-soak-dt-tests.groovy'
+ time-trigger: "H H * * 6"
- # Certification (Radisys) pod with olt/onu - master versions timer based job , two OLTs
- - 'build_voltha_pod_release_timer':
- build-node: 'menlo-certification-pod'
- config-pod: 'menlo-certification-pod-radisys-gpon'
+ # ONF Menlo Soak POD build job - voltha-2.8 branch
+ - 'build_voltha_pod_manual':
+ build-node: 'menlo-soak-pod'
+ config-pod: 'onf-soak-pod'
disable-job: false
- reinstall-olt: false
+ branch: 'voltha-2.8'
+ release: '2.8'
+ bbsimReplicas: 1
+ profile: '1T8GEM'
name-extension: '_DT'
work-flow: 'DT'
- profile: '1T8GEM-unencrypted'
- time: '1'
+ reinstall-olt: true
+ num-of-onus: 32
+ num-of-ponports: 16
+ logLevel: 'WARN'
+ pipeline-script: 'voltha/voltha-2.8/physical-build.groovy'
- # Certification (Radisys) POD test job - master versions: uses tech profile on voltha branch
- - 'build_voltha_pod_test':
- build-node: 'menlo-certification-pod'
- config-pod: 'menlo-certification-pod-radisys-gpon'
+ # ONF Menlo Soak POD test job - voltha-2.8 branch
+ # Run tests manually triggering the job
+ - 'build_voltha_pod_manual_test':
+ build-node: 'menlo-soak-pod'
+ config-pod: 'onf-soak-pod'
disable-job: false
- release: 'master'
- branch: 'master'
- name-extension: '_DT'
+ profile: '1T8GEM'
+ branch: 'voltha-2.8'
+ release: '2.8'
work-flow: 'DT'
+ name-extension: '_DT_soak_Func'
+ test-type: 'Functional'
test-repo: 'voltha-system-tests'
- profile: '1T8GEM-unencrypted'
- power-switch: True
- pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.8/voltha-physical-soak-dt-tests.groovy'
+
+ # ONF Menlo Soak POD test job - voltha-2.8 branch
+ # Run failure/recovery tests every Wednesday
+ - 'build_voltha_pod_soak_test':
+ build-node: 'menlo-soak-pod'
+ config-pod: 'onf-soak-pod'
+ 'disable-job': false
+ profile: '1T8GEM'
+ branch: 'voltha-2.8'
+ release: '2.8'
+ work-flow: 'DT'
+ name-extension: '_DT_soak_Fail'
+ test-type: 'Failure'
+ test-repo: 'voltha-system-tests'
+ pipeline-script: 'voltha/voltha-2.8/voltha-physical-soak-dt-tests.groovy'
+ time-trigger: "H H * * 3"
+
+ # ONF Menlo Soak POD test job - voltha-2.8 branch
+ # Run dataplane tests every Saturday
+ - 'build_voltha_pod_soak_test':
+ build-node: 'menlo-soak-pod'
+ config-pod: 'onf-soak-pod'
+ 'disable-job': false
+ profile: '1T8GEM'
+ branch: 'voltha-2.8'
+ release: '2.8'
+ work-flow: 'DT'
+ name-extension: '_DT_soak_DP'
+ test-type: 'Dataplane'
+ test-repo: 'voltha-system-tests'
+ pipeline-script: 'voltha/voltha-2.8/voltha-physical-soak-dt-tests.groovy'
+ time-trigger: "H H * * 6"
# Berlin pod with olt/onu - master versions timer based job , two OLTs
- 'build_voltha_pod_release_timer':
@@ -396,10 +394,13 @@
name-extension: '_DT'
work-flow: 'DT'
profile: '1T8GEM'
- time: '18'
+ num-of-onos: '3'
+ num-of-atomix: '3'
+ time: '16'
# Berlin POD test job - master versions: uses tech profile on voltha branch
- 'build_voltha_pod_test':
+ timeout: 300
build-node: 'dt-berlin-community-pod'
config-pod: 'dt-berlin-pod-multi-olt'
release: 'master'
@@ -409,67 +410,7 @@
test-repo: 'voltha-system-tests'
profile: '1T8GEM'
power-switch: True
- pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
-
- # Berlin pod with olt/onu - voltha-2.7 timer based job , two OLTs
- - 'build_voltha_pod_release_legacy':
- build-node: 'dt-berlin-community-pod'
- config-pod: 'dt-berlin-pod-multi-olt'
- disable-job: true
- release: '2.7'
- branch: 'voltha-2.7'
- name-extension: '_DT'
- work-flow: 'DT'
- num-of-openonu: '1'
- num-of-onos: '3'
- num-of-atomix: '3'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
- profile: '1T8GEM'
- # Update this value accordingly once the job is enabled
- time: ''
-
- # Berlin POD test job - voltha-2.7 versions: two OLTs
- - 'build_voltha_pod_test':
- build-node: 'dt-berlin-community-pod'
- config-pod: 'dt-berlin-pod-multi-olt'
- disable-job: true
- release: '2.7'
- branch: 'voltha-2.7'
- name-extension: '_DT'
- work-flow: 'DT'
- test-repo: 'voltha-system-tests'
- profile: '1T8GEM'
- power-switch: True
- pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
-
- # Berlin pod with olt/onu - voltha-2.7 Default tech profile and timer based job
- - 'build_voltha_pod_release_legacy':
- build-node: 'dt-berlin-community-pod'
- config-pod: 'dt-berlin-pod'
- release: '2.7'
- branch: 'voltha-2.7'
- num-of-openonu: '1'
- num-of-onos: '3'
- num-of-atomix: '3'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
- disable-job: true
- profile: 'Default'
- # Update this value accordingly once the job is enabled
- time: ''
-
- # Berlin POD test job - released versions: uses tech profile on voltha branch
- - 'build_voltha_pod_test':
- build-node: 'dt-berlin-community-pod'
- config-pod: 'dt-berlin-pod'
- release: '2.7'
- branch: 'voltha-2.7'
- test-repo: 'voltha-system-tests'
- profile: 'Default'
- power-switch: True
+ pipeline-script: 'voltha/master/voltha-dt-physical-functional-tests.groovy'
# Berlin pod with gpon olt/onu - master 1T8GEM tech profile and timer based job
- 'build_voltha_pod_release_timer':
@@ -478,6 +419,8 @@
name-extension: '_DT'
work-flow: 'DT'
profile: '1T8GEM'
+ num-of-onos: '3'
+ num-of-atomix: '3'
time: '1'
# Berlin POD test job - master versions: uses 1T8GEM tech profile on voltha branch
@@ -491,109 +434,94 @@
test-repo: 'voltha-system-tests'
profile: '1T8GEM'
power-switch: True
- pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
+ pipeline-script: 'voltha/master/voltha-dt-physical-functional-tests.groovy'
- # Berlin pod with gpon olt/onu - released 1T8GEM tech profile and timer based job
- - 'build_voltha_pod_release_legacy':
- build-node: 'dt-berlin-community-pod'
- config-pod: 'dt-berlin-pod-gpon'
- release: '2.7'
- branch: 'voltha-2.7'
- num-of-openonu: '1'
- num-of-onos: '3'
- num-of-atomix: '3'
- name-extension: '_DT'
- work-flow: 'DT'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
- profile: '1T8GEM'
- time: '13'
-
- # Berlin POD test job - released versions: uses 1T8GEM tech profile on voltha branch
- - 'build_voltha_pod_test':
- build-node: 'dt-berlin-community-pod'
- config-pod: 'dt-berlin-pod-gpon'
- name-extension: '_DT'
- work-flow: 'DT'
- release: '2.7'
- branch: 'voltha-2.7'
- test-repo: 'voltha-system-tests'
- profile: '1T8GEM'
- power-switch: True
- pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
-
- # Berlin pod with gpon olt/onu - master 1T8GEM tech profile and openonu go and timer based job
+ # Berlin pod with gpon olt/onu - 2.8 1T8GEM tech profile and timer based job
- 'build_voltha_pod_release_timer':
build-node: 'dt-berlin-community-pod'
- config-pod: 'dt-berlin-pod-openonugo'
- 'disable-job': true
- name-extension: '_DT_openonugo'
+ config-pod: 'dt-berlin-pod-gpon'
+ release: '2.8'
+ branch: 'voltha-2.8'
+ name-extension: '_DT'
work-flow: 'DT'
- configurePod: true
profile: '1T8GEM'
- # Update this value accordingly once the job is enabled
- time: ''
+ num-of-onos: '3'
+ num-of-atomix: '3'
+ time: '11'
+ pipeline-script: 'voltha/voltha-2.8/physical-build.groovy'
- # Berlin POD test job - master versions, uses 1T8GEM tech profile and openonu go on voltha branch
+ # Berlin POD test job - 2.8 versions: uses 1T8GEM tech profile on voltha branch
- 'build_voltha_pod_test':
build-node: 'dt-berlin-community-pod'
- config-pod: 'dt-berlin-pod-openonugo'
- 'disable-job': true
- name-extension: '_DT_openonugo'
+ config-pod: 'dt-berlin-pod-gpon'
+ name-extension: '_DT'
work-flow: 'DT'
- release: 'master'
- branch: 'master'
+ release: '2.8'
+ branch: 'voltha-2.8'
test-repo: 'voltha-system-tests'
profile: '1T8GEM'
power-switch: True
- pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.8/voltha-dt-physical-functional-tests.groovy'
- # Berlin pod with olt/onu - manual test job, voltha master build job
- - 'build_pod_manual':
+ # Berlin pod with olt/onu - 2.8 versions timer based job, two OLTs
+ - 'build_voltha_pod_release_timer':
build-node: 'dt-berlin-community-pod'
- config-pod: 'dt-berlin-pod-gpon'
- release: 'master'
- branch: 'master'
- num-of-openonu: '1'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- profile: 'Default'
-
- # Berlin pod1 test job - using voltha branch
- - 'build_pod_test':
- build-node: 'dt-berlin-community-pod'
- config-pod: 'dt-berlin-pod-gpon'
- profile: 'Default'
- branch: 'master'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-test'
-
- # Berlin pod with adtran gpon olt/onu - master 1T8GEM tech profile and timer based job
- - 'build_voltha_pod_release_legacy':
- build-node: 'dt-berlin-community-pod'
- config-pod: 'dt-berlin-pod-gpon-adtran'
- release: '2.6'
- branch: 'voltha-2.6'
- VolthaEtcdPort: 9999
+ config-pod: 'dt-berlin-pod-multi-olt'
name-extension: '_DT'
work-flow: 'DT'
+ release: '2.8'
+ branch: 'voltha-2.8'
+ profile: '1T8GEM'
+ num-of-onos: '3'
+ num-of-atomix: '3'
+ pipeline-script: 'voltha/voltha-2.8/physical-build.groovy'
+ time: '5'
+
+ # Berlin POD test job - 2.8 versions: uses tech profile on voltha branch
+ - 'build_voltha_pod_test':
+ build-node: 'dt-berlin-community-pod'
+ config-pod: 'dt-berlin-pod-multi-olt'
+ release: '2.8'
+ branch: 'voltha-2.8'
+ name-extension: '_DT'
+ work-flow: 'DT'
+ test-repo: 'voltha-system-tests'
+ profile: '1T8GEM'
+ power-switch: True
+ pipeline-script: 'voltha/voltha-2.8/voltha-dt-physical-functional-tests.groovy'
+
+ # Berlin pod with adtran gpon olt/onu - 2.8 1T8GEM tech profile and timer based job
+ - 'build_voltha_pod_release_timer':
+ build-node: 'dt-berlin-community-pod'
+ config-pod: 'dt-berlin-pod-gpon-adtran'
+ name-extension: '_DT'
+ work-flow: 'DT'
+ release: '2.8'
+ branch: 'voltha-2.8'
profile: '1T8GEM'
reinstall-olt: false
- Jenkinsfile: 'Jenkinsfile-voltha-build' # we are cloning voltha-system-test@2.6 that still has it
- openoltAdapterChart: '/home/community/adtran-2021-01-29/voltha-adapter-adtran-olt'
- time: '7'
+ extraHelmFlags: "--set adapter_adtran_olt.kv_store_data_prefix='service/voltha/voltha_voltha' --set network.netconfUserSecret='' --set adapter_adtran_olt.topics.core_topic=voltha_voltha_rwcore"
+ num-of-onos: '3'
+ num-of-atomix: '3'
+ pipeline-script: 'voltha/voltha-2.8/physical-build.groovy'
+ VolthaEtcdPort: 9999
+ openoltAdapterChart: '/home/community/adtran-21.4-voltha-2.8/voltha-adapter-adtran-olt'
+ oltAdapterReleaseName: 'adtran-olt'
+ waitTimerForOltUp: 420
+ time: '21'
- # Berlin POD adtran test job - master versions: uses 1T8GEM tech profile on voltha branch
+ # Berlin POD adtran test job - 2.8 versions: uses 1T8GEM tech profile on voltha branch
- 'build_voltha_pod_test':
+ timeout: 390
build-node: 'dt-berlin-community-pod'
config-pod: 'dt-berlin-pod-gpon-adtran'
+ release: '2.8'
+ branch: 'voltha-2.8'
name-extension: '_DT'
work-flow: 'DT'
- release: '2.6'
- branch: 'voltha-2.6'
test-repo: 'voltha-system-tests'
profile: '1T8GEM'
power-switch: True
+ pipeline-script: 'voltha/voltha-2.8/voltha-dt-physical-functional-tests.groovy'
oltAdapterAppLabel: 'adapter-adtran-olt'
- pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
+
diff --git a/jjb/xos-integration-tests.yaml b/jjb/xos-integration-tests.yaml
index b1cd814..dde320f 100644
--- a/jjb/xos-integration-tests.yaml
+++ b/jjb/xos-integration-tests.yaml
@@ -7,7 +7,8 @@
project-name: 'xos-integration-tests'
jobs:
- - 'data-model-scale'
+ - 'data-model-scale':
+ 'disable-job': true
- 'data-migrations'
- 'data-backup'
- 'att-wf'
@@ -15,6 +16,7 @@
- job-template:
id: 'data-model-scale'
name: 'xos-data-model-scale'
+ disabled: '{disable-job}'
description: |
<!-- Managed by Jenkins Job Builder -->
@@ -56,11 +58,6 @@
default: 'robot -d Log -T -v xos_chameleon_url:127.0.0.1 -v xos_chameleon_port:30006 -v cord_kafka:\$CORD_KAFKA_IP -v num_olts:10 -v num_onus:1 -v num_pon_ports:10 -v timeout:360s xos-scale-att-workflow.robot'
description: 'Exact command to execute the tests including arguments'
- - string:
- name: notificationEmail
- default: 'kailash@opennetworking.org, teo@opennetworking.org'
- description: ''
-
project-type: pipeline
concurrent: false
@@ -115,11 +112,6 @@
default: 'robot -d Log -T -v helm_chart:$WORKSPACE/cord/helm-charts/xos-services/simpleexampleservice xos-remove-service.robot xos-service-migrations.robot'
description: 'Exact command to execute the tests including arguments'
- - string:
- name: notificationEmail
- default: 'kailash@opennetworking.org, smbaker@opennetworking.org'
- description: ''
-
project-type: pipeline
concurrent: false
@@ -174,11 +166,6 @@
default: 'robot -d Log -T xos-backup.robot'
description: 'Exact command to execute the tests including arguments'
- - string:
- name: notificationEmail
- default: 'kailash@opennetworking.org, smbaker@opennetworking.org'
- description: ''
-
project-type: pipeline
concurrent: false
@@ -243,11 +230,6 @@
default: 'robot -d Log -T -v server_ip:127.0.0.1 -v server_port:30006 -v cord_kafka:\$CORD_KAFKA_IP ATT_Workflow.robot'
description: 'Exact command to execute the tests including arguments'
- - string:
- name: notificationEmail
- default: 'kailash@opennetworking.org, teo@opennetworking.org'
- description: ''
-
- bool:
name: InstallService
default: true
diff --git a/vars/createKubernetesCluster.groovy b/vars/createKubernetesCluster.groovy
index a1a2bde..8a55cc7 100644
--- a/vars/createKubernetesCluster.groovy
+++ b/vars/createKubernetesCluster.groovy
@@ -3,6 +3,7 @@
def call(Map config) {
// note that I can't define this outside the function as there's no global scope in Groovy
def defaultConfig = [
+ branch: "master",
nodes: 1,
name: "kind-ci"
]
@@ -58,7 +59,11 @@
if [ "\$HOSTARCH" == "x86_64" ]; then
HOSTARCH="amd64"
fi
- VC_VERSION="\$(curl --fail -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')"
+ if [ "${cfg.branch}" == "voltha-2.8" ]; then
+ VC_VERSION="1.6.11"
+ else
+ VC_VERSION="\$(curl --fail -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')"
+ fi
curl -Lo $WORKSPACE/bin/voltctl https://github.com/opencord/voltctl/releases/download/v\$VC_VERSION/voltctl-\$VC_VERSION-\$HOSTOS-\$HOSTARCH
chmod +x $WORKSPACE/bin/voltctl
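
A minimal usage sketch for this helper, assuming the shared library is loaded by the calling Jenkinsfile; defaults are merged with caller overrides via defaultConfig + config, so only differing keys need to be passed, and the new branch key drives which voltctl version gets installed (the values below are illustrative):

    // spin up a two-node kind cluster with voltctl pinned for voltha-2.8
    createKubernetesCluster(branch: "voltha-2.8", nodes: 2, name: "kind-ci")
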
diff --git a/vars/getPodsInfo.groovy b/vars/getPodsInfo.groovy
index 28fc81d..03b17ce 100644
--- a/vars/getPodsInfo.groovy
+++ b/vars/getPodsInfo.groovy
@@ -3,11 +3,16 @@
def call(String dest) {
sh """
mkdir -p ${dest}
+ # only tee the main info to the console
kubectl get pods --all-namespaces -o wide | tee ${dest}/pods.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee ${dest}/pod-images.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee ${dest}/pod-imagesId.txt || true
+ helm ls --all-namespaces | tee ${dest}/helm-charts.txt
+
+ # everything else should not be dumped on the console
+ kubectl get svc --all-namespaces -o wide > ${dest}/svc.txt || true
+ kubectl get pvc --all-namespaces -o wide > ${dest}/pvcs.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq > ${dest}/pod-images.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq > ${dest}/pod-imagesId.txt || true
kubectl describe pods --all-namespaces -l app.kubernetes.io/part-of=voltha > ${dest}/voltha-pods-describe.txt
kubectl describe pods --all-namespaces -l app=onos-classic > ${dest}/onos-pods-describe.txt
- helm ls --all-namespaces | tee ${dest}/helm-charts.txt
"""
}
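
A minimal usage sketch, assuming the shared library is loaded; with this change only the pod list and helm releases are still echoed to the console, while services, PVCs and image details go straight to files under the destination directory (the path below is illustrative):

    // dump cluster state into the workspace so it can be archived with the job
    getPodsInfo("$WORKSPACE/logs/pods-info")
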
diff --git a/vars/getVolthaImageFlags.groovy b/vars/getVolthaImageFlags.groovy
index 8770ff2..957d586 100644
--- a/vars/getVolthaImageFlags.groovy
+++ b/vars/getVolthaImageFlags.groovy
@@ -15,11 +15,6 @@
chart = "voltha-adapter-openonu"
image = "adapter_open_onu_go"
break
- // TODO remove after 2.7
- case "voltha-openonu-adapter":
- chart = "voltha-adapter-openonu"
- image = "adapter_open_onu"
- break
// TODO end
case "voltha-openolt-adapter":
chart = "voltha-adapter-openolt"
diff --git a/vars/helmTeardown.groovy b/vars/helmTeardown.groovy
index 71fd263..6070fca 100644
--- a/vars/helmTeardown.groovy
+++ b/vars/helmTeardown.groovy
@@ -22,8 +22,8 @@
set +x
PODS=\$(kubectl get pods -n ${n} --no-headers | wc -l)
while [[ \$PODS != 0 ]]; do
- sleep 5
- PODS=\$(kubectl get pods -n ${n} --no-headers | wc -l)
+ sleep 5
+ PODS=\$(kubectl get pods -n ${n} --no-headers | wc -l)
done
"""
}
diff --git a/vars/startComponentsLogs.groovy b/vars/startComponentsLogs.groovy
new file mode 100644
index 0000000..a485be4
--- /dev/null
+++ b/vars/startComponentsLogs.groovy
@@ -0,0 +1,54 @@
+// check if kail is installed; if not, install it,
+// then use it to collect logs from the specified containers
+
+// appsToLog is a list of kubernetes labels used by kail to select the containers to log;
+// each generated log file is named after the string following '=',
+// for example app=bbsim will generate a file called bbsim.log
+
+// to archive the logs use: archiveArtifacts artifacts: '${logsDir}/*.log'
+def call(Map config) {
+
+ def tagPrefix = "jenkins"
+
+ def defaultConfig = [
+ appsToLog: [
+ 'app=onos-classic',
+ 'app=adapter-open-onu',
+ 'app=adapter-open-olt',
+ 'app=rw-core',
+ 'app=ofagent',
+ 'app=bbsim',
+ 'app=radius',
+ 'app=bbsim-sadis-server',
+ 'app=onos-config-loader',
+ ],
+ logsDir: "$WORKSPACE/logs"
+ ]
+
+ if (!config) {
+ config = [:]
+ }
+
+ def cfg = defaultConfig + config
+
+ // check if kail is installed and, if not, install it
+ sh """
+ if ! command -v kail &> /dev/null
+ then
+ bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
+ fi
+ """
+
+ // if the logsDir does not exist, dir() will create it
+ dir(cfg.logsDir) {
+ for(int i = 0;i<cfg.appsToLog.size();i++) {
+ def label = cfg.appsToLog[i]
+ def logFile = label.split('=')[1]
+ def tag = "${tagPrefix}-kail-${logFile}"
+ println "Starting logging process for label: ${label}"
+ sh """
+ _TAG=${tag} kail -l ${label} > ${cfg.logsDir}/${logFile}.log&
+ """
+ }
+ }
+}
\ No newline at end of file
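
A minimal usage sketch for the new helper, assuming the shared library is loaded; the labels below are a subset of the defaults above, and the archive step reuses the pattern suggested in the header comment:

    // start background kail collectors for a couple of components
    startComponentsLogs(appsToLog: ['app=bbsim', 'app=rw-core'], logsDir: "$WORKSPACE/logs")
    // ... run the tests ...
    // archive whatever the kail processes wrote
    archiveArtifacts artifacts: 'logs/*.log'
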
diff --git a/vars/stopComponentsLogs.groovy b/vars/stopComponentsLogs.groovy
new file mode 100644
index 0000000..6729c37
--- /dev/null
+++ b/vars/stopComponentsLogs.groovy
@@ -0,0 +1,35 @@
+// stops all the kail processes created by startComponentsLogs
+
+def call(Map config) {
+
+ def defaultConfig = [
+ logsDir: "$WORKSPACE/logs",
+ compress: false, // whether to compress the logs in a tgz file
+ ]
+
+ if (!config) {
+ config = [:]
+ }
+
+ def cfg = defaultConfig + config
+
+ def tag = "jenkins-"
+ println "Stopping all kail logging process"
+ sh """
+ P_IDS="\$(ps e -ww -A | grep "_TAG=jenkins-kail" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_IDS" ]; then
+ for P_ID in \$P_IDS; do
+ kill -9 \$P_ID
+ done
+ fi
+ """
+ if (cfg.compress) {
+ sh """
+ pushd ${cfg.logsDir}
+ tar czf ${cfg.logsDir}/combined.tgz *
+ rm *.log
+ popd
+ """
+
+ }
+}
\ No newline at end of file
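
A minimal usage sketch, assuming the shared library is loaded; it pairs with startComponentsLogs above and, with compress enabled, leaves a single combined.tgz in the logs directory:

    // kill the background kail processes and bundle their output
    stopComponentsLogs(compress: true)
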
diff --git a/vars/volthaInfraDeploy.groovy b/vars/volthaInfraDeploy.groovy
index 58bd800..be2f759 100644
--- a/vars/volthaInfraDeploy.groovy
+++ b/vars/volthaInfraDeploy.groovy
@@ -53,11 +53,14 @@
kubectl create configmap -n ${cfg.infraNamespace} kube-config "--from-file=kube_config=${kubeconfig}" || true
"""
+ // bitnami/etcd changed the replica flag format between the currently used 5.4.2 and the latest 6.2.5,
+ // so for now set both values in the helm flags
sh """
helm upgrade --install --create-namespace -n ${cfg.infraNamespace} voltha-infra ${volthaInfraChart} \
--set onos-classic.replicas=${cfg.onosReplica},onos-classic.atomix.replicas=${cfg.atomixReplica} \
--set kafka.replicaCount=${cfg.kafkaReplica},kafka.zookeeper.replicaCount=${cfg.kafkaReplica} \
--set etcd.statefulset.replicaCount=${cfg.etcdReplica} \
+ --set etcd.replicaCount=${cfg.etcdReplica} \
-f $WORKSPACE/voltha-helm-charts/examples/${cfg.workflow}-values.yaml ${cfg.extraHelmFlags}
"""
}
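
Because both etcd.statefulset.replicaCount and etcd.replicaCount are set, whichever flag the deployed bitnami etcd chart version understands takes effect and the other is simply ignored by the templates. A minimal call sketch, using only cfg keys referenced in this helper and illustrative values:

    // deploy the shared infra with a 3-instance ONOS/Atomix cluster and single-node kafka and etcd
    volthaInfraDeploy(infraNamespace: "infra", workflow: "dt", onosReplica: 3, atomixReplica: 3, kafkaReplica: 1, etcdReplica: 1)
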
diff --git a/vars/volthaStackDeploy.groovy b/vars/volthaStackDeploy.groovy
index b7e43fc..ae8533c 100644
--- a/vars/volthaStackDeploy.groovy
+++ b/vars/volthaStackDeploy.groovy
@@ -10,6 +10,7 @@
workflow: "att",
extraHelmFlags: "",
localCharts: false,
+ onosReplica: 1,
]
if (!config) {
@@ -19,9 +20,11 @@
def cfg = defaultConfig + config
def volthaStackChart = "onf/voltha-stack"
+ def bbsimChart = "onf/bbsim"
if (cfg.localCharts) {
volthaStackChart = "$WORKSPACE/voltha-helm-charts/voltha-stack"
+ bbsimChart = "$WORKSPACE/voltha-helm-charts/bbsim"
sh """
pushd $WORKSPACE/voltha-helm-charts/voltha-stack
@@ -36,6 +39,7 @@
helm upgrade --install --create-namespace -n ${cfg.volthaNamespace} ${cfg.stackName} ${volthaStackChart} \
--set global.stack_name=${cfg.stackName} \
--set global.voltha_infra_name=voltha-infra \
+ --set voltha.onos_classic.replicas=${cfg.onosReplica} \
--set global.voltha_infra_namespace=${cfg.infraNamespace} \
${cfg.extraHelmFlags}
"""
@@ -51,7 +55,7 @@
def bbsimCfg = readYaml file: "$WORKSPACE/voltha-helm-charts/examples/${cfg.workflow}-values.yaml"
// NOTE we assume that the only service that needs a different s_tag is the first one in the list
bbsimCfg["servicesConfig"]["services"][0]["s_tag"] = startingStag + i
- println "Using BBSim Service config ${bbsimCfg}"
+ println "Using BBSim Service config ${bbsimCfg['servicesConfig']}"
writeYaml file: "$WORKSPACE/bbsimCfg${cfg.stackId}${i}.yaml", data: bbsimCfg
} else {
// NOTE if it's DT just copy the file over
@@ -62,7 +66,7 @@
}
sh """
- helm upgrade --install --create-namespace -n ${cfg.volthaNamespace} bbsim${i} onf/bbsim \
+ helm upgrade --install --create-namespace -n ${cfg.volthaNamespace} bbsim${i} ${bbsimChart} \
--set olt_id="${cfg.stackId}${i}" \
-f $WORKSPACE/bbsimCfg${cfg.stackId}${i}.yaml \
${cfg.extraHelmFlags}
@@ -81,8 +85,10 @@
"""
// also make sure that the ONOS config is loaded
+ // NOTE that this is only required for VOLTHA-2.8
println "Wait for ONOS Config loader to complete"
sh """
set +x
config=\$(kubectl get jobs.batch -n ${cfg.infraNamespace} --no-headers | grep "0/" | wc -l)
@@ -91,4 +97,14 @@
config=\$(kubectl get jobs.batch -n ${cfg.infraNamespace} --no-headers | grep "0/" | wc -l)
done
"""
+ // NOTE that this is only required from VOLTHA-2.9 onwards, to wait until the pod has completed,
+ // meaning ONOS is fully deployed
+ sh """
+ set +x
+ config=\$(kubectl get pods -l app=onos-config-loader -n ${cfg.infraNamespace} --no-headers --field-selector=status.phase=Running | grep "0/" | wc -l)
+ while [[ \$config != 0 ]]; do
+ sleep 5
+ config=\$(kubectl get pods -l app=onos-config-loader -n ${cfg.infraNamespace} --no-headers --field-selector=status.phase=Running | grep "0/" | wc -l)
+ done
+ """
}
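
A minimal call sketch for the stack helper, using only cfg keys referenced above and illustrative values; with localCharts enabled both the voltha-stack and bbsim charts are taken from the checked-out voltha-helm-charts tree, and the final wait loops cover both the 2.8 job-based and the 2.9+ pod-based ONOS config loader:

    // deploy one VOLTHA stack against the shared infra using the local charts
    volthaStackDeploy(volthaNamespace: "voltha", infraNamespace: "infra", stackName: "voltha1", stackId: 1, workflow: "dt", onosReplica: 3, localCharts: true)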