Merge "[VOL-3353] Trigger openonu security static checks"
diff --git a/jjb/cord-test/voltha.yaml b/jjb/cord-test/voltha.yaml
index 2d530ec..f34bb19 100644
--- a/jjb/cord-test/voltha.yaml
+++ b/jjb/cord-test/voltha.yaml
@@ -124,7 +124,7 @@
Jenkinsfile: 'Jenkinsfile-voltha-build'
configurePod: true
profile: 'TP'
- time: '7'
+ time: '22'
# flex pod1 test job - released versions: uses tech profile on voltha branch
- 'build_voltha_pod_test':
diff --git a/jjb/omec-ci.yaml b/jjb/omec-ci.yaml
index a359e90..2c85e9e 100644
--- a/jjb/omec-ci.yaml
+++ b/jjb/omec-ci.yaml
@@ -66,10 +66,10 @@
pipeline-file: 'Jenkinsfile-omec-install-ngic-rtc-vnf.groovy'
- 'omec-fossa':
pipeline-file: 'omec-fossa-scan.groovy'
- build-node: 'omec-qa'
+ build-node: 'ubuntu16.04-basebuild-1c-2g'
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
- build-node: 'omec-qa'
+ build-node: 'ubuntu16.04-basebuild-1c-2g'
- 'docker-publish-github':
build-timeout: 30
docker-repo: 'omecproject'
@@ -103,10 +103,10 @@
pipeline-file: 'Jenkinsfile-omec-install-c3po-hss-vnf.groovy'
- 'omec-fossa':
pipeline-file: 'omec-fossa-scan.groovy'
- build-node: 'omec-qa'
+ build-node: 'ubuntu16.04-basebuild-1c-2g'
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
- build-node: 'omec-qa'
+ build-node: 'ubuntu16.04-basebuild-1c-2g'
- 'docker-publish-github':
build-timeout: 30
docker-repo: 'omecproject'
@@ -136,10 +136,10 @@
pipeline-file: 'Jenkinsfile-omec-install-openmme-vnf.groovy'
- 'omec-fossa':
pipeline-file: 'omec-fossa-scan.groovy'
- build-node: 'omec-qa'
+ build-node: 'ubuntu16.04-basebuild-1c-2g'
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
- build-node: 'omec-qa'
+ build-node: 'ubuntu16.04-basebuild-1c-2g'
- 'docker-publish-github':
build-timeout: 30
docker-repo: 'omecproject'
@@ -168,10 +168,10 @@
pipeline-file: 'Jenkinsfile-omec-install-Nucleus-vnf.groovy'
- 'omec-fossa':
pipeline-file: 'omec-fossa-scan.groovy'
- build-node: 'omec-qa'
+ build-node: 'ubuntu16.04-basebuild-1c-2g'
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
- build-node: 'omec-qa'
+ build-node: 'ubuntu16.04-basebuild-1c-2g'
- 'docker-publish-github':
build-timeout: 60
docker-repo: 'omecproject'
@@ -194,10 +194,10 @@
jobs:
- 'omec-fossa':
pipeline-file: 'omec-fossa-scan.groovy'
- build-node: 'omec-qa'
+ build-node: 'ubuntu16.04-basebuild-1c-2g'
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
- build-node: 'omec-qa'
+ build-node: 'ubuntu16.04-basebuild-1c-2g'
# for ignite
- project:
@@ -215,10 +215,10 @@
jobs:
- 'omec-fossa':
pipeline-file: 'omec-fossa-scan.groovy'
- build-node: 'omec-qa'
+ build-node: 'ubuntu16.04-basebuild-1c-2g'
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
- build-node: 'omec-qa'
+ build-node: 'ubuntu16.04-basebuild-1c-2g'
# for upf-epc
- project:
@@ -234,10 +234,10 @@
jobs:
- 'omec-fossa':
pipeline-file: 'omec-fossa-scan.groovy'
- build-node: 'omec-qa'
+ build-node: 'ubuntu16.04-basebuild-1c-2g'
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
- build-node: 'omec-qa'
+ build-node: 'ubuntu16.04-basebuild-1c-2g'
- 'docker-publish-github':
build-timeout: 30
docker-repo: 'omecproject'
diff --git a/jjb/pipeline/omec-fossa-scan.groovy b/jjb/pipeline/omec-fossa-scan.groovy
index 0c415ac..fa82b84 100644
--- a/jjb/pipeline/omec-fossa-scan.groovy
+++ b/jjb/pipeline/omec-fossa-scan.groovy
@@ -19,7 +19,7 @@
agent {
docker {
- image "fossa-verify:latest"
+ image "omecproject/fossa-verify:latest"
label "${params.buildNode}"
}
}
diff --git a/jjb/pipeline/omec-reuse-scan.groovy b/jjb/pipeline/omec-reuse-scan.groovy
index 605f285..03b19e8 100644
--- a/jjb/pipeline/omec-reuse-scan.groovy
+++ b/jjb/pipeline/omec-reuse-scan.groovy
@@ -19,7 +19,7 @@
agent {
docker {
- image "reuse-verify:latest"
+ image "omecproject/reuse-verify:latest"
label "${params.buildNode}"
}
}
diff --git a/jjb/pipeline/voltha-dt-physical-build-and-tests.groovy b/jjb/pipeline/voltha-dt-physical-build-and-tests.groovy
new file mode 100644
index 0000000..da7bb3f
--- /dev/null
+++ b/jjb/pipeline/voltha-dt-physical-build-and-tests.groovy
@@ -0,0 +1,440 @@
+// Copyright 2019-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// deploy VOLTHA built from patchset on a physical pod and run e2e test
+// uses kind-voltha to deploy voltha-2.X
+
+// Need this so that deployment_config has global scope when it's read later
+deployment_config = null
+localDeploymentConfigFile = null
+localKindVolthaValuesFile = null
+localSadisConfigFile = null
+
+// The pipeline assumes these variables are always defined
+if ( params.manualBranch != "" ) {
+ GERRIT_EVENT_COMMENT_TEXT = ""
+ GERRIT_PROJECT = ""
+ GERRIT_BRANCH = "${params.manualBranch}"
+ GERRIT_CHANGE_NUMBER = ""
+ GERRIT_PATCHSET_NUMBER = ""
+}
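+// With the Gerrit variables blanked, the stages guarded by
+// "when { expression { params.manualBranch == "" } }" (Get Patch, Build and
+// Push Images, After-Test Delay) are skipped on manual runs.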
+
+pipeline {
+
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 120, unit: 'MINUTES')
+ }
+
+ environment {
+ KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
+ VOLTCONFIG="$HOME/.volt/config-minimal"
+ PATH="$WORKSPACE/voltha/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ TYPE="minimal"
+ FANCY=0
+ //VOL-2194 ONOS SSH and REST ports hardcoded to 30115/30120 in tests
+ ONOS_SSH_PORT=30115
+ ONOS_API_PORT=30120
+ }
+
+ stages {
+ stage ('Initialize') {
+ steps {
+ sh returnStdout: false, script: """
+ test -e $WORKSPACE/voltha/kind-voltha/voltha && cd $WORKSPACE/voltha/kind-voltha && ./voltha down
+ cd $WORKSPACE
+ rm -rf $WORKSPACE/*
+ """
+ script {
+ if (env.configRepo && ! env.localConfigDir) {
+ env.localConfigDir = "$WORKSPACE"
+ sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configRepo}"
+ }
+ localDeploymentConfigFile = "${env.localConfigDir}/${params.deploymentConfigFile}"
+ localKindVolthaValuesFile = "${env.localConfigDir}/${params.kindVolthaValuesFile}"
+ localSadisConfigFile = "${env.localConfigDir}/${params.sadisConfigFile}"
+ }
+ }
+ }
+
+ stage('Repo') {
+ steps {
+ checkout(changelog: true,
+ poll: false,
+ scm: [$class: 'RepoScm',
+ manifestRepositoryUrl: "${params.manifestUrl}",
+ manifestBranch: "${params.branch}",
+ currentBranch: true,
+ destinationDir: 'voltha',
+ forceSync: true,
+ resetFirst: true,
+ quiet: true,
+ jobs: 4,
+ showAllChanges: true]
+ )
+ }
+ }
+
+ stage('Get Patch') {
+ when {
+ expression { params.manualBranch == "" }
+ }
+ steps {
+ sh returnStdout: false, script: """
+ cd voltha
+ repo download "${gerritProject}" "${gerritChangeNumber}/${gerritPatchsetNumber}"
+ """
+ }
+ }
+
+ stage('Check config files') {
+ steps {
+ script {
+ try {
+ deployment_config = readYaml file: "${localDeploymentConfigFile}"
+ } catch (err) {
+ echo "Error reading ${localDeploymentConfigFile}"
+ throw err
+ }
+ sh returnStdout: false, script: """
+ if [ ! -e ${localKindVolthaValuesFile} ]; then echo "${localKindVolthaValuesFile} not found"; exit 1; fi
+ if [ ! -e ${localSadisConfigFile} ]; then echo "${localSadisConfigFile} not found"; exit 1; fi
+ """
+ }
+ }
+ }
+
+ stage('Create KinD Cluster') {
+ steps {
+ sh returnStdout: false, script: """
+ if [ "${branch}" != "master" ]; then
+ echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
+ source "$WORKSPACE/voltha/kind-voltha/releases/${branch}"
+ else
+ echo "on master, using default settings for kind-voltha"
+ fi
+
+ cd $WORKSPACE/voltha/kind-voltha/
+ JUST_K8S=y ./voltha up
+ """
+ }
+ }
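+    // JUST_K8S=y asks kind-voltha to bring up only the KinD/Kubernetes cluster at
+    // this point; VOLTHA itself is deployed later in the 'Deploy Voltha' stage.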
+
+ stage('Build and Push Images') {
+ when {
+ expression { params.manualBranch == "" }
+ }
+ steps {
+ sh returnStdout: false, script: """
+
+ if [ "${branch}" != "master" ]; then
+ echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
+ source "$WORKSPACE/voltha/kind-voltha/releases/${branch}"
+ else
+ echo "on master, using default settings for kind-voltha"
+ fi
+
+ if ! [[ "${gerritProject}" =~ ^(voltha-system-tests|kind-voltha|voltha-helm-charts)\$ ]]; then
+ make -C $WORKSPACE/voltha/${gerritProject} DOCKER_REPOSITORY=voltha/ DOCKER_TAG=citest docker-build
+ docker images | grep citest
+ for image in \$(docker images -f "reference=*/*citest" --format "{{.Repository}}")
+ do
+ echo "Pushing \$image to nodes"
+ kind load docker-image \$image:citest --name voltha-\$TYPE --nodes voltha-\$TYPE-worker,voltha-\$TYPE-worker2
+ docker rmi \$image:citest \$image:latest || true
+ done
+ fi
+ """
+ }
+ }
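+    // Images built from the patchset are tagged 'citest' and side-loaded into the
+    // KinD worker nodes; the 'Deploy Voltha' stage points the charts at that tag
+    // with pullPolicy=Never so the freshly built images are the ones that run.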
+
+ stage('Deploy Voltha') {
+ environment {
+ WITH_RADIUS="no"
+ WITH_EAPOL="no"
+ WITH_DHCP="no"
+ WITH_IGMP="no"
+ CONFIG_SADIS="no"
+ WITH_SIM_ADAPTERS="no"
+ DEPLOY_K8S="no"
+ VOLTHA_LOG_LEVEL="DEBUG"
+ }
+ steps {
+ script {
+ sh returnStdout: false, script: """
+ if [ "${branch}" != "master" ]; then
+ echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
+ source "$WORKSPACE/voltha/kind-voltha/releases/${branch}"
+ else
+ echo "on master, using default settings for kind-voltha"
+ fi
+
+ export EXTRA_HELM_FLAGS+='--set log_agent.enabled=False -f ${localKindVolthaValuesFile} '
+
+ IMAGES=""
+ if [ "${gerritProject}" = "voltha-go" ]; then
+ IMAGES="rw_core ro_core "
+ elif [ "${gerritProject}" = "ofagent-py" ]; then
+ IMAGES="ofagent "
+ elif [ "${gerritProject}" = "voltha-onos" ]; then
+ IMAGES="onos "
+ elif [ "${gerritProject}" = "voltha-openolt-adapter" ]; then
+ IMAGES="adapter_open_olt "
+ elif [ "${gerritProject}" = "voltha-openonu-adapter" ]; then
+ IMAGES="adapter_open_onu "
+ elif [ "${gerritProject}" = "voltha-api-server" ]; then
+ IMAGES="afrouter afrouterd "
+ else
+ echo "No images to push"
+ fi
+
+ for I in \$IMAGES
+ do
+ EXTRA_HELM_FLAGS+="--set images.\$I.tag=citest,images.\$I.pullPolicy=Never "
+ done
+
+ if [ "${gerritProject}" = "voltha-helm-charts" ]; then
+ export CHART_PATH=$WORKSPACE/voltha/voltha-helm-charts
+ export VOLTHA_CHART=\$CHART_PATH/voltha
+ export VOLTHA_ADAPTER_OPEN_OLT_CHART=\$CHART_PATH/voltha-adapter-openolt
+ export VOLTHA_ADAPTER_OPEN_ONU_CHART=\$CHART_PATH/voltha-adapter-openonu
+ helm dep update \$VOLTHA_CHART
+ helm dep update \$VOLTHA_ADAPTER_OPEN_OLT_CHART
+ helm dep update \$VOLTHA_ADAPTER_OPEN_ONU_CHART
+ fi
+
+ cd $WORKSPACE/voltha/kind-voltha/
+ echo \$EXTRA_HELM_FLAGS
+ kail -n voltha -n default > $WORKSPACE/onos-voltha-combined.log &
+ ./voltha up
+
+ set +e
+
+ # Remove noise from voltha-core logs
+ voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
+ voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
+ # Remove noise from openolt logs
+ voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
+ voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
+ voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
+ """
+ }
+ }
+ }
+
+ stage('Deploy Kafka Dump Chart') {
+ steps {
+ script {
+ sh returnStdout: false, script: """
+ helm repo add cord https://charts.opencord.org
+ helm repo update
+ if helm version -c --short|grep v2 -q; then
+ helm install -n voltha-kafka-dump cord/voltha-kafka-dump
+ else
+ helm install voltha-kafka-dump cord/voltha-kafka-dump
+ fi
+ """
+ }
+ }
+ }
+
+ stage('Push Tech-Profile') {
+ when {
+ expression { params.profile != "Default" }
+ }
+ steps {
+ sh returnStdout: false, script: """
+ etcd_container=\$(kubectl get pods -n voltha | grep voltha-etcd-cluster | awk 'NR==1{print \$1}')
+ kubectl cp $WORKSPACE/voltha/voltha-system-tests/tests/data/TechProfile-${profile}.json voltha/\$etcd_container:/tmp/flexpod.json
+ kubectl exec -it \$etcd_container -n voltha -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/XGS-PON/64'
+ """
+ }
+ }
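+    // The selected TechProfile-<profile>.json is copied into the etcd pod and
+    // stored at the key service/voltha/technology_profiles/XGS-PON/64 (profile ID 64).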
+
+ stage('Push Sadis-config') {
+ steps {
+ sh returnStdout: false, script: """
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:$ONOS_API_PORT/onos/v1/network/configuration --data @${localSadisConfigFile}
+ """
+ }
+ }
+
+ stage('Reinstall OLT software') {
+ when {
+ expression { params.reinstallOlt }
+ }
+ steps {
+ script {
+ deployment_config.olts.each { olt ->
+ sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --remove asfvolt16 && dpkg --purge asfvolt16'"
+ waitUntil {
+ olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
+ return olt_sw_present.toInteger() == 0
+ }
+ if ( params.branch == 'voltha-2.3' ) {
+ oltDebVersion = oltDebVersionVoltha23
+ } else {
+ oltDebVersion = oltDebVersionMaster
+ }
+ sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --install ${oltDebVersion}'"
+ waitUntil {
+ olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
+ return olt_sw_present.toInteger() == 1
+ }
+ if ( olt.fortygig ) {
+            // If the OLT is connected to a 40G switch interface, downgrade the NNI port speed to 40G
+ sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'echo port ce128 sp=40000 >> /broadcom/qax.soc ; /opt/bcm68620/svk_init.sh'"
+ }
+ }
+ }
+ }
+ }
+
+ stage('Restart OLT processes') {
+ steps {
+ script {
+ deployment_config.olts.each { olt ->
+ sh returnStdout: false, script: """
+ ssh-keyscan -H ${olt.ip} >> ~/.ssh/known_hosts
+ sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; reboot'
+ sleep 120
+ """
+ waitUntil {
+ onu_discovered = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'grep \"onu discover indication\" /var/log/openolt.log | wc -l'"
+ return onu_discovered.toInteger() > 0
+ }
+ }
+ }
+ }
+ }
+
+ stage('Run E2E Tests') {
+ environment {
+ ROBOT_CONFIG_FILE="${localDeploymentConfigFile}"
+ ROBOT_MISC_ARGS="${params.extraRobotArgs} --removekeywords wuks -d $WORKSPACE/RobotLogs -v container_log_dir:$WORKSPACE "
+ ROBOT_FILE="Voltha_DT_PODTests.robot"
+ }
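+      // extraRobotArgs comes from the job's default-test-args in jjb/voltha-e2e.yaml
+      // ('-i sanityDt -X' for the DT jobs); the Gerrit-comment checks below append
+      // further tag filters to ROBOT_MISC_ARGS.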
+ steps {
+ sh returnStdout: false, script: """
+ cd voltha
+ mkdir -p $WORKSPACE/RobotLogs
+
+ # If the Gerrit comment contains a line with "functional tests" then run the full
+      # functional test suite. This covers tests tagged either 'sanityDt' or 'functionalDt'.
+ # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
+ REGEX="functional tests"
+ if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
+ ROBOT_MISC_ARGS+="-i functionalDt"
+ fi
+ # Likewise for dataplane tests
+ REGEX="dataplane tests"
+ if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
+ ROBOT_MISC_ARGS+="-i dataplaneDt"
+ fi
+
+ make -C $WORKSPACE/voltha/voltha-system-tests voltha-dt-test || true
+ """
+ }
+ }
+
+ stage('After-Test Delay') {
+ when {
+ expression { params.manualBranch == "" }
+ }
+ steps {
+ sh returnStdout: false, script: """
+ # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
+ REGEX="hardware test with delay\$"
+ [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]] && sleep 10m || true
+ """
+ }
+ }
+ }
+
+ post {
+ always {
+ sh returnStdout: false, script: '''
+ set +e
+ cp $WORKSPACE/voltha/kind-voltha/install-minimal.log $WORKSPACE/
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
+ kubectl get nodes -o wide
+ kubectl get pods -o wide
+ kubectl get pods -n voltha -o wide
+
+ sync
+ pkill kail || true
+
+ ## Pull out errors from log files
+ extract_errors_go() {
+ echo
+ echo "Error summary for $1:"
+ grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
+ echo
+ }
+
+ extract_errors_python() {
+ echo
+ echo "Error summary for $1:"
+ grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
+ echo
+ }
+
+ extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
+ extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
+ extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
+ extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
+
+ gzip $WORKSPACE/onos-voltha-combined.log
+
+ ## collect events, the chart should be running by now
+ kubectl get pods | grep -i voltha-kafka-dump | grep -i running
+ if [[ $? == 0 ]]; then
+ kubectl exec -it `kubectl get pods | grep -i voltha-kafka-dump | grep -i running | cut -f1 -d " "` ./voltha-dump-events.sh > $WORKSPACE/voltha-events.log
+ fi
+ '''
+ script {
+ deployment_config.olts.each { olt ->
+ sh returnStdout: false, script: """
+ until sshpass -p ${olt.pass} scp ${olt.user}@${olt.ip}:/var/log/openolt.log $WORKSPACE/openolt-${olt.ip}.log
+ do
+          echo "Fetching openolt.log failed, retrying..."
+ sleep 10
+ done
+ sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.ip}.log # Remove escape sequences
+ until sshpass -p ${olt.pass} scp ${olt.user}@${olt.ip}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.ip}.log
+ do
+ echo "Fetching dev_mgmt_daemon.log failed, retrying..."
+ sleep 10
+ done
+ sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.ip}.log # Remove escape sequences
+ """
+ }
+ }
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: 'RobotLogs/log*.html',
+ otherFiles: '',
+ outputFileName: 'RobotLogs/output*.xml',
+ outputPath: '.',
+ passThreshold: 100,
+ reportFileName: 'RobotLogs/report*.html',
+ unstableThreshold: 0]);
+ archiveArtifacts artifacts: '*.log,*.gz'
+ }
+ }
+}
diff --git a/jjb/pipeline/voltha-physical-build-and-tests.groovy b/jjb/pipeline/voltha-physical-build-and-tests.groovy
index 7d2bea7..5f1bfff 100644
--- a/jjb/pipeline/voltha-physical-build-and-tests.groovy
+++ b/jjb/pipeline/voltha-physical-build-and-tests.groovy
@@ -343,6 +343,11 @@
if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
ROBOT_MISC_ARGS+="-i functional"
fi
+ # Likewise for dataplane tests
+ REGEX="dataplane tests"
+ if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
+ ROBOT_MISC_ARGS+="-i dataplane"
+ fi
make -C $WORKSPACE/voltha/voltha-system-tests voltha-test || true
"""
diff --git a/jjb/pipeline/voltha-physical-functional-tests.groovy b/jjb/pipeline/voltha-physical-functional-tests.groovy
index c42e280..86a4db6 100644
--- a/jjb/pipeline/voltha-physical-functional-tests.groovy
+++ b/jjb/pipeline/voltha-physical-functional-tests.groovy
@@ -23,7 +23,7 @@
label "${params.buildNode}"
}
options {
- timeout(time: 180, unit: 'MINUTES')
+ timeout(time: 280, unit: 'MINUTES')
}
environment {
diff --git a/jjb/pipeline/voltha-scale-test.groovy b/jjb/pipeline/voltha-scale-test.groovy
index 0cf443c..dc4d12e 100644
--- a/jjb/pipeline/voltha-scale-test.groovy
+++ b/jjb/pipeline/voltha-scale-test.groovy
@@ -62,6 +62,8 @@
VOLTHA_BBSIM_CHART="${bbsimChart}"
VOLTHA_ADAPTER_OPEN_OLT_CHART="${openoltAdapterChart}"
VOLTHA_ADAPTER_OPEN_ONU_CHART="${openonuAdapterChart}"
+
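+    // APPS_TO_LOG lists the pod selectors that get a dedicated kail log stream,
+    // started once VOLTHA is deployed and stopped/collected in the post stage.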
+ APPS_TO_LOG="etcd kafka onos-onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius"
}
stages {
@@ -147,12 +149,12 @@
// includes monitoring, kafka, etcd
steps {
sh '''
- helm install kafka incubator/kafka --set replicas=3 --set persistence.enabled=false --set zookeeper.replicaCount=3 --set zookeeper.persistence.enabled=false
+ helm install kafka incubator/kafka --set replicas=${kafkaReplicas} --set persistence.enabled=false --set zookeeper.replicaCount=${kafkaReplicas} --set zookeeper.persistence.enabled=false
# the ETCD chart uses "auth" for reasons different than BBSim, so strip that away
ETCD_FLAGS=$(echo ${extraHelmFlags} | sed -e 's/--set auth=false / /g' -e 's/--set auth=true / /g')
ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
- helm install -f $WORKSPACE/kind-voltha/values.yaml --set etcd.replicas=3 etcd etcd/etcd $ETCD_FLAGS
+ helm install -f $WORKSPACE/kind-voltha/values.yaml --set replicas=${etcdReplicas} etcd etcd/etcd $ETCD_FLAGS
if [ ${withMonitoring} = true ] ; then
helm install nem-monitoring cord/nem-monitoring \
@@ -231,6 +233,19 @@
_TAG=etcd-port-forward kubectl port-forward --address 0.0.0.0 -n default service/etcd $VOLTHA_ETCD_PORT:2379&
"""
}
+ sh returnStdout: false, script: '''
+ # start logging with kail
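+      # each kail process is tagged with _TAG=kail-<app> so the teardown block in
+      # the post section can find it with ps/grep and kill it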
+
+ LOG_FOLDER=$WORKSPACE/logs
+ mkdir -p $LOG_FOLDER
+
+ list=($APPS_TO_LOG)
+ for app in "${list[@]}"
+ do
+ echo "Starting logs for: ${app}"
+ _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
+ done
+ '''
// bbsim-sadis server takes a while to cache the subscriber entries
// wait for that before starting the tests
sleep(120)
@@ -239,9 +254,6 @@
stage('Configuration') {
steps {
sh '''
- # Always deactivate org.opencord.kafka
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.kafka
-
#Setting link discovery
sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
@@ -253,20 +265,6 @@
sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
- if [ ${withEapol} = false ] || [ ${withFlows} = false ]; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.aaa
- fi
-
- if [ ${withDhcp} = false ] || [ ${withFlows} = false ]; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.dhcpl2relay
- fi
-
- if [ ${withIgmp} = false ] || [ ${withFlows} = false ]; then
- # FIXME will actually affected the tests only after VOL-3054 is addressed
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.igmpproxy
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.mcast
- fi
-
if [ ${withFlows} = false ]; then
sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
fi
@@ -376,6 +374,21 @@
// event of a timeout in the tests
sh '''
+ # stop the kail processes
+ list=($APPS_TO_LOG)
+ for app in "${list[@]}"
+ do
+ echo "Stopping logs for: ${app}"
+ _TAG="kail-$app"
+ P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
+ if [ -n "$P_IDS" ]; then
+ echo $P_IDS
+ for P_ID in $P_IDS; do
+ kill -9 $P_ID
+ done
+ fi
+ done
+
if [ ${withPcap} = true ] ; then
# stop ofAgent tcpdump
P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
@@ -390,7 +403,7 @@
cd voltha-system-tests
source ./vst_venv/bin/activate
- python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt
+ python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
cat $WORKSPACE/execution-time.txt
'''
sh '''
@@ -432,8 +445,6 @@
unstableThreshold: 0]);
// get all the logs from kubernetes PODs
sh returnStdout: false, script: '''
- LOG_FOLDER=$WORKSPACE/logs
- mkdir -p $LOG_FOLDER
# store information on running charts
helm ls > $LOG_FOLDER/helm-list.txt || true
@@ -443,18 +454,6 @@
kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
- # log in individual files for all the container that match the selector app=$APP_TO_LOG
- APPS_TO_LOG=(etcd kafka onos adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius)
- for app in "${APPS_TO_LOG[@]}"
- do
- echo "Getting logs for: ${app}"
- kubectl get pods -l app=${app} -o=jsonpath=\"{.items[*]['metadata.name']}\"
- printf '%s\n' $(kubectl get pods -l app=$app -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I# bash -c "kubectl logs # > $LOG_FOLDER/#.log" || true
-
- # Get the logs from the previous POD if any (useful in case of restarts)
- printf '%s\n' $(kubectl get pods -l app=$app -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I# bash -c "kubectl logs -p # > $LOG_FOLDER/#-previous.log" || true
- done
-
# copy the ONOS logs directly from the container to avoid the color codes
printf '%s\n' $(kubectl get pods -l app=onos-onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
'''
@@ -523,10 +522,12 @@
}
// get cpu usage by container
sh '''
- cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate
- sleep 60 # we have to wait for prometheus to collect all the information
- python tests/scale/sizing.py -o $WORKSPACE/plots || true
+ if [ ${withMonitoring} = true ] ; then
+ cd $WORKSPACE/voltha-system-tests
+ source ./vst_venv/bin/activate
+ sleep 60 # we have to wait for prometheus to collect all the information
+ python tests/scale/sizing.py -o $WORKSPACE/plots || true
+ fi
'''
archiveArtifacts artifacts: 'kind-voltha/install-minimal.log,execution-time.txt,logs/*,logs/pprof/*,RobotLogs/*,plots/*.txt,plots/*.pdf,etcd-metrics/*'
}
diff --git a/jjb/verify/voltha-go.yaml b/jjb/verify/voltha-go.yaml
index 089eaa5..3ae3bc6 100644
--- a/jjb/verify/voltha-go.yaml
+++ b/jjb/verify/voltha-go.yaml
@@ -41,4 +41,4 @@
build-timeout: 30
docker-repo: 'voltha'
dependency-jobs: 'version-tag'
- extraEnvironmentVars: BUILD_PROFILED=true
+ extraEnvironmentVars: BUILD_PROFILED=true BUILD_RACE=true
diff --git a/jjb/voltha-e2e.yaml b/jjb/voltha-e2e.yaml
index 60b9506..bf2ed71 100644
--- a/jjb/voltha-e2e.yaml
+++ b/jjb/voltha-e2e.yaml
@@ -88,7 +88,7 @@
name: 'verify_physical_voltha_patchset_auto'
build-node: 'tucson-pod'
config-pod: 'tucson-pod'
- oltDebVersionMaster: 'openolt_asfvolt16-2.4.9-dev-d4aeca5a2094f7dc7c519913ab1558348c546dab.deb'
+ oltDebVersionMaster: 'openolt_asfvolt16-2.4.9-dev-bal-3.4.7.5-fixed.deb'
oltDebVersionVoltha23: 'openolt_asfvolt16-2.4.9-dev-d4aeca5a2094f7dc7c519913ab1558348c546dab.deb'
profile: 'Default'
@@ -97,9 +97,26 @@
name: 'verify_physical_voltha_patchset_manual'
build-node: 'tucson-pod'
config-pod: 'tucson-pod'
- oltDebVersionMaster: 'openolt_asfvolt16-2.4.9-dev-d4aeca5a2094f7dc7c519913ab1558348c546dab.deb'
+ oltDebVersionMaster: 'openolt_asfvolt16-2.4.9-dev-bal-3.4.7.5-fixed.deb'
oltDebVersionVoltha23: 'openolt_asfvolt16-2.4.9-dev-d4aeca5a2094f7dc7c519913ab1558348c546dab.deb'
profile: 'Default'
+ trigger-string: 'hardware test'
+ default-test-args: '-i sanityORDeleteOLT -X'
+
+
+ # Per-patchset Pod builds on Tucson pod
+ - 'verify_physical_voltha_patchset_manual':
+ name: 'verify_physical_voltha_patchset_manual_DT'
+ build-node: 'tucson-pod'
+ config-pod: 'tucson-pod-DT'
+ oltDebVersionMaster: 'openolt_asfvolt16-2.4.9-dev-bal-3.4.7.5-fixed.deb'
+ oltDebVersionVoltha23: 'openolt_asfvolt16-2.4.9-dev-d4aeca5a2094f7dc7c519913ab1558348c546dab.deb'
+ profile: 'Default'
+ pipeline-script: 'voltha-dt-physical-build-and-tests.groovy'
+ trigger-string: 'DT hardware test'
+ default-test-args: '-i sanityDt -X'
+
+
# Manual build job for Tucson pod
# Allow local testing without disrupting above job
@@ -108,7 +125,7 @@
build-node: 'tucson-pod'
config-pod: 'tucson-pod'
manualBranch: 'master'
- oltDebVersionMaster: 'openolt_asfvolt16-2.4.9-dev-d4aeca5a2094f7dc7c519913ab1558348c546dab.deb'
+ oltDebVersionMaster: 'openolt_asfvolt16-2.4.9-dev-bal-3.4.7.5-fixed.deb'
oltDebVersionVoltha23: 'openolt_asfvolt16-2.4.9-dev-d4aeca5a2094f7dc7c519913ab1558348c546dab.deb'
profile: 'Default'
@@ -306,6 +323,7 @@
Copyright (c) 2019 Open Networking Foundation (ONF)
sandbox: true
pipeline-script: 'voltha-physical-build-and-tests.groovy'
+ default-test-args: '-i sanityORDeleteOLT -X'
properties:
- cord-infra-properties:
@@ -409,7 +427,7 @@
- string:
name: extraRobotArgs
- default: '-i sanityORDeleteOLT -X'
+ default: '{default-test-args}'
description: 'Arguments to pass to robot'
project-type: pipeline
@@ -448,6 +466,8 @@
Copyright (c) 2019 Open Networking Foundation (ONF)
sandbox: true
pipeline-script: 'voltha-physical-build-and-tests.groovy'
+ trigger-string: 'hardware test'
+ default-test-args: '-i sanityORDeleteOLT -X'
properties:
- cord-infra-properties:
@@ -551,7 +571,7 @@
- string:
name: extraRobotArgs
- default: '-i sanityORDeleteOLT -X'
+ default: '{default-test-args}'
description: 'Arguments to pass to robot'
project-type: pipeline
@@ -564,14 +584,14 @@
server-name: '{gerrit-server-name}'
dependency-jobs: '{dependency-jobs}'
silent-start: false
- successful-message: "PASSED hardware test"
- failure-message: "FAILED hardware test"
- unstable-message: "UNSTABLE hardware test"
+ successful-message: "PASSED {trigger-string}"
+ failure-message: "FAILED {trigger-string}"
+ unstable-message: "UNSTABLE {trigger-string}"
trigger-on:
- comment-added-contains-event:
- comment-contains-value: '^hardware test$'
+ comment-contains-value: '^{trigger-string}$'
- comment-added-contains-event:
- comment-contains-value: '^hardware test with delay$'
+ comment-contains-value: '^{trigger-string} with delay$'
projects:
- project-compare-type: REG_EXP
project-pattern: '^(voltha-go|voltha-openolt-adapter|voltha-openonu-adapter|voltha-api-server|voltha-system-tests|ofagent-py|ofagent-go|voltha-onos|kind-voltha|voltha-helm-charts)$'
diff --git a/jjb/voltha-scale.yaml b/jjb/voltha-scale.yaml
index 9548e35..b6ce923 100644
--- a/jjb/voltha-scale.yaml
+++ b/jjb/voltha-scale.yaml
@@ -77,6 +77,7 @@
onosReplicas: 3
atomixReplicas: 3
extraHelmFlags: '--set defaults.rw_core.timeout=30s '
+ withPcap: true
# jobs for DT with 512 ONUs with a 8 openonu-adapters and clustered ONOS.
- 'voltha-scale-measurements':
@@ -195,6 +196,7 @@
openoltAdapterImg: ''
openonuAdapterImg: ''
onosImg: ''
+ withPcap: true
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-voltha-2.4-2-16-32-dt-subscribers'
@@ -341,6 +343,16 @@
description: 'How many Atomix instances to run'
- string:
+ name: kafkaReplicas
+ default: '{kafkaReplicas}'
+ description: 'How many Kafka instances to run'
+
+ - string:
+ name: etcdReplicas
+ default: '{etcdReplicas}'
+ description: 'How many ETCD instances to run'
+
+ - string:
name: onosStatInterval
default: '{onosStatInterval}'
description: 'How often ONOS should poll for ports, flows and meters'
@@ -469,6 +481,8 @@
openonuAdapterReplicas: 1
onosReplicas: 1
atomixReplicas: 0
+ kafkaReplicas: 3
+ etcdReplicas: 3
extraHelmFlags: ''
onosStatInterval: 5
volthaSystemTestsChange: ''
@@ -542,6 +556,8 @@
openonuAdapterReplicas: 1
onosReplicas: 1
atomixReplicas: 0
+ kafkaReplicas: 1
+ etcdReplicas: 1
extraHelmFlags: ''
onosStatInterval: 5
volthaSystemTestsChange: ''