Merge "[VOL-2997] - add new pipeline to perform testcases in bbsim"
diff --git a/jjb/comac-in-a-box-github.yaml b/jjb/comac-in-a-box-github.yaml
new file mode 100644
index 0000000..60eeec0
--- /dev/null
+++ b/jjb/comac-in-a-box-github.yaml
@@ -0,0 +1,63 @@
+# COMAC-in-a-box test for the OMEC projects on GitHub
+
+- job-template:
+ id: 'omec-comac-in-a-box'
+ name: 'omec_{project}_comac-in-a-box'
+ project-type: pipeline
+
+ pipeline-script: 'comac-in-a-box-github.groovy'
+
+ description: |
+ Automated build on AMI executor using {pipeline-script}. <br /><br />
+ Created from job-template {id} from ci-management/jjb/comac-in-a-box-github.yaml <br />
+ Created by Jeremy Ronquillo, jeremyr@opennetworking.org <br />
+ Copyright (c) 2020 Open Networking Foundation (ONF)
+
+ properties:
+ - cord-infra-properties:
+ build-days-to-keep: '{build-days-to-keep}'
+ artifact-num-to-keep: '{artifact-num-to-keep}'
+ - github:
+ url: 'https://github.com/{github-organization}/{project}'
+
+ wrappers:
+ - lf-infra-wrappers:
+ build-timeout: '{build-timeout}'
+ jenkins-ssh-credential: '{jenkins-ssh-credential}'
+
+ parameters:
+ - string:
+ name: buildNode
+ default: '{build-node}'
+ description: 'Name of the Jenkins executor node to run the job on'
+
+ - string:
+ name: project
+ default: '{project}'
+ description: 'Name of the project'
+
+ - string:
+ name: branch
+ default: '$ghprbTargetBranch'
+ description: 'Replace value to test manually.'
+
+ - string:
+ name: ghprbPullId
+ default: '$ghprbPullId'
+ description: 'GitHub Pull-Request builder value. Delete default value to test manually.'
+
+ - string:
+ name: ghprbActualCommit
+ default: '$ghprbActualCommit'
+ description: 'GitHub Pull-Request builder value. Delete default value to test manually.'
+
+ triggers:
+ - cord-infra-github-pr-trigger:
+ github_pr_org_list: '{obj:github_pr_org_list}'
+ github_pr_auth_id: '{github_pr_auth_id}'
+ status_context: 'CORD Jenkins - COMAC-in-a-box Verification'
+
+ concurrent: false
+
+ dsl: !include-raw-escape: pipeline/{pipeline-script}
diff --git a/jjb/device-management.yaml b/jjb/device-management.yaml
new file mode 100644
index 0000000..cd8b9f6
--- /dev/null
+++ b/jjb/device-management.yaml
@@ -0,0 +1,252 @@
+---
+# device-management tests
+
+- project:
+ name: device-management-e2e
+
+ project-name: '{name}'
+
+ jobs:
+ # Per-patchset Pod builds on Tucson pod
+ - 'verify_physical_device-management_patchset_manual':
+ name: 'verify_physical_device-management_patchset_manual'
+ testvm: 'tucson-pod'
+ config-pod: 'tucson-pod'
+ branch: 'master'
+ oltDebVersion: 'openolt_asfvolt16-2.3.0-bc6e0853e0e8bf6bd7e4223d4a7ee0dd35ce634d.deb'
+ profile: 'Default'
+ withPatchset: true
+
+- job-template:
+ id: 'device-management-patch-test'
+ name: 'verify_{project}_sanity-test{name-extension}'
+ extra-helm-flags: ''
+ skip-vote: false
+
+ description: |
+ <!-- Managed by Jenkins Job Builder -->
+ Created by {id} job-template from ci-management/jjb/device-management.yaml <br /><br />
+ Validation for device-management using mock redfish servers
+
+ properties:
+ - cord-infra-properties:
+ build-days-to-keep: '{build-days-to-keep}'
+ artifact-num-to-keep: '{artifact-num-to-keep}'
+
+ wrappers:
+ - lf-infra-wrappers:
+ build-timeout: '{build-timeout}'
+ jenkins-ssh-credential: '{jenkins-ssh-credential}'
+
+ parameters:
+ - string:
+ name: buildNode
+ default: 'ubuntu16.04-basebuild-4c-8g'
+ description: 'Name of the Jenkins node to run the job on'
+
+ - string:
+ name: manifestUrl
+ default: '{gerrit-server-url}/{voltha-test-manifest-repo}'
+ description: 'URL to the repo manifest'
+
+ - string:
+ name: manifestBranch
+ default: 'master'
+ description: 'Name of the repo branch to use'
+
+ - string:
+ name: gerritProject
+ default: '$GERRIT_PROJECT'
+ description: 'Name of the Gerrit project'
+
+ - string:
+ name: gerritChangeNumber
+ default: '$GERRIT_CHANGE_NUMBER'
+ description: 'Changeset number in Gerrit'
+
+ - string:
+ name: gerritPatchsetNumber
+ default: '$GERRIT_PATCHSET_NUMBER'
+ description: 'PatchSet number in Gerrit'
+
+ - string:
+ name: extraHelmFlags
+ default: '{extra-helm-flags}'
+ description: 'Helm flags to pass to ./voltha up'
+
+ project-type: pipeline
+ concurrent: true
+
+ dsl: !include-raw-escape: pipeline/{pipeline-script}
+
+ triggers:
+ - gerrit:
+ server-name: '{gerrit-server-name}'
+ dependency-jobs: '{dependency-jobs}'
+ silent-start: true
+ trigger-on:
+ - patchset-created-event:
+ exclude-drafts: true
+ exclude-trivial-rebase: false
+ exclude-no-code-change: true
+ - draft-published-event
+ - comment-added-contains-event:
+ comment-contains-value: '(?i)^.*recheck$'
+ projects:
+ - project-compare-type: REG_EXP
+ project-pattern: '^device-management$'
+ branches:
+ - branch-compare-type: REG_EXP
+ branch-pattern: '{branch-regexp}'
+ file-paths:
+ - compare-type: REG_EXP
+ pattern: '{all-files-regexp}'
+ skip-vote:
+ successful: '{skip-vote}'
+ failed: '{skip-vote}'
+ unstable: '{skip-vote}'
+ notbuilt: '{skip-vote}'
+
+# POD Per Patchset Pipeline Jobs
+
+- job-template:
+ name: '{name}'
+ id: verify_physical_device-management_patchset_manual
+ description: |
+ <!-- Managed by Jenkins Job Builder -->
+ Automated build on POD {config-pod} using {pipeline-script} <br /><br />
+ Created from job-template {id} from ci-management/jjb/device-management.yaml <br />
+ Created by Andy Bavier, andy@opennetworking.org <br />
+ Copyright (c) 2019 Open Networking Foundation (ONF)
+ sandbox: true
+ pipeline-script: 'device-management-physical-build-and-tests.groovy'
+
+ properties:
+ - cord-infra-properties:
+ build-days-to-keep: '{build-days-to-keep}'
+ artifact-num-to-keep: '{artifact-num-to-keep}'
+
+ parameters:
+ - string:
+ name: buildNode
+ default: '{testvm}'
+ description: 'Pod management node'
+
+ - string:
+ name: manifestUrl
+ default: '{gerrit-server-url}/{voltha-test-manifest-repo}'
+ description: 'URL to the repo manifest'
+
+ - string:
+ name: manifestBranch
+ default: master
+ description: 'Name of the repo branch to use'
+
+ - string:
+ name: gerritProject
+ default: '$GERRIT_PROJECT'
+ description: 'Name of the Gerrit project'
+
+ - string:
+ name: gerritChangeNumber
+ default: '$GERRIT_CHANGE_NUMBER'
+ description: 'Changeset number in Gerrit'
+
+ - string:
+ name: gerritPatchsetNumber
+ default: '$GERRIT_PATCHSET_NUMBER'
+ description: 'PatchSet number in Gerrit'
+
+ - string:
+ name: cordRepoUrl
+ default: '{gerrit-server-url}'
+ description: 'The URL of the CORD Project repository'
+
+ - string:
+ name: podName
+ default: '{config-pod}'
+
+ - string:
+ name: deploymentConfigFile
+ default: 'pod-configs/deployment-configs/{config-pod}.yaml'
+ description: 'Path of deployment config file'
+
+ - string:
+ name: kindVolthaValuesFile
+ default: 'pod-configs/kubernetes-configs/voltha/{config-pod}.yml'
+ description: 'Path of kind-voltha values override file'
+
+ - string:
+ name: sadisConfigFile
+ default: 'voltha/voltha-system-tests/tests/data/{config-pod}-sadis.json'
+ description: 'Path of SADIS config to load'
+
+ - string:
+ name: localConfigDir
+ default: null
+ description: 'If specified, config file paths are relative to this dir; otherwise $WORKSPACE'
+
+ - string:
+ name: configRepo
+ default: 'pod-configs'
+ description: 'A repository containing the config files; it will be checked out if specified'
+
+ - string:
+ name: oltDebVersion
+ default: '{oltDebVersion}'
+ description: 'OLT Software version to install'
+
+ - string:
+ name: branch
+ default: '{branch}'
+
+ - string:
+ name: profile
+ default: '{profile}'
+ description: 'Technology Profile pushed to the ETCD'
+
+ - string:
+ name: notificationEmail
+ default: 'andy@opennetworking.org'
+ description: ''
+
+ - bool:
+ name: reinstallOlt
+ default: true
+ description: "Re-install OLT software"
+
+ - bool:
+ name: withPatchset
+ default: true
+ description: "Build with Gerrit patchset"
+
+ - string:
+ name: extraRobotArgs
+ default: '-i sanity'
+ description: 'Arguments to pass to robot'
+
+ project-type: pipeline
+ concurrent: true
+
+ dsl: !include-raw-escape: pipeline/{pipeline-script}
+
+ triggers:
+ - gerrit:
+ server-name: '{gerrit-server-name}'
+ dependency-jobs: '{dependency-jobs}'
+ silent-start: false
+ successful-message: "PASSED hardware test"
+ failure-message: "FAILED hardware test"
+ unstable-message: "UNSTABLE hardware test"
+ trigger-on:
+ - comment-added-contains-event:
+ comment-contains-value: '^hardware test$'
+ - comment-added-contains-event:
+ comment-contains-value: '^hardware test with delay$'
+ projects:
+ - project-compare-type: REG_EXP
+ project-pattern: '^device-management$'
+ branches:
+ - branch-compare-type: PLAIN
+ branch-pattern: 'master'
+
diff --git a/jjb/docker-publish.yaml b/jjb/docker-publish.yaml
index f9f878a..d264eb9 100644
--- a/jjb/docker-publish.yaml
+++ b/jjb/docker-publish.yaml
@@ -72,7 +72,13 @@
default: '{maintainers}'
description: "The person that sould be notified if this job fails"
+ - string:
+ name: extraEnvironmentVars
+ default: '{extraEnvironmentVars}'
+ description: "Provide extra environment variables to the build"
+
project-type: pipeline
concurrent: true
+ extraEnvironmentVars: ""
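+ # The default is empty; individual projects can override it, e.g.
+ # "extraEnvironmentVars: BUILD_PROFILED=true" as the jjb/verify/voltha-go.yaml
+ # change in this patch does.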
dsl: !include-raw-escape: pipeline/docker-publish.groovy
diff --git a/jjb/omec-ci.yaml b/jjb/omec-ci.yaml
index fb0ce6c..4445543 100644
--- a/jjb/omec-ci.yaml
+++ b/jjb/omec-ci.yaml
@@ -55,6 +55,8 @@
build-timeout: 30
docker-repo: 'omecproject'
build-node: 'ubuntu16.04-basebuild-4c-8g'
+ - 'omec-comac-in-a-box':
+ build-node: 'comac_in_a_box'
# for c3po
- project:
@@ -90,7 +92,9 @@
- 'docker-publish-github':
build-timeout: 30
docker-repo: 'omecproject'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu16.04-basebuild-8c-15g'
+ - 'omec-comac-in-a-box':
+ build-node: 'comac_in_a_box'
# for openmme
- project:
@@ -123,6 +127,8 @@
build-timeout: 30
docker-repo: 'omecproject'
build-node: 'ubuntu16.04-basebuild-1c-2g'
+ - 'omec-comac-in-a-box':
+ build-node: 'comac_in_a_box'
# for nucleus
- project:
@@ -151,6 +157,8 @@
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
build-node: 'omec-qa'
+ - 'omec-comac-in-a-box':
+ build-node: 'comac_in_a_box'
# for freediameter
- project:
diff --git a/jjb/pipeline/comac-in-a-box-gerrit.groovy b/jjb/pipeline/comac-in-a-box-gerrit.groovy
index dc2be39..e3f20e9 100644
--- a/jjb/pipeline/comac-in-a-box-gerrit.groovy
+++ b/jjb/pipeline/comac-in-a-box-gerrit.groovy
@@ -66,11 +66,26 @@
stage ("Run COMAC-in-a-box"){
steps {
- sh label: 'Run Makefile', script: """
- cd $HOME/automation-tools/comac-in-a-box/
- sudo make reset-test
- sudo make test
- """
+ script{
+ try{
+ sh label: 'Run Makefile', script: """
+ cd $HOME/automation-tools/comac-in-a-box/
+ sudo make reset-test
+ sudo make test
+ """
+ } finally {
+ sh label: 'Archive Logs', script: '''
+ mkdir -p logs
+ mkdir -p logs/pods
+ kubectl get pods -n omec > logs/kubectl_get_pods_omec.log
+ for pod in $(kubectl get pods -n omec | awk '{print $1}' | tail -n +2)
+ do
+ kubectl logs -n omec $pod --all-containers > logs/pods/$pod.log || true
+ done
+ '''
+ archiveArtifacts artifacts: "logs/**/*.log", allowEmptyArchive: true
+ }
+ }
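+ // (Illustrative) the archived artifacts end up as logs/kubectl_get_pods_omec.log
+ // plus one logs/pods/<pod>.log file per pod in the omec namespace.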
}
}
}
diff --git a/jjb/pipeline/comac-in-a-box-github.groovy b/jjb/pipeline/comac-in-a-box-github.groovy
new file mode 100644
index 0000000..f2791ec
--- /dev/null
+++ b/jjb/pipeline/comac-in-a-box-github.groovy
@@ -0,0 +1,157 @@
+// Copyright 2020-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// comac-in-a-box-github build+test
+// steps taken from https://guide.opencord.org/profiles/comac/install/ciab.html
+
+docker_tag = ""
+abbreviated_commit_hash = ""
+
+pipeline {
+
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+
+ options {
+ timeout(time: 1, unit: 'HOURS')
+ }
+
+ environment {
+
+ omec_cp = "$HOME/cord/helm-charts/omec/omec-control-plane/values.yaml"
+ omec_dp = "$HOME/cord/helm-charts/omec/omec-data-plane/values.yaml"
+ }
+
+ stages {
+ stage ("Environment Setup"){
+ steps {
+ sh label: 'Clean Logs', script: """
+ rm -rf logs/
+ """
+ sh label: 'Run COMAC-in-a-box reset-test', script: """
+ echo $HOME
+ cd $HOME/automation-tools/comac-in-a-box/
+ sudo make reset-test
+ """
+ sh label: 'Cleanup Docker Images', script: '''
+ sudo docker rmi -f $(sudo docker images --format '{{.Repository}} {{.ID}}' | grep 'none' | awk '{print $2}') || true
+ sudo docker rmi -f $(sudo docker images --format '{{.Repository}}:{{.Tag}}' | grep 'openmme') || true
+ sudo docker rmi -f $(sudo docker images --format '{{.Repository}}:{{.Tag}}' | grep 'ngic') || true
+ sudo docker rmi -f $(sudo docker images --format '{{.Repository}}:{{.Tag}}' | grep 'c3po') || true
+ '''
+ sh label: 'helm-charts Repo Fresh Clone', script: """
+ cd $HOME/cord/
+ sudo rm -rf helm-charts/
+ git clone https://gerrit.opencord.org/helm-charts
+ """
+ }
+ }
+
+ stage ("Build Local Docker Image"){
+ steps {
+ script {
+ if (params.ghprbPullId == ""){
+ docker_tag = "jenkins_debug"
+ } else {
+ pull_request_num = "PR_${params.ghprbPullId}"
+ abbreviated_commit_hash = params.ghprbActualCommit.substring(0, 7)
+ docker_tag = "${params.branch}-${pull_request_num}-${abbreviated_commit_hash}"
+ }
+ }
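+ // Example (hypothetical values): a build of PR 123 against branch "master" at
+ // commit abc1234... yields docker_tag "master-PR_123-abc1234"; manual runs with
+ // no ghprbPullId fall back to the "jenkins_debug" tag.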
+ sh label: 'Clone repo', script: """
+ rm -rf ${params.project}
+ if [ "${params.project}" = "c3po" ]
+ then
+ git clone https://github.com/omec-project/${params.project} --recursive
+ else
+ git clone https://github.com/omec-project/${params.project}
+ fi
+ cd ${params.project}
+ if [ ! -z "${params.ghprbPullId}" ]
+ then
+ echo "Checking out GitHub Pull Request: ${params.ghprbPullId}"
+ git fetch origin pull/${params.ghprbPullId}/head && git checkout FETCH_HEAD
+ else
+ echo "GERRIT_REFSPEC not provided. Checking out target branch."
+ git checkout ${params.branch}
+ fi
+ sudo make DOCKER_TAG=${docker_tag} docker-build
+ """
+
+ }
+ }
+
+ stage ("Change Helm-Charts Docker Tags"){
+ steps {
+ sh label: 'Change Helm-Charts Docker Tags', script: """
+ if [ "${params.project}" = "c3po" ]
+ then
+ sed -i "s;hssdb: docker.*;hssdb: \\"c3po-hssdb:${docker_tag}\\";" ${omec_cp}
+ sed -i "s;hss: .*;hss: \\"c3po-hss:${docker_tag}\\";" ${omec_cp}
+ echo "Changed hssdb and hss tag: ${docker_tag}"
+ elif [ "${params.project}" = "openmme" ]
+ then
+ sed -i "s;mme: .*;mme: \\"openmme:${docker_tag}\\";" ${omec_cp}
+ echo "Changed mme tag: ${docker_tag}"
+ elif [ "${params.project}" = "Nucleus" ]
+ then
+ sed -i "s;mme: .*;mme: \\"openmme:${docker_tag}\\";" ${omec_cp} # nucleus shares the same docker name as openmme.
+ echo "Changed mme tag: ${docker_tag}"
+ elif [ "${params.project}" = "ngic-rtc" ]
+ then
+ sed -i "s;spgwc: .*;spgwc: \\"ngic-cp:${docker_tag}\\";" ${omec_cp}
+ sed -i "s;spgwu: .*;spgwu: \\"ngic-dp:${docker_tag}-debug\\";" ${omec_dp}
+ echo "Changed spgwc and spgwu tag: ${docker_tag}"
+ else
+ echo "The project ${params.project} is not supported. Aborting job."
+ exit 1
+ fi
+
+ echo "omec_cp:"
+ cat "${omec_cp}"
+
+ echo "omec_dp:"
+ cat "${omec_dp}"
+ """
+ }
+ }
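+ // Example (illustrative): for project "c3po", any values.yaml line starting with
+ // "hss: " in ${omec_cp} is rewritten by the sed above to  hss: "c3po-hss:<docker_tag>".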
+
+ stage ("Run COMAC-in-a-box"){
+ steps {
+ script{
+ try{
+ sh label: 'Run Makefile', script: """
+ cd $HOME/automation-tools/comac-in-a-box/
+ sudo make reset-test
+ sudo make test
+ """
+ } finally {
+ sh label: 'Archive Logs', script: '''
+ mkdir -p logs
+ mkdir -p logs/pods
+ kubectl get pods -n omec > logs/kubectl_get_pods_omec.log
+ for pod in $(kubectl get pods -n omec | awk '{print $1}' | tail -n +2)
+ do
+ kubectl logs -n omec $pod --all-containers > logs/pods/$pod.log || true
+ done
+ '''
+ archiveArtifacts artifacts: "logs/**/*.log", allowEmptyArchive: true
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/jjb/pipeline/device-management-mock-tests.groovy b/jjb/pipeline/device-management-mock-tests.groovy
new file mode 100644
index 0000000..fafff45
--- /dev/null
+++ b/jjb/pipeline/device-management-mock-tests.groovy
@@ -0,0 +1,198 @@
+// Copyright 2017-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// device-management mock tests
+// uses kind-voltha to deploy voltha-2.X
+// uses mock redfish servers to validate device-management
+
+pipeline {
+
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 90, unit: 'MINUTES')
+ }
+ environment {
+ KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
+ VOLTCONFIG="$HOME/.volt/config-minimal"
+ PATH="$WORKSPACE/voltha/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ TYPE="minimal"
+ FANCY=0
+ WITH_SIM_ADAPTERS="n"
+ WITH_RADIUS="y"
+ WITH_BBSIM="y"
+ DEPLOY_K8S="y"
+ VOLTHA_LOG_LEVEL="DEBUG"
+ CONFIG_SADIS="n"
+ ROBOT_MISC_ARGS="-d $WORKSPACE/RobotLogs"
+ }
+
+ stages {
+
+ stage('Repo') {
+ steps {
+ step([$class: 'WsCleanup'])
+ checkout(changelog: false, \
+ poll: false,
+ scm: [$class: 'RepoScm', \
+ manifestRepositoryUrl: "${params.manifestUrl}", \
+ manifestBranch: "${params.manifestBranch}", \
+ currentBranch: true, \
+ destinationDir: 'voltha', \
+ forceSync: true,
+ resetFirst: true, \
+ quiet: true, \
+ jobs: 4, \
+ showAllChanges: true] \
+ )
+ }
+ }
+ stage('Patch') {
+ steps {
+ sh """
+ pushd $WORKSPACE/
+ echo "${gerritProject}" "${gerritChangeNumber}" "${gerritPatchsetNumber}"
+ echo "${GERRIT_REFSPEC}"
+ git clone https://gerrit.opencord.org/${gerritProject}
+ cd "${gerritProject}"
+ git fetch https://gerrit.opencord.org/${gerritProject} "${GERRIT_REFSPEC}" && git checkout FETCH_HEAD
+ popd
+ """
+ }
+ }
+ stage('Create K8s Cluster') {
+ steps {
+ sh """
+ cd $WORKSPACE/voltha/kind-voltha/
+ JUST_K8S=y ./voltha up
+ bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/voltha/kind-voltha/bin"
+ """
+ }
+ }
+
+ stage('Build Redfish Importer Image') {
+ steps {
+ sh """
+ make -C $WORKSPACE/device-management/\$1 DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build-importer
+ """
+ }
+ }
+
+ stage('Build demo_test Image') {
+ steps {
+ sh """
+ make -C $WORKSPACE/device-management/\$1/demo_test DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
+ """
+ }
+ }
+
+ stage('Build mock-redfish-server Image') {
+ steps {
+ sh """
+ make -C $WORKSPACE/device-management/\$1/mock-redfish-server DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
+ """
+ }
+ }
+
+ stage('Push Images') {
+ steps {
+ sh '''
+ docker images | grep citest
+ for image in \$(docker images -f "reference=*/*citest" --format "{{.Repository}}"); do echo "Pushing \$image to nodes"; kind load docker-image \$image:citest --name voltha-\$TYPE --nodes voltha-\$TYPE-worker,voltha-\$TYPE-worker2; done
+ '''
+ }
+ }
+ stage('Deploy Voltha') {
+ steps {
+ sh '''
+ export EXTRA_HELM_FLAGS="--set log_agent.enabled=False ${extraHelmFlags} "
+
+ cd $WORKSPACE/voltha/kind-voltha/
+ echo \$EXTRA_HELM_FLAGS
+ kail -n voltha -n default > $WORKSPACE/onos-voltha-combined.log &
+ ./voltha up
+ '''
+ }
+ }
+
+ stage('Run E2E Tests') {
+ steps {
+ sh '''
+ mkdir -p $WORKSPACE/RobotLogs
+
+ # tell the kubernetes script to use images tagged citest and pullPolicy:Never
+ sed -i 's/master/citest/g' $WORKSPACE/device-management/kubernetes/deploy-redfish-importer.yaml
+ sed -i 's/imagePullPolicy: Always/imagePullPolicy: Never/g' $WORKSPACE/device-management/kubernetes/deploy-redfish-importer.yaml
+ make -C $WORKSPACE/device-management functional-mock-test || true
+ '''
+ }
+ }
+ }
+
+ post {
+ always {
+ sh '''
+ set +e
+ cp $WORKSPACE/voltha/kind-voltha/install-minimal.log $WORKSPACE/
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
+ kubectl get nodes -o wide
+ kubectl get pods -o wide
+ kubectl get pods -n voltha -o wide
+
+ sync
+ pkill kail || true
+ md5sum $WORKSPACE/voltha/kind-voltha/bin/voltctl
+
+ ## Pull out errors from log files
+ extract_errors_go() {
+ echo
+ echo "Error summary for $1:"
+ grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
+ echo
+ }
+
+ extract_errors_python() {
+ echo
+ echo "Error summary for $1:"
+ grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
+ echo
+ }
+
+ extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
+ extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
+ extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
+ extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
+
+ gzip $WORKSPACE/onos-voltha-combined.log
+
+ ## shut down kind-voltha
+ cd $WORKSPACE/voltha/kind-voltha
+ WAIT_ON_DOWN=y ./voltha down
+ '''
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: 'RobotLogs/log*.html',
+ otherFiles: '',
+ outputFileName: 'RobotLogs/output*.xml',
+ outputPath: '.',
+ passThreshold: 80,
+ reportFileName: 'RobotLogs/report*.html',
+ unstableThreshold: 0]);
+ archiveArtifacts artifacts: '*.log,*.gz'
+ }
+ }
+}
diff --git a/jjb/pipeline/device-management-physical-build-and-tests.groovy b/jjb/pipeline/device-management-physical-build-and-tests.groovy
new file mode 100644
index 0000000..1a50e9e
--- /dev/null
+++ b/jjb/pipeline/device-management-physical-build-and-tests.groovy
@@ -0,0 +1,345 @@
+// Copyright 2019-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// deploy VOLTHA built from patchset on a physical pod and run e2e test
+// uses kind-voltha to deploy voltha-2.X
+
+// Need this so that deployment_config has global scope when it's read later
+deployment_config = null
+localDeploymentConfigFile = null
+localKindVolthaValuesFile = null
+localSadisConfigFile = null
+
+// The pipeline assumes these variables are always defined
+if ( ! params.withPatchset ) {
+ GERRIT_EVENT_COMMENT_TEXT = ""
+ GERRIT_PROJECT = ""
+ GERRIT_CHANGE_NUMBER = ""
+ GERRIT_PATCHSET_NUMBER = ""
+}
+
+pipeline {
+
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 90, unit: 'MINUTES')
+ }
+
+ environment {
+ KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
+ VOLTCONFIG="$HOME/.volt/config-minimal"
+ PATH="$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ TYPE="minimal"
+ FANCY=0
+ //VOL-2194 ONOS SSH and REST ports hardcoded to 30115/30120 in tests
+ ONOS_SSH_PORT=30115
+ ONOS_API_PORT=30120
+ }
+
+ stages {
+ stage ('Initialize') {
+ steps {
+ sh returnStdout: false, script: """
+ test -e $WORKSPACE/kind-voltha/voltha && cd $WORKSPACE/kind-voltha && ./voltha down
+ cd $WORKSPACE
+ rm -rf $WORKSPACE/*
+ """
+ script {
+ if (env.configRepo && ! env.localConfigDir) {
+ env.localConfigDir = "$WORKSPACE"
+ sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configRepo}"
+ }
+ localDeploymentConfigFile = "${env.localConfigDir}/${params.deploymentConfigFile}"
+ localKindVolthaValuesFile = "${env.localConfigDir}/${params.kindVolthaValuesFile}"
+ localSadisConfigFile = "${env.localConfigDir}/${params.sadisConfigFile}"
+ }
+ }
+ }
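+ // With the defaults from jjb/device-management.yaml (configRepo "pod-configs",
+ // config-pod "tucson-pod"), the repo is cloned into $WORKSPACE and
+ // localDeploymentConfigFile resolves to
+ // "$WORKSPACE/pod-configs/deployment-configs/tucson-pod.yaml".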
+
+ stage('Repo') {
+ steps {
+ checkout(changelog: true,
+ poll: false,
+ scm: [$class: 'RepoScm',
+ manifestRepositoryUrl: "${params.manifestUrl}",
+ manifestBranch: "${params.manifestBranch}",
+ currentBranch: true,
+ destinationDir: 'voltha',
+ forceSync: true,
+ resetFirst: true,
+ quiet: true,
+ jobs: 4,
+ showAllChanges: true]
+ )
+ }
+ }
+
+ stage('Get Patch') {
+ when {
+ expression { params.withPatchset }
+ }
+ steps {
+ sh returnStdout: false, script: """
+ cd voltha
+ repo download "${gerritProject}" "${gerritChangeNumber}/${gerritPatchsetNumber}"
+ """
+ }
+ }
+
+ stage('Check config files') {
+ steps {
+ script {
+ try {
+ deployment_config = readYaml file: "${localDeploymentConfigFile}"
+ } catch (err) {
+ echo "Error reading ${localDeploymentConfigFile}"
+ throw err
+ }
+ sh returnStdout: false, script: """
+ if [ ! -e ${localKindVolthaValuesFile} ]; then echo "${localKindVolthaValuesFile} not found"; exit 1; fi
+ if [ ! -e ${localSadisConfigFile} ]; then echo "${localSadisConfigFile} not found"; exit 1; fi
+ """
+ }
+ }
+ }
+
+ stage('Create KinD Cluster') {
+ steps {
+ sh returnStdout: false, script: """
+ git clone https://github.com/ciena/kind-voltha.git
+ cd kind-voltha/
+ JUST_K8S=y ./voltha up
+ """
+ }
+ }
+
+ stage('Build and Push Images') {
+ when {
+ expression { params.withPatchset }
+ }
+ steps {
+ sh returnStdout: false, script: """
+ make -C $WORKSPACE/voltha/\$1 DOCKER_REPOSITORY=voltha/ DOCKER_TAG=citest docker-build
+ docker images | grep citest
+ for image in \$(docker images -f "reference=*/*citest" --format "{{.Repository}}"); do echo "Pushing \$image to nodes"; kind load docker-image \$image:citest --name voltha-\$TYPE --nodes voltha-\$TYPE-worker,voltha-\$TYPE-worker2; done
+ """
+ }
+ }
+
+ stage('Deploy Voltha') {
+ environment {
+ WITH_SIM_ADAPTERS="n"
+ WITH_RADIUS="y"
+ DEPLOY_K8S="n"
+ VOLTHA_LOG_LEVEL="debug"
+ }
+ steps {
+ script {
+ sh returnStdout: false, script: """
+ export EXTRA_HELM_FLAGS='--set log_agent.enabled=False -f ${localKindVolthaValuesFile} '
+
+ cd $WORKSPACE/kind-voltha/
+ echo \$EXTRA_HELM_FLAGS
+ kail -n voltha -n default > $WORKSPACE/onos-voltha-combined.log &
+ ./voltha up
+ """
+ }
+ }
+ }
+
+ stage('Deploy Kafka Dump Chart') {
+ steps {
+ script {
+ sh returnStdout: false, script: """
+ helm repo add cord https://charts.opencord.org
+ helm repo update
+ helm install -n voltha-kafka-dump cord/voltha-kafka-dump
+ """
+ }
+ }
+ }
+
+ stage('Push Tech-Profile') {
+ when {
+ expression { params.profile != "Default" }
+ }
+ steps {
+ sh returnStdout: false, script: """
+ etcd_container=\$(kubectl get pods -n voltha | grep voltha-etcd-cluster | awk 'NR==1{print \$1}')
+ kubectl cp $WORKSPACE/voltha/voltha-system-tests/tests/data/TechProfile-${profile}.json voltha/\$etcd_container:/tmp/flexpod.json
+ kubectl exec -it \$etcd_container -n voltha -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/XGS-PON/64'
+ """
+ }
+ }
+
+ stage('Push Sadis-config') {
+ steps {
+ sh returnStdout: false, script: """
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:$ONOS_API_PORT/onos/v1/network/configuration --data @${localSadisConfigFile}
+ """
+ }
+ }
+
+ stage('Reinstall OLT software') {
+ when {
+ expression { params.reinstallOlt }
+ }
+ steps {
+ script {
+ deployment_config.olts.each { olt ->
+ sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'service openolt stop' || true"
+ sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'killall dev_mgmt_daemon' || true"
+ sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --remove asfvolt16 && dpkg --purge asfvolt16'"
+ waitUntil {
+ olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
+ return olt_sw_present.toInteger() == 0
+ }
+ sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --install ${oltDebVersion}'"
+ waitUntil {
+ olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
+ return olt_sw_present.toInteger() == 1
+ }
+ if ( olt.fortygig ) {
+ // If the OLT is connected to a 40G switch interface, set the NNI port to be downgraded
+ sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'echo port ce128 sp=40000 >> /broadcom/qax.soc ; /opt/bcm68620/svk_init.sh'"
+ }
+ }
+ }
+ }
+ }
+
+ stage('Restart OLT processes') {
+ steps {
+ script {
+ deployment_config.olts.each { olt ->
+ sh returnStdout: false, script: """
+ ssh-keyscan -H ${olt.ip} >> ~/.ssh/known_hosts
+ sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'service openolt stop' || true
+ sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'killall dev_mgmt_daemon' || true
+ sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'rm -f /var/log/openolt.log'
+ sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'rm -f /var/log/dev_mgmt_daemon.log'
+ sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'service dev_mgmt_daemon start &'
+ sleep 5
+ sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'service openolt start &'
+ """
+ waitUntil {
+ onu_discovered = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'grep \"onu discover indication\" /var/log/openolt.log | wc -l'"
+ return onu_discovered.toInteger() > 0
+ }
+ }
+ }
+ }
+ }
+
+ stage('Run E2E Tests') {
+ environment {
+ ROBOT_CONFIG_FILE="${localDeploymentConfigFile}"
+ ROBOT_MISC_ARGS="${params.extraRobotArgs} --removekeywords wuks -d $WORKSPACE/RobotLogs -v container_log_dir:$WORKSPACE "
+ ROBOT_FILE="Voltha_PODTests.robot"
+ }
+ steps {
+ sh returnStdout: false, script: """
+ cd voltha
+ git clone -b ${branch} ${cordRepoUrl}/cord-tester
+ mkdir -p $WORKSPACE/RobotLogs
+
+ # tell the kubernetes script to use images tagged citest and pullPolicy:Never
+ sed -i 's/master/citest/g' $WORKSPACE/voltha/device-management/kubernetes/deploy-redfish-importer.yaml
+ sed -i 's/imagePullPolicy: Always/imagePullPolicy: Never/g' $WORKSPACE/voltha/device-management/kubernetes/deploy-redfish-importer.yaml
+ make -C $WORKSPACE/voltha/device-management functional-mock-test || true
+ """
+ }
+ }
+
+ stage('After-Test Delay') {
+ when {
+ expression { params.withPatchset }
+ }
+ steps {
+ sh returnStdout: false, script: """
+ # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
+ REGEX="hardware test with delay\$"
+ [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]] && sleep 10m || true
+ """
+ }
+ }
+ }
+
+ post {
+ always {
+ sh returnStdout: false, script: '''
+ set +e
+ cp kind-voltha/install-minimal.log $WORKSPACE/
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\t'}{.imageID}{'\\n'}" | sort | uniq -c
+ kubectl get nodes -o wide
+ kubectl get pods -o wide
+ kubectl get pods -n voltha -o wide
+
+ sync
+ pkill kail || true
+
+ ## Pull out errors from log files
+ extract_errors_go() {
+ echo
+ echo "Error summary for $1:"
+ grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
+ echo
+ }
+
+ extract_errors_python() {
+ echo
+ echo "Error summary for $1:"
+ grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
+ echo
+ }
+
+ extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
+ extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
+ extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
+ extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
+
+ gzip $WORKSPACE/onos-voltha-combined.log
+
+ ## collect events, the chart should be running by now
+ kubectl get pods | grep -i voltha-kafka-dump | grep -i running
+ if [[ $? == 0 ]]; then
+ kubectl exec -it `kubectl get pods | grep -i voltha-kafka-dump | grep -i running | cut -f1 -d " "` ./voltha-dump-events.sh > $WORKSPACE/voltha-events.log
+ fi
+ '''
+ script {
+ deployment_config.olts.each { olt ->
+ sh returnStdout: false, script: """
+ sshpass -p ${olt.pass} scp ${olt.user}@${olt.ip}:/var/log/openolt.log $WORKSPACE/openolt-${olt.ip}.log || true
+ sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.ip}.log # Remove escape sequences
+ sshpass -p ${olt.pass} scp ${olt.user}@${olt.ip}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.ip}.log || true
+ sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.ip}.log # Remove escape sequences
+ """
+ }
+ }
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: 'RobotLogs/log*.html',
+ otherFiles: '',
+ outputFileName: 'RobotLogs/output*.xml',
+ outputPath: '.',
+ passThreshold: 100,
+ reportFileName: 'RobotLogs/report*.html',
+ unstableThreshold: 0]);
+ archiveArtifacts artifacts: '*.log,*.gz'
+ }
+ }
+}
diff --git a/jjb/pipeline/docker-publish.groovy b/jjb/pipeline/docker-publish.groovy
index a21eda9..316d429 100644
--- a/jjb/pipeline/docker-publish.groovy
+++ b/jjb/pipeline/docker-publish.groovy
@@ -31,6 +31,7 @@
[$class: 'WipeWorkspace'],
[$class: 'RelativeTargetDirectory', relativeTargetDir: "${params.projectName}"],
[$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
+ [$class: 'SubmoduleOption', recursiveSubmodules: true],
],
])
script {
@@ -54,7 +55,7 @@
# Build w/branch
echo "Building image with branch"
- make DOCKER_TAG="$branchName" docker-build 2>&1 | tee "$WORKSPACE/docker-build.log"
+ $extraEnvironmentVars DOCKER_TAG="$branchName" make docker-build 2>&1 | tee "$WORKSPACE/docker-build.log"
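+ # e.g. with extraEnvironmentVars set to "BUILD_PROFILED=true" (as the voltha-go
+ # verify job does) and branchName "master", this runs:
+ #   BUILD_PROFILED=true DOCKER_TAG="master" make docker-build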
# Build w/tags if they exist
if [ -n "$git_tags" ]
@@ -67,7 +68,7 @@
# remove leading 'v' on funky golang tags
clean_tag=\$(echo \$tag | sed 's/^v//g')
echo "Building image with tag: \$clean_tag (should reuse cached layers)"
- make DOCKER_TAG="\$clean_tag" docker-build
+ $extraEnvironmentVars DOCKER_TAG="\$clean_tag" make docker-build
done
fi
""")
@@ -91,7 +92,7 @@
# Push w/branch
echo "Pushing image with branch"
- make DOCKER_TAG="$branchName" docker-push 2>&1 | tee "$WORKSPACE/docker-push.log"
+ $extraEnvironmentVars DOCKER_TAG="$branchName" make docker-push 2>&1 | tee "$WORKSPACE/docker-push.log"
# Push w/tags if they exist
if [ -n "$git_tags" ]
@@ -103,7 +104,7 @@
# remove leading 'v' on funky golang tags
clean_tag=\$(echo \$tag | sed 's/^v//g')
echo "Pushing image with tag: \$clean_tag (should reuse cached layers)"
- make DOCKER_TAG="\$clean_tag" docker-push
+ $extraEnvironmentVars DOCKER_TAG="\$clean_tag" make docker-push
done
fi
""")
diff --git a/jjb/pipeline/omec-fossa-scan.groovy b/jjb/pipeline/omec-fossa-scan.groovy
index c70f3eb..c7a3f3f 100644
--- a/jjb/pipeline/omec-fossa-scan.groovy
+++ b/jjb/pipeline/omec-fossa-scan.groovy
@@ -81,6 +81,12 @@
echo "Testing project: ${params.project}"
+ if [ ! -f ".fossa.yml" ]
+ then
+ echo ".fossa.yml not found. This file is mandatory for the test to proceed."
+ exit 1
+ fi
+
echo "Run 'fossa init'"
fossa init --no-ansi --verbose
diff --git a/jjb/pipeline/omec-postmerge.groovy b/jjb/pipeline/omec-postmerge.groovy
index 5f23be4..9291f16 100644
--- a/jjb/pipeline/omec-postmerge.groovy
+++ b/jjb/pipeline/omec-postmerge.groovy
@@ -35,13 +35,17 @@
steps {
script {
abbreviated_commit_hash = commitHash.substring(0, 7)
+ tags_to_build = [ "${branchName}-latest",
+ "${branchName}-${abbreviated_commit_hash}"]
+ tags_to_build.each { tag ->
+ build job: "docker-publish-github_$repoName", parameters: [
+ string(name: 'gitUrl', value: "${repoUrl}"),
+ string(name: 'gitRef', value: "${branchName}"),
+ string(name: 'branchName', value: "${tag}"),
+ string(name: 'projectName', value: "${repoName}"),
+ ]
+ }
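+ // Example (hypothetical values): a merge to branch "master" at commit 1234abcd...
+ // triggers docker-publish-github_<repo> twice, once with branchName "master-latest"
+ // and once with "master-1234abc".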
}
- build job: "docker-publish-github_$repoName", parameters: [
- string(name: 'gitUrl', value: "${repoUrl}"),
- string(name: 'gitRef', value: "${branchName}"),
- string(name: 'branchName', value: "${branchName}-${abbreviated_commit_hash}"),
- string(name: 'projectName', value: "${repoName}"),
- ]
}
}
diff --git a/jjb/pipeline/voltha-bbsim-tests.groovy b/jjb/pipeline/voltha-bbsim-tests.groovy
index 7dfd54e..485fdbf 100644
--- a/jjb/pipeline/voltha-bbsim-tests.groovy
+++ b/jjb/pipeline/voltha-bbsim-tests.groovy
@@ -30,14 +30,9 @@
VOLTCONFIG="$HOME/.volt/config-minimal"
PATH="$WORKSPACE/voltha/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
TYPE="minimal"
+ VOLTHA_LOG_LEVEL="DEBUG"
FANCY=0
WITH_SIM_ADAPTERS="n"
- WITH_RADIUS="y"
- WITH_BBSIM="y"
- DEPLOY_K8S="y"
- VOLTHA_LOG_LEVEL="DEBUG"
- CONFIG_SADIS="n"
- ROBOT_MISC_ARGS="-d $WORKSPACE/RobotLogs"
}
stages {
@@ -144,7 +139,11 @@
'''
}
}
- stage('Deploy Voltha') {
+
+ stage('ATT workflow') {
+ environment {
+ ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ATTWorkflow"
+ }
steps {
sh '''
if [ "${branch}" != "master" ]; then
@@ -154,6 +153,12 @@
echo "on master, using default settings for kind-voltha"
fi
+ # Workflow-specific flags
+ export WITH_RADIUS=yes
+ export WITH_BBSIM=yes
+ export DEPLOY_K8S=yes
+ export CONFIG_SADIS=no
+
export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
IMAGES=""
@@ -205,32 +210,71 @@
md5sum $WORKSPACE/voltha/kind-voltha/bin/voltctl
fi
- cd $WORKSPACE/voltha/kind-voltha/
- echo \$EXTRA_HELM_FLAGS
+ printenv
kail -n voltha -n default > $WORKSPACE/onos-voltha-combined.log &
- ./voltha up
- '''
- }
- }
- stage('Run E2E Tests') {
- steps {
- sh '''
- mkdir -p $WORKSPACE/RobotLogs
+ cd $WORKSPACE/voltha/kind-voltha/
+ ./voltha up
+
+ # minimal-env.sh contains the environment we used
+ # Save value of EXTRA_HELM_FLAGS there to use in subsequent stages
+ echo export EXTRA_HELM_FLAGS=\\"\$EXTRA_HELM_FLAGS\\" >> minimal-env.sh
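+ # e.g. minimal-env.sh gains a line such as
+ #   export EXTRA_HELM_FLAGS="--set log_agent.enabled=False ..."
+ # which the DT workflow stage sources before redeploying.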
+
+ mkdir -p $ROBOT_LOGS_DIR
+ export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
# By default, all tests tagged 'sanity' are run. This covers basic functionality
# like running through the ATT workflow for a single subscriber.
- export TEST_TAGS=sanity
+ export TARGET=sanity-single-kind
# If the Gerrit comment contains a line with "functional tests" then run the full
# functional test suite. This covers tests tagged either 'sanity' or 'functional'.
# Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
REGEX="functional tests"
if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
- TEST_TAGS=sanityORfunctional
+ TARGET=functional-single-kind
fi
- make -C $WORKSPACE/voltha/voltha-system-tests single-kind || true
+ make -C $WORKSPACE/voltha/voltha-system-tests \$TARGET || true
+ '''
+ }
+ }
+
+ stage('DT workflow') {
+ environment {
+ ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/DTWorkflow"
+ }
+ steps {
+ sh '''
+ cd $WORKSPACE/voltha/kind-voltha/
+ source minimal-env.sh
+ WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
+
+ # Workflow-specific flags
+ export WITH_RADIUS=no
+ export WITH_EAPOL=no
+ export WITH_DHCP=no
+ export WITH_IGMP=no
+ export CONFIG_SADIS=no
+
+ DEPLOY_K8S=n ./voltha up
+
+ mkdir -p $ROBOT_LOGS_DIR
+ export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
+
+ # By default, all tests tagged 'sanityDt' are run. This covers basic functionality
+ # like running through the DT workflow for a single subscriber.
+ export TARGET=sanity-kind-dt
+
+ # If the Gerrit comment contains a line with "functional tests" then run the full
+ # functional test suite. This covers tests tagged either 'sanityDt' or 'functionalDt'.
+ # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
+ REGEX="functional tests"
+ if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
+ TARGET=functional-single-kind-dt
+ fi
+
+ make -C $WORKSPACE/voltha/voltha-system-tests \$TARGET || true
'''
}
}
@@ -272,27 +316,15 @@
extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
gzip $WORKSPACE/onos-voltha-combined.log
-
-
- ## shut down kind-voltha
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/voltha/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- cd $WORKSPACE/voltha/kind-voltha
- WAIT_ON_DOWN=y ./voltha down
'''
step([$class: 'RobotPublisher',
disableArchiveOutput: false,
- logFileName: 'RobotLogs/log*.html',
+ logFileName: 'RobotLogs/*/log*.html',
otherFiles: '',
- outputFileName: 'RobotLogs/output*.xml',
+ outputFileName: 'RobotLogs/*/output*.xml',
outputPath: '.',
- passThreshold: 80,
- reportFileName: 'RobotLogs/report*.html',
+ passThreshold: 100,
+ reportFileName: 'RobotLogs/*/report*.html',
unstableThreshold: 0]);
archiveArtifacts artifacts: '*.log,*.gz'
}
diff --git a/jjb/shell/github-release.sh b/jjb/shell/github-release.sh
index 40705c6..80b346f 100644
--- a/jjb/shell/github-release.sh
+++ b/jjb/shell/github-release.sh
@@ -95,7 +95,7 @@
# create release
echo "Creating Release: $GERRIT_PROJECT - $GIT_VERSION"
- gothub release \
+ github-release release \
--user "$GITHUB_ORGANIZATION" \
--repo "$GERRIT_PROJECT" \
--tag "$GIT_VERSION" \
@@ -116,7 +116,7 @@
for rel_file in *
do
echo "Uploading file: $rel_file"
- gothub upload \
+ github-release upload \
--user "$GITHUB_ORGANIZATION" \
--repo "$GERRIT_PROJECT" \
--tag "$GIT_VERSION" \
diff --git a/jjb/verify/device-management.yaml b/jjb/verify/device-management.yaml
index 193c78c..3921ad9 100644
--- a/jjb/verify/device-management.yaml
+++ b/jjb/verify/device-management.yaml
@@ -19,6 +19,9 @@
dependency-jobs: 'verify_device-management_licensed'
- 'make-unit-test':
junit-allow-empty-results: true
+ build-node: 'ubuntu16.04-basebuild-2c-4g'
+ - 'device-management-patch-test':
+ pipeline-script: 'device-management-mock-tests.groovy'
- job-group:
name: 'publish-device-management-jobs'
diff --git a/jjb/verify/voltha-go.yaml b/jjb/verify/voltha-go.yaml
index c40242d..089eaa5 100644
--- a/jjb/verify/voltha-go.yaml
+++ b/jjb/verify/voltha-go.yaml
@@ -41,3 +41,4 @@
build-timeout: 30
docker-repo: 'voltha'
dependency-jobs: 'version-tag'
+ extraEnvironmentVars: BUILD_PROFILED=true
diff --git a/jjb/verify/voltha-openolt-adapter.yaml b/jjb/verify/voltha-openolt-adapter.yaml
index 893ec19..6f9c4c8 100644
--- a/jjb/verify/voltha-openolt-adapter.yaml
+++ b/jjb/verify/voltha-openolt-adapter.yaml
@@ -41,3 +41,4 @@
build-timeout: 30
docker-repo: 'voltha'
dependency-jobs: 'version-tag'
+ extraEnvironmentVars: BUILD_PROFILED=true
diff --git a/jjb/voltha-scale.yaml b/jjb/voltha-scale.yaml
index a1a37ef..0c03a30 100644
--- a/jjb/voltha-scale.yaml
+++ b/jjb/voltha-scale.yaml
@@ -50,6 +50,16 @@
# multi-adapter
extraHelmFlags: "--set replicas.adapter_open_onu=8"
- 'voltha-scale-measurements-periodic':
+ name: 'voltha-scale-measurements-periodic-16-32-20s'
+ build-node: 'onf-pod1-head-node'
+ time-trigger: "H H/4 * * *"
+ onuPerPon: 32
+ ponPorts: 16
+ expectedOnus: 512
+ BBSIMdelay: 20000
+ # multi-adapter
+ extraHelmFlags: "--set replicas.adapter_open_onu=8"
+ - 'voltha-scale-measurements-periodic':
name: 'voltha-scale-measurements-periodic-16-64-200ms'
build-node: 'onf-pod1-head-node'
time-trigger: "H H/4 * * *"
@@ -60,6 +70,16 @@
# multi-adapter
extraHelmFlags: "--set replicas.adapter_open_onu=8"
- 'voltha-scale-measurements-periodic':
+ name: 'voltha-scale-measurements-periodic-16-64-20s'
+ build-node: 'onf-pod1-head-node'
+ time-trigger: "H H/4 * * *"
+ onuPerPon: 64
+ ponPorts: 16
+ expectedOnus: 1024
+ BBSIMdelay: 20000
+ # multi-adapter
+ extraHelmFlags: "--set replicas.adapter_open_onu=8"
+ - 'voltha-scale-measurements-periodic':
name: 'voltha-scale-measurements-periodic-4-64-200ms'
build-node: 'onf-pod1-head-node'
time-trigger: "H H/4 * * *"
@@ -142,6 +162,7 @@
flowStatInterval: 5
portsStatInterval: 5
expectedFlows: 201
+ extraHelmFlags: "--set replicas.adapter_open_onu=2"
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-dev'
build-node: 'voltha-scale'
diff --git a/packer/provision/basebuild.sh b/packer/provision/basebuild.sh
index 41c358c..18f2d89 100644
--- a/packer/provision/basebuild.sh
+++ b/packer/provision/basebuild.sh
@@ -167,7 +167,8 @@
tox \
twine==1.15.0 \
urllib3 \
- virtualenv
+ virtualenv \
+ yamllint
# end of pip install list
# install ruby gems
@@ -192,8 +193,8 @@
go get -v github.com/t-yuki/gocover-cobertura
go get -v github.com/jstemmer/go-junit-report
- # gothub - uploader for github artifacts
- go get -v github.com/itchio/gothub
+ # github-release - uploader for github artifacts
+ go get -v github.com/github-release/github-release
# dep for go package dependencies w/versioning, version 0.5.2, adapted from:
# https://golang.github.io/dep/docs/installation.html#install-from-source
@@ -294,6 +295,14 @@
dpkg -i /tmp/pandoc.deb
rm -f /tmp/pandoc.deb
+ # install yq (YAML query)
+ YQ_VERSION="3.3.0"
+ YQ_SHA256SUM="e70e482e7ddb9cf83b52f5e83b694a19e3aaf36acf6b82512cbe66e41d569201"
+ curl -L -o /tmp/yq https://github.com/mikefarah/yq/releases/download/${YQ_VERSION}/yq_linux_amd64
+ echo "$YQ_SHA256SUM /tmp/yq" | sha256sum -c -
+ mv /tmp/yq /usr/local/bin/yq
+ chmod -R a+rx /usr/local/bin/yq
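+ # (example) yq 3.x reads a value with "yq r <file> <path>", e.g. "yq r values.yaml images.tag"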
+
# remove apparmor
service apparmor stop
update-rc.d -f apparmor remove