Merge "Enabling sanity test on bbsim-sadis-server"
diff --git a/jjb/cord-test/voltha.yaml b/jjb/cord-test/voltha.yaml
index ed891f5..f122874 100644
--- a/jjb/cord-test/voltha.yaml
+++ b/jjb/cord-test/voltha.yaml
@@ -261,6 +261,36 @@
profile: '1T4GEM'
power-switch: True
+ # Flex pod with xgs-pon olt/onu - master TT workflow openonu go and timer based job
+ - 'build_voltha_pod_release_timer':
+ build-node: 'qa-testvm-pod'
+ config-pod: 'flex-ocp-cord-openonugo'
+ release: 'master'
+ branch: 'master'
+ num-of-onos: '3'
+ num-of-atomix: '3'
+ name-extension: '_TT_openonugo'
+ work-flow: 'TT'
+ test-repo: 'voltha-system-tests'
+ Jenkinsfile: 'Jenkinsfile-voltha-build'
+ configurePod: true
+ profile: 'TP'
+ time: '14'
+
+
+ # Flex POD test job - master versions, TT workflow and openonu go on voltha branch
+ - 'build_voltha_pod_test':
+ build-node: 'qa-testvm-pod'
+ config-pod: 'flex-ocp-cord-openonugo'
+ name-extension: '_TT_openonugo'
+ release: 'master'
+ branch: 'master'
+ test-repo: 'voltha-system-tests'
+ work-flow: 'TT'
+ pipeline-script: 'voltha-tt-physical-functional-tests.groovy'
+ profile: 'TP'
+ power-switch: True
+
# Menlo pod with olt/onu - Default tech profile and timer based job
- 'build_voltha_pod_release_timer':
build-node: 'menlo-demo-pod'
diff --git a/jjb/omec-ci.yaml b/jjb/omec-ci.yaml
index 5c13e45..c3097da 100644
--- a/jjb/omec-ci.yaml
+++ b/jjb/omec-ci.yaml
@@ -175,6 +175,9 @@
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
build-node: 'ubuntu16.04-basebuild-1c-2g'
+ - 'omec-cppcheck':
+ pipeline-file: 'omec-cppcheck.groovy'
+ build-node: 'ubuntu16.04-basebuild-1c-2g'
- 'docker-publish-github':
build-timeout: 60
docker-repo: 'omecproject'
@@ -473,6 +476,65 @@
project-type: pipeline
dsl: !include-raw-escape: pipeline/{pipeline-file}
+# cppcheck
+- job-template:
+ id: 'omec-cppcheck'
+ name: 'omec_{project}-cppcheck'
+ project-type: pipeline
+
+ description: |
+ <!-- Managed by Jenkins Job Builder -->
+ Created by {id} job-template from ci-management/jjb/omec-ci.yaml<br />
+
+ properties:
+ - cord-infra-properties:
+ build-days-to-keep: '{build-days-to-keep}'
+ artifact-num-to-keep: '{artifact-num-to-keep}'
+ - github:
+ url: 'https://github.com/{github-organization}/{project}'
+
+ wrappers:
+ - lf-infra-wrappers:
+ build-timeout: '{build-timeout}'
+ jenkins-ssh-credential: '{jenkins-ssh-credential}'
+
+ parameters:
+ - string:
+ name: ghprbPullId
+ default: '$ghprbPullId'
+ description: 'Pull request number to fetch changes from. Leave blank to run manually.'
+
+ - string:
+ name: branch
+ default: '$ghprbTargetBranch'
+ description: 'Branch to run. Only used when manually run.'
+
+ - string:
+ name: buildNode
+ default: '{build-node}'
+ description: 'Name of the Jenkins executor node to run the job on'
+
+ - string:
+ name: project
+ default: '{project}'
+ description: 'Name of the project'
+
+ - string:
+ name: ghprbGhRepository
+ default: '{github-organization}/{project}'
+ description: 'Repository of the project.'
+
+ triggers:
+ - cord-infra-github-pr-trigger:
+ github_pr_org_list: '{obj:github_pr_org_list}'
+ github_pr_auth_id: '{github_pr_auth_id}'
+ status_context: 'CORD Jenkins - cppcheck Verification'
+
+ concurrent: false
+
+  # project-type: pipeline is already declared at the top of this job-template
+ dsl: !include-raw-escape: pipeline/{pipeline-file}
+
# tests
- job-template:
id: 'omec-tc1'
diff --git a/jjb/pipeline/omec-cppcheck.groovy b/jjb/pipeline/omec-cppcheck.groovy
new file mode 100644
index 0000000..31d8c4d
--- /dev/null
+++ b/jjb/pipeline/omec-cppcheck.groovy
@@ -0,0 +1,86 @@
+// Copyright 2020-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// omec-cppcheck.groovy
+// runs cppcheck static analysis on an omec-project repo in a docker container
+
+pipeline {
+
+ agent {
+ docker {
+ image "registry.aetherproject.org/ci/cppcheck-verify:latest"
+ label "${params.buildNode}"
+ registryUrl "https://registry.aetherproject.org/"
+ registryCredentialsId "registry.aetherproject.org"
+ }
+ }
+
+ options {
+ timeout(15)
+ }
+
+ stages {
+ stage ("Clean Workspace") {
+ steps {
+ sh 'rm -rf *'
+ }
+ }
+
+ stage ("Checkout Pull Request") {
+ when {
+ expression {return params.ghprbPullId != ""}
+ }
+ steps {
+ checkout([
+ $class: 'GitSCM',
+ userRemoteConfigs: [[ url: "https://github.com/${params.ghprbGhRepository}", refspec: "pull/${params.ghprbPullId}/head" ]],
+ extensions: [[$class: 'RelativeTargetDirectory', relativeTargetDir: "${params.project}"]],
+ ],
+ )
+ }
+ }
+
+ stage ("Checkout Repo (manual)") {
+ when {
+ expression {return params.ghprbPullId == ""}
+ }
+ steps {
+ checkout([
+ $class: 'GitSCM',
+ userRemoteConfigs: [[ url: "https://github.com/${params.ghprbGhRepository}" ]],
+ extensions: [[$class: 'RelativeTargetDirectory', relativeTargetDir: "${params.project}"]],
+ ],
+ )
+ }
+ }
+
+ stage("Run cppcheck"){
+ steps {
+ script {
+ sh """
+ cd ${params.project}
+                    if [ ! -z "${params.ghprbPullId}" ]
+ then
+ git checkout FETCH_HEAD
+ else
+ git checkout ${params.branch}
+ fi
+ git show
+ make cppcheck
+ """
+ }
+ }
+ }
+ }
+}
diff --git a/jjb/pipeline/voltha-scale-multi-stack.groovy b/jjb/pipeline/voltha-scale-multi-stack.groovy
index 1e00fc6..98831b7 100644
--- a/jjb/pipeline/voltha-scale-multi-stack.groovy
+++ b/jjb/pipeline/voltha-scale-multi-stack.groovy
@@ -40,11 +40,6 @@
WITH_KAFKA="yes"
WITH_ETCD="yes"
VOLTHA_ETCD_PORT=9999
-
- // VOLTHA namespaces are defined at runtime depending on the stack we're installing
- // VOLTHA_NS="default"
- // ADAPTER_NS="default"
- // BBSIM_NS="default"
INFRA_NS="infra"
// configurable options
@@ -86,9 +81,6 @@
helm repo add bbsim-sadis https://ciena.github.io/bbsim-sadis-server/charts
helm repo update
- # NOTE this is temporary, for now the bbsim-sadis-server service will be overridden and ONOS will use the new server
- kubectl delete -n infra -f $HOME/bbsim-sadis-server/deployments/bbsim-sadis-server.yaml
-
# removing ETCD port forward
P_ID="\$(ps e -ww -A | grep "_TAG=etcd-port-forward" | grep -v grep | awk '{print \$1}')"
if [ -n "\$P_ID" ]; then
@@ -244,10 +236,6 @@
script {
sh returnStdout: false, script: """
- # NOTE this is temporary, for now the bbsim-sadis-server service will be overridden and ONOS will use the new server
- helm del -n infra bbsim-sadis-server
- kubectl apply -n infra -f $HOME/bbsim-sadis-server/deployments/bbsim-sadis-server.yaml
-
# TODO this needs to be repeated per stack
# kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
@@ -276,7 +264,6 @@
fi
if [ ${withPcap} = true ] && [ ${volthaStacks} -eq 1 ] ; then
- # FIXME ofagent pcap has to be replicated per stack
# Start the tcp-dump in ofagent
export OF_AGENT=\$(kubectl -n \$INFRA_NS get pods -l app=ofagent -o name)
kubectl exec \$OF_AGENT -- apk update
@@ -313,7 +300,7 @@
make vst_venv
'''
sh '''
- if [ ${withProfiling} = true ] ; then
+ if [ ${withProfiling} = true ] && [ ${volthaStacks} -eq 1 ]; then
mkdir -p $LOG_FOLDER/pprof
cat << EOF > $WORKSPACE/pprof.sh
timestamp() {
@@ -345,11 +332,10 @@
_TAG="pprof"
_TAG=$_TAG bash $WORKSPACE/pprof.sh &
+ else
+ echo "Profiling not supported for multiple VOLTHA stacks"
fi
'''
- // bbsim-sadis server takes a while to cache the subscriber entries
- // wait for that before starting the tests
- sleep(60)
}
}
stage('Run Test') {
@@ -379,7 +365,7 @@
fi
done
- if [ ${withPcap} = true ] ; then
+ if [ ${withPcap} = true ] && [ ${volthaStacks} -eq 1 ]; then
# stop ofAgent tcpdump
P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
if [ -n "\$P_ID" ]; then
@@ -417,7 +403,7 @@
fi
'''
sh '''
- if [ ${withProfiling} = true ] ; then
+ if [ ${withProfiling} = true ] && [ ${volthaStacks} -eq 1 ]; then
_TAG="pprof"
P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
if [ -n "$P_IDS" ]; then
@@ -442,7 +428,7 @@
[file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
[file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
],
- group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
+ group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (Stacks: ${params.volthaStacks}, OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
])
step([$class: 'RobotPublisher',
disableArchiveOutput: false,
@@ -480,8 +466,8 @@
for bbsim in "\${IDS[@]}"
do
- kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl onu list > $LOG_FOLDER/\$bbsim-device-list.txt || true
- kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl service list > $LOG_FOLDER/\$bbsim-service-list.txt || true
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl onu list > $LOG_FOLDER/${stack_ns}/\$bbsim-device-list.txt || true
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl service list > $LOG_FOLDER/${stack_ns}/\$bbsim-service-list.txt || true
done
"""
}
@@ -532,20 +518,19 @@
for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
stack_ns="voltha"+i
voltcfg="~/.volt/config-voltha"+i
- println stack_ns
try {
sh """
- voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list -o json > $LOG_FOLDER/device-list.json || true
- python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
- rm $LOG_FOLDER/device-list.json || true
- voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list > $LOG_FOLDER/voltha-devices-list.txt || true
+ voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list -o json > $LOG_FOLDER/${stack_ns}/device-list.json || true
+ python -m json.tool $LOG_FOLDER/${stack_ns}/device-list.json > $LOG_FOLDER/${stack_ns}/voltha-devices-list.json || true
+ rm $LOG_FOLDER/${stack_ns}/device-list.json || true
+ voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list > $LOG_FOLDER/${stack_ns}/voltha-devices-list.txt || true
DEVICE_LIST=
- printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns}-m 8MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
- printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true
+                printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device flows # > $LOG_FOLDER/${stack_ns}/voltha-device-flows-#.txt" || true
+ printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/${stack_ns}/voltha-device-ports-#.txt" || true
- printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
- printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
+ printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice flows # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-flows-#.txt" || true
+ printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice port list # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-ports-#.txt" || true
"""
} catch(e) {
sh '''
@@ -563,7 +548,7 @@
python tests/scale/sizing.py -o $WORKSPACE/plots || true
fi
'''
- archiveArtifacts artifacts: 'kind-voltha/install-*.log,execution-time.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
+ archiveArtifacts artifacts: 'kind-voltha/install-*.log,execution-time-*.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
}
}
}
diff --git a/jjb/pipeline/voltha-scale-test.groovy b/jjb/pipeline/voltha-scale-test.groovy
index 6ff2d6d..76fa65a 100644
--- a/jjb/pipeline/voltha-scale-test.groovy
+++ b/jjb/pipeline/voltha-scale-test.groovy
@@ -320,9 +320,6 @@
_TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
done
'''
- // bbsim-sadis server takes a while to cache the subscriber entries
- // wait for that before starting the tests
- sleep(120)
}
}
stage('Configuration') {
diff --git a/jjb/voltha-scale.yaml b/jjb/voltha-scale.yaml
index cffc377..70970e3 100644
--- a/jjb/voltha-scale.yaml
+++ b/jjb/voltha-scale.yaml
@@ -102,8 +102,6 @@
withDhcp: false
withIgmp: false
extraHelmFlags: '--set use_openonu_adapter_go=true'
- volthaSystemTestsChange: refs/changes/27/21627/4
- kindVolthaChange: refs/changes/85/21485/13
openonuAdapterReplicas: 1
@@ -208,14 +206,14 @@
# multi-stack jobs
- 'voltha-scale-measurements':
- name: 'voltha-scale-measurements-master-5-stacks-2-16-32-att-subscribers'
- build-node: 'onf-pod1-head-node'
+ name: 'voltha-scale-measurements-master-10-stacks-2-16-32-att-subscribers'
+ build-node: 'voltha-scale-2'
pipeline-script: 'voltha-scale-multi-stack.groovy'
- 'disable-job': true
+ 'disable-job': false
time-trigger: "H H/4 * * *"
withMonitoring: false
logLevel: WARN
- volthaStacks: 5
+ volthaStacks: 10
olts: 2
pons: 16
onus: 32
@@ -225,19 +223,18 @@
withEapol: true
withDhcp: true
withIgmp: false
- extraHelmFlags: '--set authRetry=true,dhcpRetry=true'
- volthaSystemTestsChange: refs/changes/27/21627/4
- kindVolthaChange: refs/changes/85/21485/13
+ extraHelmFlags: '--set use_openonu_adapter_go=true'
+ openonuAdapterReplicas: 1
- 'voltha-scale-measurements':
- name: 'voltha-scale-measurements-master-5-stacks-2-16-32-dt-subscribers'
- build-node: 'onf-pod1-head-node'
+ name: 'voltha-scale-measurements-master-10-stacks-2-16-32-dt-subscribers'
+ build-node: 'voltha-scale-2'
pipeline-script: 'voltha-scale-multi-stack.groovy'
- 'disable-job': true
+ 'disable-job': false
time-trigger: "H H/4 * * *"
withMonitoring: false
logLevel: WARN
- volthaStacks: 5
+ volthaStacks: 10
olts: 2
pons: 16
onus: 32
@@ -247,19 +244,18 @@
withEapol: false
withDhcp: false
withIgmp: false
- extraHelmFlags: '--set authRetry=true,dhcpRetry=true'
- volthaSystemTestsChange: refs/changes/27/21627/4
- kindVolthaChange: refs/changes/85/21485/13
+ extraHelmFlags: '--set use_openonu_adapter_go=true'
+ openonuAdapterReplicas: 1
- 'voltha-scale-measurements':
- name: 'voltha-scale-measurements-master-5-stacks-2-16-32-tt-subscribers'
- build-node: 'onf-pod1-head-node'
+ name: 'voltha-scale-measurements-master-10-stacks-2-16-32-tt-subscribers'
+ build-node: 'voltha-scale-2'
pipeline-script: 'voltha-scale-multi-stack.groovy'
- 'disable-job': true
+ 'disable-job': false
time-trigger: "H H/4 * * *"
withMonitoring: false
logLevel: WARN
- volthaStacks: 5
+ volthaStacks: 10
olts: 2
pons: 16
onus: 32
@@ -269,9 +265,8 @@
withEapol: false
withDhcp: true
withIgmp: true
- extraHelmFlags: '--set authRetry=true,dhcpRetry=true'
- volthaSystemTestsChange: refs/changes/27/21627/4
- kindVolthaChange: refs/changes/85/21485/13
+ extraHelmFlags: '--set use_openonu_adapter_go=true'
+ openonuAdapterReplicas: 1
# voltha-2.5 Jobs
- 'voltha-scale-measurements':