[VOL-4628] Add memory-leak test job
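
When withMonitoring is enabled, the bbsim-tests pipeline installs the
nem-monitoring chart (with alertmanager, pushgateway, kpi-exporter and
the dashboards disabled), port-forwards the
nem-monitoring-prometheus-server service to port 31301, and snapshots
the memory consumption of the VOLTHA pods with
voltha-system-tests/scripts/mem_consumption.py before and after the
test run. The snapshots land in $WORKSPACE/voltha-pods-mem-consumption
and are archived together with the other artifacts. A new periodic job,
periodic-voltha-memory-leak-test-bbsim, runs the
memory-leak-test-single-kind-{att,dt,tt} targets with monitoring
enabled on a "H H/23 * * *" schedule.
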
Change-Id: I936d897b17d4ebfc403e7864969a2251cd74c347
diff --git a/jjb/pipeline/voltha/master/bbsim-tests.groovy b/jjb/pipeline/voltha/master/bbsim-tests.groovy
index 527bf65..73bd509 100755
--- a/jjb/pipeline/voltha/master/bbsim-tests.groovy
+++ b/jjb/pipeline/voltha/master/bbsim-tests.groovy
@@ -42,6 +42,18 @@
}
}
}
+ stage('Deploy common infrastructure') {
+ sh '''
+ helm repo add onf https://charts.opencord.org
+ helm repo update
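+ # Deploy the nem-monitoring chart only when monitoring is requested; it provides the Prometheus instance used for the memory snapshots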
+ if [ "${withMonitoring}" = "true" ]; then
+ helm install nem-monitoring onf/nem-monitoring \
+ --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
+ --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
+ fi
+ '''
+ }
stage('Deploy Voltha') {
if (teardown) {
timeout(10) {
@@ -104,6 +116,8 @@
JENKINS_NODE_COOKIE="dontKillMe" _TAG="bbsim\${i}" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/bbsim\${i} \${bbsimDmiPortFwd}:50075; done"&
((bbsimDmiPortFwd++))
done
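+ # Expose the nem-monitoring Prometheus server on 31301 for the memory-consumption script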
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="nem-monitoring-prometheus-server" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n default svc/nem-monitoring-prometheus-server 31301:80; done"&
ps aux | grep port-forward
"""
// setting ONOS log level
@@ -125,6 +139,14 @@
}
stage('Run test ' + testTarget + ' on ' + workflow + ' workFlow') {
sh """
+ mkdir -p $WORKSPACE/voltha-pods-mem-consumption
+ cd $WORKSPACE/voltha-system-tests
+ make vst_venv
+ source ./vst_venv/bin/activate || true
+ # Collect initial memory consumption
+ python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption -a 0.0.0.0:31301 -n ${volthaNamespace} || true
+ """
+ sh """
mkdir -p ${logsDir}
export ROBOT_MISC_ARGS="-d ${logsDir} ${params.extraRobotArgs} "
ROBOT_MISC_ARGS+="-v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120 -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace} -v container_log_dir:${logsDir} -v logging:${testLogging}"
@@ -148,7 +170,13 @@
sh """
kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/${exitStatus}/voltha.log || true
"""
- archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.txt,**/*.html'
+ sh """
+ cd $WORKSPACE/voltha-system-tests
+ source ./vst_venv/bin/activate || true
+ # Collect memory consumption of voltha pods once all the tests are complete
+ python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption -a 0.0.0.0:31301 -n ${volthaNamespace} || true
+ """
+ archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.txt,**/*.html,**/voltha-pods-mem-consumption/*'
sh '''
sync
pkill kail || true
diff --git a/jjb/voltha-e2e.yaml b/jjb/voltha-e2e.yaml
index 9cbca48..3e01fa2 100755
--- a/jjb/voltha-e2e.yaml
+++ b/jjb/voltha-e2e.yaml
@@ -1333,6 +1333,32 @@
teardown: true
logging: true
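+ # Memory-leak measurement job: installs Prometheus (nem-monitoring) and snapshots pod memory before/after each test target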
+ - 'voltha-periodic-test':
+ name: 'periodic-voltha-memory-leak-test-bbsim'
+ build-node: 'ubuntu18.04-basebuild-4c-8g'
+ code-branch: 'master'
+ time-trigger: "H H/23 * * *"
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
+ withMonitoring: true
+ testTargets: |
+ - target: memory-leak-test-single-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ logging: true
+ - target: memory-leak-test-single-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ logging: true
+ - target: memory-leak-test-single-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ logging: true
+ timeout: 360
+
# ATT Per-patchset Pod builds on Tucson pod (master)
- 'verify_physical_voltha_patchset_auto':
name: 'verify_physical_voltha_patchset_auto'
@@ -1402,6 +1428,7 @@
timeout: 130
logLevel: 'INFO'
enableMacLearning: false
+ withMonitoring: false
trigger-comment: vv7CBoQQYYonvaN8xcru
time-trigger: 0 0 29 2 *
@@ -1498,6 +1525,11 @@
default: '{logLevel}'
description: 'Log level for all the components'
+ - bool:
+ name: withMonitoring
+ default: '{withMonitoring}'
+ description: 'Whether to install the nem-monitoring chart (Prometheus) used for memory-consumption snapshots'
+
- string:
name: timeout
default: '{timeout}'
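
Note on scripts/mem_consumption.py: the change invokes the script but does
not contain it, so only its flags (-o output directory, -a Prometheus
host:port, -n namespace) are visible here. For readers unfamiliar with it,
below is a minimal sketch of what such a collector could look like,
assuming it reads per-pod working-set memory from the Prometheus HTTP API
and writes one CSV snapshot per invocation. This is an illustration under
those assumptions, not the script shipped in voltha-system-tests.

    #!/usr/bin/env python3
    # Illustrative sketch only -- the real scripts/mem_consumption.py ships
    # in voltha-system-tests and its internals are not shown in this change.
    # Assumption: it reads per-pod working-set memory from the Prometheus
    # HTTP API (exposed here via the nem-monitoring port-forward on 31301)
    # and writes one timestamped CSV snapshot per invocation.
    import argparse
    import csv
    import os
    import time

    import requests  # assumed to be available inside vst_venv


    def query_pod_memory(address, namespace):
        """Return {pod: working_set_bytes} for every pod in the namespace."""
        # container="" selects the pod-level cgroup series, one per pod
        query = ('container_memory_working_set_bytes'
                 '{namespace="%s",container=""}' % namespace)
        resp = requests.get("http://%s/api/v1/query" % address,
                            params={"query": query}, timeout=30)
        resp.raise_for_status()
        samples = resp.json()["data"]["result"]
        # Prometheus instant vectors carry the value as [timestamp, "string"]
        return {s["metric"].get("pod", "<none>"): float(s["value"][1])
                for s in samples}


    def main():
        parser = argparse.ArgumentParser()
        parser.add_argument("-o", dest="output", required=True,
                            help="directory for the CSV snapshots")
        parser.add_argument("-a", dest="address", required=True,
                            help="Prometheus host:port, e.g. 0.0.0.0:31301")
        parser.add_argument("-n", dest="namespace", required=True,
                            help="namespace whose pods are measured")
        args = parser.parse_args()

        os.makedirs(args.output, exist_ok=True)
        path = os.path.join(args.output, "mem-%d.csv" % int(time.time()))
        with open(path, "w", newline="") as f:
            writer = csv.writer(f)
            writer.writerow(["pod", "working_set_bytes"])
            for pod, mem in sorted(query_pod_memory(args.address,
                                                    args.namespace).items()):
                writer.writerow([pod, mem])


    if __name__ == "__main__":
        main()

Comparing the snapshot taken before the test run with the one taken after
(as archived under voltha-pods-mem-consumption) is what surfaces a leak:
a pod whose working set grows monotonically across runs is the candidate.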