// Copyright 2019-present Open Networking Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Deploys VOLTHA using kind-voltha and performs a scale test

pipeline {

  /* no label, executor is determined by JJB */
  agent {
    label "${params.buildNode}"
  }
  options {
    timeout(time: 60, unit: 'MINUTES')
  }
  environment {
    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
    KUBECONFIG="$HOME/.kube/config"
    VOLTCONFIG="$HOME/.volt/config"
    SSHPASS="karaf"
    PATH="$PATH:$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
    SCHEDULE_ON_CONTROL_NODES="yes"
    FANCY=0
    WITH_SIM_ADAPTERS="no"
    WITH_RADIUS="${withRadius}"
    WITH_BBSIM="yes"
    LEGACY_BBSIM_INDEX="no"
    DEPLOY_K8S="no"
    CONFIG_SADIS="external"
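    // Kafka and etcd are installed separately in the "Deploy common infrastructure" stage,
    // so point kind-voltha at those services instead of deploying its own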
    WITH_KAFKA="kafka.default.svc.cluster.local"
    WITH_ETCD="etcd.default.svc.cluster.local"
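    // local port on which the etcd service is exposed via "kubectl port-forward" after the deployment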
    VOLTHA_ETCD_PORT=9999

    // install everything in the default namespace
    VOLTHA_NS="default"
    ADAPTER_NS="default"
    INFRA_NS="default"
    BBSIM_NS="default"

    // configurable options
    WITH_EAPOL="${withEapol}"
    WITH_DHCP="${withDhcp}"
    WITH_IGMP="${withIgmp}"
    VOLTHA_LOG_LEVEL="${logLevel}"
    NUM_OF_BBSIM="${olts}"
    NUM_OF_OPENONU="${openonuAdapterReplicas}"
    NUM_OF_ONOS="${onosReplicas}"
    NUM_OF_ATOMIX="${atomixReplicas}"
    WITH_PPROF="${withProfiling}"
    EXTRA_HELM_FLAGS="${extraHelmFlags} " // note that the trailing space is required to separate the parameters from appends done later
    VOLTHA_CHART="${volthaChart}"
    VOLTHA_BBSIM_CHART="${bbsimChart}"
    VOLTHA_ADAPTER_OPEN_OLT_CHART="${openoltAdapterChart}"
    VOLTHA_ADAPTER_OPEN_ONU_CHART="${openonuAdapterChart}"
    ONOS_CLASSIC_CHART="${onosChart}"
    RADIUS_CHART="${radiusChart}"

    APPS_TO_LOG="etcd kafka onos-onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server"
    LOG_FOLDER="$WORKSPACE/logs"

    GERRIT_PROJECT="${GERRIT_PROJECT}"
  }

  stages {
    stage ('Cleanup') {
      steps {
        timeout(time: 11, unit: 'MINUTES') {
          sh returnStdout: false, script: """
            helm repo add stable https://charts.helm.sh/stable
            helm repo add onf https://charts.opencord.org
            helm repo add cord https://charts.opencord.org
            helm repo add onos https://charts.onosproject.org
            helm repo add atomix https://charts.atomix.io
            helm repo add bbsim-sadis https://ciena.github.io/bbsim-sadis-server/charts
            helm repo update

            # removing ETCD port forward
            P_ID="\$(ps e -ww -A | grep "_TAG=etcd-port-forward" | grep -v grep | awk '{print \$1}')"
            if [ -n "\$P_ID" ]; then
              kill -9 \$P_ID
            fi

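            # delete charts left over from previous runs, keeping the docker-registry and kafkacat releases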
            NAMESPACES="voltha1 voltha2 infra default"
            for NS in \$NAMESPACES
            do
              for hchart in \$(helm list -n \$NS -q | grep -E -v 'docker-registry|kafkacat');
              do
                echo "Purging chart: \${hchart}"
                helm delete -n \$NS "\${hchart}"
              done
            done

            test -e $WORKSPACE/kind-voltha/voltha && cd $WORKSPACE/kind-voltha && ./voltha down

            # remove orphaned port-forwards from different namespaces
            ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9

            cd $WORKSPACE
            rm -rf $WORKSPACE/*
          """
        }
      }
    }
    stage('Clone kind-voltha') {
      steps {
        checkout([
          $class: 'GitSCM',
          userRemoteConfigs: [[
            url: "https://gerrit.opencord.org/kind-voltha",
          ]],
          branches: [[name: 'refs/tags/6.0.0']],
          extensions: [
            [$class: 'WipeWorkspace'],
            [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
          ],
        ])
      }
    }
    stage('Clone voltha-system-tests') {
      steps {
        checkout([
          $class: 'GitSCM',
          userRemoteConfigs: [[
            url: "https://gerrit.opencord.org/voltha-system-tests",
            refspec: "${volthaSystemTestsChange}"
          ]],
          branches: [[ name: "${release}", ]],
          extensions: [
            [$class: 'WipeWorkspace'],
            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
          ],
        ])
        script {
          sh(script:"""
            if [ '${volthaSystemTestsChange}' != '' ] ; then
              cd $WORKSPACE/voltha-system-tests;
              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
            fi
          """)
        }
      }
    }
    stage('Build patch') {
      when {
        expression {
          return params.GERRIT_PROJECT
        }
      }
      steps {
        sh """
          git clone https://\$GERRIT_HOST/\$GERRIT_PROJECT
          cd \$GERRIT_PROJECT
          git fetch https://\$GERRIT_HOST/\$GERRIT_PROJECT \$GERRIT_REFSPEC && git checkout FETCH_HEAD

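          # build images from the patch and push them to the registry with the "voltha-scale" tag,
          # so the deployment below can pick them up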
          DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-build
          DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-push
        """
      }
    }
    stage('Deploy common infrastructure') {
      // includes monitoring, kafka, etcd
      steps {
        sh '''
          helm install kafka $HOME/teone/helm-charts/kafka --set replicaCount=${kafkaReplicas} --set persistence.enabled=false \
            --set zookeeper.replicaCount=${kafkaReplicas} --set zookeeper.persistence.enabled=false \
            --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default

          # the etcd chart uses "auth" for reasons different from BBSim, so strip those flags away
          ETCD_FLAGS=$(echo ${extraHelmFlags} | sed -e 's/--set auth=false / /g' -e 's/--set auth=true / /g')
          ETCD_FLAGS+=" --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=${etcdReplicas}"
          ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
          helm install -f $WORKSPACE/kind-voltha/values.yaml --set replicas=${etcdReplicas} etcd $HOME/teone/helm-charts/etcd $ETCD_FLAGS

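          # optionally deploy the nem-monitoring stack, used later on to collect resource-usage metrics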
          if [ ${withMonitoring} = true ] ; then
            helm install nem-monitoring cord/nem-monitoring \
              -f $HOME/voltha-scale/grafana.yaml \
              --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
              --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
          fi
        '''
      }
    }
    stage('Deploy Voltha') {
      steps {
        script {
          sh returnStdout: false, script: """

            cd $WORKSPACE/kind-voltha/

            export EXTRA_HELM_FLAGS+=' '

            # Load the release defaults
            if [ '${release.trim()}' != 'master' ]; then
              source $WORKSPACE/kind-voltha/releases/${release}
              EXTRA_HELM_FLAGS+=" ${extraHelmFlags} "
            fi

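            # NOTE: the custom image overrides below expect values in "<repository>:<tag>" form;
            # IFS=: splits them into the repository and tag used in the helm --set flags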
            # BBSim custom image handling
            if [ '${bbsimImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'bbsim' ]; then
              IFS=: read -r bbsimRepo bbsimTag <<< '${bbsimImg.trim()}'
              EXTRA_HELM_FLAGS+="--set images.bbsim.repository=\$bbsimRepo,images.bbsim.tag=\$bbsimTag "
            fi

            # VOLTHA and ofAgent custom image handling
            # NOTE to override the rw-core image in a released version you must set the ofAgent image too
            # TODO split ofAgent and voltha-go
            if [ '${rwCoreImg.trim()}' != '' ] && [ '${ofAgentImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-go' ]; then
              IFS=: read -r rwCoreRepo rwCoreTag <<< '${rwCoreImg.trim()}'
              IFS=: read -r ofAgentRepo ofAgentTag <<< '${ofAgentImg.trim()}'
              EXTRA_HELM_FLAGS+="--set images.rw_core.repository=\$rwCoreRepo,images.rw_core.tag=\$rwCoreTag,images.ofagent.repository=\$ofAgentRepo,images.ofagent.tag=\$ofAgentTag "
            fi

            # OpenOLT custom image handling
            if [ '${openoltAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openolt-adapter' ]; then
              IFS=: read -r openoltAdapterRepo openoltAdapterTag <<< '${openoltAdapterImg.trim()}'
              EXTRA_HELM_FLAGS+="--set images.adapter_open_olt.repository=\$openoltAdapterRepo,images.adapter_open_olt.tag=\$openoltAdapterTag "
            fi

            # OpenONU custom image handling
            if [ '${openonuAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter' ]; then
              IFS=: read -r openonuAdapterRepo openonuAdapterTag <<< '${openonuAdapterImg.trim()}'
              EXTRA_HELM_FLAGS+="--set images.adapter_open_onu.repository=\$openonuAdapterRepo,images.adapter_open_onu.tag=\$openonuAdapterTag "
            fi

            # OpenONU GO custom image handling
            if [ '${openonuAdapterGoImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter-go' ]; then
              IFS=: read -r openonuAdapterGoRepo openonuAdapterGoTag <<< '${openonuAdapterGoImg.trim()}'
              EXTRA_HELM_FLAGS+="--set images.adapter_open_onu_go.repository=\$openonuAdapterGoRepo,images.adapter_open_onu_go.tag=\$openonuAdapterGoTag "
            fi

            # ONOS custom image handling
            if [ '${onosImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-onos' ]; then
              IFS=: read -r onosRepo onosTag <<< '${onosImg.trim()}'
              EXTRA_HELM_FLAGS+="--set images.onos.repository=\$onosRepo,images.onos.tag=\$onosTag "
            fi

            # set BBSim parameters
            EXTRA_HELM_FLAGS+='--set enablePerf=true,pon=${pons},onu=${onus} '

            # disable the securityContext, this is a development cluster
            EXTRA_HELM_FLAGS+='--set securityContext.enabled=false '

            # No persistent-volume-claims in Atomix
            EXTRA_HELM_FLAGS+="--set atomix.persistence.enabled=false "

            echo "Installing with the following extra arguments:"
            echo $EXTRA_HELM_FLAGS

            # if it's newer than voltha-2.4 set the correct BBSIM_CFG
            if [ '${release.trim()}' != 'voltha-2.4' ]; then
              export BBSIM_CFG="$WORKSPACE/kind-voltha/configs/bbsim-sadis-${workflow}.yaml"
            fi

            # Use custom built images

            if [ '\$GERRIT_PROJECT' == 'voltha-go' ]; then
              EXTRA_HELM_FLAGS+="--set images.rw_core.repository=${dockerRegistry}/voltha/voltha-rw-core,images.rw_core.tag=voltha-scale "
            fi

            if [ '\$GERRIT_PROJECT' == 'voltha-openolt-adapter' ]; then
              EXTRA_HELM_FLAGS+="--set images.adapter_open_olt.repository=${dockerRegistry}/voltha/voltha-openolt-adapter,images.adapter_open_olt.tag=voltha-scale "
            fi

            if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter' ]; then
              EXTRA_HELM_FLAGS+="--set images.adapter_open_onu.repository=${dockerRegistry}/voltha/voltha-openonu-adapter,images.adapter_open_onu.tag=voltha-scale "
            fi

            if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter-go' ]; then
              EXTRA_HELM_FLAGS+="--set images.adapter_open_onu_go.repository=${dockerRegistry}/voltha/voltha-openonu-adapter-go,images.adapter_open_onu_go.tag=voltha-scale "
            fi

            if [ '\$GERRIT_PROJECT' == 'ofagent-go' ]; then
              EXTRA_HELM_FLAGS+="--set images.ofagent.repository=${dockerRegistry}/voltha/voltha-ofagent-go,images.ofagent.tag=voltha-scale "
            fi

            if [ '\$GERRIT_PROJECT' == 'voltha-onos' ]; then
              EXTRA_HELM_FLAGS+="--set images.onos.repository=${dockerRegistry}/voltha/voltha-onos,images.onos.tag=voltha-scale "
            fi

            if [ '\$GERRIT_PROJECT' == 'bbsim' ]; then
              EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
            fi

            ./voltha up

            # Forward the ETCD port onto $VOLTHA_ETCD_PORT
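            # the _TAG marker lets the Cleanup stage of a later run find and kill this background process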
            _TAG=etcd-port-forward kubectl port-forward --address 0.0.0.0 -n default service/etcd $VOLTHA_ETCD_PORT:2379&
          """
        }
        sh returnStdout: false, script: '''
          # start logging with kail
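          # one kail process per monitored app, tagged via _TAG so the post stage can stop it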

          mkdir -p $LOG_FOLDER

          list=($APPS_TO_LOG)
          for app in "${list[@]}"
          do
            echo "Starting logs for: ${app}"
            _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
          done
        '''
      }
    }
    stage('Configuration') {
      steps {
        script {
          sh returnStdout: false, script: """
            # Setting link discovery
            sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}

            sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
            sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true


            sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 1000

            sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500

            sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set ${logLevel} org.onosproject
            sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set ${logLevel} org.opencord


            kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false

            # Set Flows/Ports/Meters poll frequency
            sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
            sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}

            if [ ${withFlows} = false ]; then
              sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
            fi

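            # preload the BBSM MIB template into etcd so the OpenONU adapter can reuse it instead of performing a full MIB upload for every ONU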
            if [ ${withMibTemplate} = true ] ; then
              rm -f BBSM-12345123451234512345-00000000000001-v1.json
              wget https://raw.githubusercontent.com/opencord/voltha-openonu-adapter/master/templates/BBSM-12345123451234512345-00000000000001-v1.json
              cat BBSM-12345123451234512345-00000000000001-v1.json | kubectl exec -it \$(kubectl get pods |grep etcd | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/templates/BBSM/12345123451234512345/00000000000001
            fi

            if [ ${withPcap} = true ] ; then
              # Start the tcp-dump in ofagent
              export OF_AGENT=\$(kubectl get pods -l app=ofagent -o name)
              kubectl exec \$OF_AGENT -- apk update
              kubectl exec \$OF_AGENT -- apk add tcpdump
              kubectl exec \$OF_AGENT -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
              _TAG=ofagent-tcpdump kubectl exec \$OF_AGENT -- tcpdump -nei eth0 -w out.pcap&

              # Start the tcp-dump in radius
              export RADIUS=\$(kubectl get pods -l app=radius -o name)
              kubectl exec \$RADIUS -- apt-get update
              kubectl exec \$RADIUS -- apt-get install -y tcpdump
              _TAG=radius-tcpdump kubectl exec \$RADIUS -- tcpdump -w out.pcap&

              # Start the tcp-dump in ONOS
              for i in \$(seq 0 \$((NUM_OF_ONOS - 1))); do
                INSTANCE="onos-onos-classic-\$i"
                kubectl exec \$INSTANCE -- apt-get update
                kubectl exec \$INSTANCE -- apt-get install -y tcpdump
                kubectl exec \$INSTANCE -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
                _TAG=\$INSTANCE kubectl exec \$INSTANCE -- /usr/bin/tcpdump -nei eth0 port 1812 -w out.pcap&
              done
            fi
          """
        }
      }
    }
    stage('Run Test') {
      steps {
        sh '''
          mkdir -p $WORKSPACE/RobotLogs
          cd $WORKSPACE/voltha-system-tests
          make vst_venv
        '''
        sh '''
          if [ ${withProfiling} = true ] ; then
            mkdir -p $LOG_FOLDER/pprof
            echo $PATH
            # create a helper script that periodically captures pprof profiles from the VOLTHA components
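            # the loop below polls the rw-core (6060), openolt adapter (6061) and ofagent (6062) pprof endpoints every 10 seconds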
            cat << EOF > $WORKSPACE/pprof.sh
timestamp() {
  date +"%T"
}

i=0
while [[ true ]]; do
  ((i++))
  ts=\\$(timestamp)
  go tool pprof -png http://127.0.0.1:6060/debug/pprof/heap > $LOG_FOLDER/pprof/rw-core-heap-\\$i-\\$ts.png
  go tool pprof -png http://127.0.0.1:6060/debug/pprof/goroutine > $LOG_FOLDER/pprof/rw-core-goroutine-\\$i-\\$ts.png
  curl -o $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof http://127.0.0.1:6060/debug/pprof/profile?seconds=10
  go tool pprof -png $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.png

  go tool pprof -png http://127.0.0.1:6061/debug/pprof/heap > $LOG_FOLDER/pprof/openolt-heap-\\$i-\\$ts.png
  go tool pprof -png http://127.0.0.1:6061/debug/pprof/goroutine > $LOG_FOLDER/pprof/openolt-goroutine-\\$i-\\$ts.png
  curl -o $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof http://127.0.0.1:6061/debug/pprof/profile?seconds=10
  go tool pprof -png $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.png

  go tool pprof -png http://127.0.0.1:6062/debug/pprof/heap > $LOG_FOLDER/pprof/ofagent-heap-\\$i-\\$ts.png
  go tool pprof -png http://127.0.0.1:6062/debug/pprof/goroutine > $LOG_FOLDER/pprof/ofagent-goroutine-\\$i-\\$ts.png
  curl -o $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof http://127.0.0.1:6062/debug/pprof/profile?seconds=10
  go tool pprof -png $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.png

  sleep 10
done
EOF

            _TAG="pprof"
            _TAG=$_TAG bash $WORKSPACE/pprof.sh &
          fi
        '''
        timeout(time: 15, unit: 'MINUTES') {
          sh '''
            ROBOT_PARAMS="-v olt:${olts} \
              -v pon:${pons} \
              -v onu:${onus} \
              -v workflow:${workflow} \
              -v withEapol:${withEapol} \
              -v withDhcp:${withDhcp} \
              -v withIgmp:${withIgmp} \
              --noncritical non-critical \
              -e teardown "

            if [ ${withEapol} = false ] ; then
              ROBOT_PARAMS+="-e authentication "
            fi

            if [ ${withDhcp} = false ] ; then
              ROBOT_PARAMS+="-e dhcp "
            fi

            if [ ${provisionSubscribers} = false ] ; then
              # if we're not considering subscribers then we don't care about authentication and dhcp
              ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
            fi

            if [ ${withFlows} = false ] ; then
              ROBOT_PARAMS+="-i setup -i activation "
            fi

            cd $WORKSPACE/voltha-system-tests
            source ./vst_venv/bin/activate
            robot -d $WORKSPACE/RobotLogs \
              $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
          '''
        }
      }
    }
  }
  post {
    always {
      // collect result, done in the "post" step so it's executed even in the
      // event of a timeout in the tests
      sh '''

        # stop the kail processes
        list=($APPS_TO_LOG)
        for app in "${list[@]}"
        do
          echo "Stopping logs for: ${app}"
          _TAG="kail-$app"
          P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
          if [ -n "$P_IDS" ]; then
            echo $P_IDS
            for P_ID in $P_IDS; do
              kill -9 $P_ID
            done
          fi
        done

        if [ ${withPcap} = true ] ; then
          # stop ofAgent tcpdump
          P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
          if [ -n "\$P_ID" ]; then
            kill -9 \$P_ID
          fi

          # stop radius tcpdump
          P_ID="\$(ps e -ww -A | grep "_TAG=radius-tcpdump" | grep -v grep | awk '{print \$1}')"
          if [ -n "\$P_ID" ]; then
            kill -9 \$P_ID
          fi

          # stop onos tcpdump
          LIMIT=$(($NUM_OF_ONOS - 1))
          for i in $(seq 0 $LIMIT); do
            INSTANCE="onos-onos-classic-$i"
            P_ID="\$(ps e -ww -A | grep "_TAG=$INSTANCE" | grep -v grep | awk '{print \$1}')"
            if [ -n "\$P_ID" ]; then
              kill -9 \$P_ID
            fi
          done

          # copy the file
          export OF_AGENT=$(kubectl get pods -l app=ofagent | awk 'NR==2{print $1}') || true
          kubectl cp $OF_AGENT:out.pcap $LOG_FOLDER/ofagent.pcap || true

          export RADIUS=$(kubectl get pods -l app=radius | awk 'NR==2{print $1}') || true
          kubectl cp $RADIUS:out.pcap $LOG_FOLDER/radius.pcap || true

          LIMIT=$(($NUM_OF_ONOS - 1))
          for i in $(seq 0 $LIMIT); do
            INSTANCE="onos-onos-classic-$i"
            kubectl cp $INSTANCE:out.pcap $LOG_FOLDER/$INSTANCE.pcap || true
          done
        fi

        cd voltha-system-tests
        source ./vst_venv/bin/activate
        python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
        cat $WORKSPACE/execution-time.txt
      '''
      sh '''
        if [ ${withProfiling} = true ] ; then
          _TAG="pprof"
          P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
          if [ -n "$P_IDS" ]; then
            echo $P_IDS
            for P_ID in $P_IDS; do
              kill -9 $P_ID
            done
          fi
        fi
      '''
      plot([
        csvFileName: 'scale-test.csv',
        csvSeries: [
          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
        ],
        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
      ])
      step([$class: 'RobotPublisher',
        disableArchiveOutput: false,
        logFileName: 'RobotLogs/log.html',
        otherFiles: '',
        outputFileName: 'RobotLogs/output.xml',
        outputPath: '.',
        passThreshold: 100,
        reportFileName: 'RobotLogs/report.html',
        unstableThreshold: 0]);
      // get all the logs from kubernetes PODs
      sh returnStdout: false, script: '''

        # store information on running charts
        helm ls > $LOG_FOLDER/helm-list.txt || true

        # store information on the running pods
        kubectl get pods -o wide > $LOG_FOLDER/pods.txt || true
        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true

        # copy the ONOS logs directly from the container to avoid the color codes
        printf '%s\n' $(kubectl get pods -l app=onos-onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true

        # get radius logs out of the container
        kubectl cp $(kubectl get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER/radius.log || true
      '''
      // dump all the BBSim(s) ONU information
      sh '''
        BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
        IDS=($BBSIM_IDS)

        for bbsim in "${IDS[@]}"
        do
          kubectl exec -t $bbsim -- bbsimctl onu list > $LOG_FOLDER/$bbsim-device-list.txt || true
          kubectl exec -t $bbsim -- bbsimctl service list > $LOG_FOLDER/$bbsim-service-list.txt || true
        done
      '''
      // get DHCP server stats
      sh '''
        BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
        IDS=($BBSIM_IDS)

        for bbsim in "${IDS[@]}"
        do
          kubectl exec -t $bbsim -- dhcpd -lf /var/lib/dhcp/dhcpd.leases -play /tmp/dhcplog 2>&1 | tee $LOG_FOLDER/$bbsim-dhcp-replay.txt || true
          kubectl cp $bbsim:/tmp/dhcplog $LOG_FOLDER/$bbsim-dhcp-logs || true
          kubectl cp $bbsim:/var/lib/dhcp/dhcpd.leases $LOG_FOLDER/$bbsim-dhcp-leases || true
        done
      '''
      // get ONOS debug infos
      sh '''

        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt || true
        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt || true
        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt || true
        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt || true

        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt || true
        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt || true

        if [ ${withFlows} = true ] ; then
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt || true
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt || true
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt || true
        fi

        if [ ${provisionSubscribers} = true ]; then
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt || true
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt || true
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt || true
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt || true
        fi

        if [ ${withEapol} = true ] ; then
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt || true
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt || true
        fi

        if [ ${withDhcp} = true ] ; then
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt || true
        fi
      '''
      // collect etcd metrics
      sh '''
        mkdir -p $WORKSPACE/etcd-metrics
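        # the hard-coded address below is assumed to be the Prometheus API endpoint exposed by the monitoring deployment on the target cluster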
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-sum.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-bucket.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_wal_fsync_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-wal-fsync-time-bucket.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_network_peer_round_trip_time_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-network-peer-round-trip-time-seconds.json || true

      '''
      // get VOLTHA debug infos
      script {
        try {
          sh '''
            voltctl -m 8MB device list -o json > $LOG_FOLDER/device-list.json || true
            python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
            rm $LOG_FOLDER/device-list.json || true
            voltctl -m 8MB device list > $LOG_FOLDER/voltha-devices-list.txt || true

            printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
            printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true

            printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
            printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
          '''
        } catch(e) {
          sh '''
            echo "Can't get device list from voltctl"
          '''
        }
      }
      // get cpu usage by container
      sh '''
        if [ ${withMonitoring} = true ] ; then
          cd $WORKSPACE/voltha-system-tests
          source ./vst_venv/bin/activate
          sleep 60 # we have to wait for prometheus to collect all the information
          python tests/scale/sizing.py -o $WORKSPACE/plots || true
        fi
      '''
      archiveArtifacts artifacts: 'kind-voltha/install-minimal.log,execution-time.txt,logs/*,logs/pprof/*,RobotLogs/*,plots/*,etcd-metrics/*'
    }
  }
}