// Copyright 2019-present Open Networking Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// deploys VOLTHA and performs a scale test

// this function generates the correct parameters for ofAgent
// to connect to multiple ONOS instances
def ofAgentConnections(numOfOnos, releaseName, namespace) {
  def params = " "
  numOfOnos.times {
    params += "--set voltha.services.controller[${it}].address=${releaseName}-onos-classic-${it}.${releaseName}-onos-classic-hs.${namespace}.svc:6653 "
  }
  return params
}
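// For illustration (sample values), ofAgentConnections(2, "voltha-infra", "default") returns roughly:
//   --set voltha.services.controller[0].address=voltha-infra-onos-classic-0.voltha-infra-onos-classic-hs.default.svc:6653
//   --set voltha.services.controller[1].address=voltha-infra-onos-classic-1.voltha-infra-onos-classic-hs.default.svc:6653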

pipeline {

  /* no label, executor is determined by JJB */
  agent {
    label "${params.buildNode}"
  }
  options {
    timeout(time: 60, unit: 'MINUTES')
  }
  environment {
    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
    KUBECONFIG="$HOME/.kube/config"
    VOLTCONFIG="$HOME/.volt/config"
    SSHPASS="karaf"
    VOLTHA_LOG_LEVEL="${logLevel}"
    NUM_OF_BBSIM="${olts}"
    NUM_OF_OPENONU="${openonuAdapterReplicas}"
    NUM_OF_ONOS="${onosReplicas}"
    NUM_OF_ATOMIX="${atomixReplicas}"
    EXTRA_HELM_FLAGS="${extraHelmFlags} " // note that the trailing space is required to separate the parameters from appends done later

    APPS_TO_LOG="etcd kafka onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server onos-config-loader"
    LOG_FOLDER="$WORKSPACE/logs"

    GERRIT_PROJECT="${GERRIT_PROJECT}"
  }

  stages {
    stage ('Cleanup') {
      steps {
        timeout(time: 11, unit: 'MINUTES') {
          sh returnStdout: false, script: '''
            helm repo add onf https://charts.opencord.org
            helm repo update

            NAMESPACES="voltha1 voltha2 infra default"
            for NS in $NAMESPACES
            do
              for hchart in $(helm list -n $NS -q | grep -E -v 'docker-registry|kafkacat');
              do
                echo "Purging chart: ${hchart}"
                helm delete -n $NS "${hchart}"
              done
            done

            # wait for pods to be removed
            echo -ne "\nWaiting for PODs to be removed..."
            PODS=$(kubectl get pods --all-namespaces --no-headers | grep -v -E "kube|cattle|registry|fleet" | wc -l)
            while [[ $PODS != 0 ]]; do
              sleep 5
              echo -ne "."
              PODS=$(kubectl get pods --all-namespaces --no-headers | grep -v -E "kube|cattle|registry|fleet" | wc -l)
            done

            # remove orphaned port-forward from different namespaces
            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9

            cd $WORKSPACE
            rm -rf $WORKSPACE/*
          '''
        }
      }
    }
    stage('Clone voltha-system-tests') {
      steps {
        checkout([
          $class: 'GitSCM',
          userRemoteConfigs: [[
            url: "https://gerrit.opencord.org/voltha-system-tests",
            refspec: "${volthaSystemTestsChange}"
          ]],
          branches: [[ name: "${release}", ]],
          extensions: [
            [$class: 'WipeWorkspace'],
            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
          ],
        ])
        script {
          sh(script:"""
            if [ '${volthaSystemTestsChange}' != '' ] ; then
              cd $WORKSPACE/voltha-system-tests;
              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
            fi
          """)
        }
      }
    }
    stage('Clone voltha-helm-charts') {
      steps {
        checkout([
          $class: 'GitSCM',
          userRemoteConfigs: [[
            url: "https://gerrit.opencord.org/voltha-helm-charts",
            refspec: "${volthaHelmChartsChange}"
          ]],
          branches: [[ name: "${release}", ]],
          extensions: [
            [$class: 'WipeWorkspace'],
            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-helm-charts"],
            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
          ],
        ])
        script {
          sh(script:"""
            if [ '${volthaHelmChartsChange}' != '' ] ; then
              cd $WORKSPACE/voltha-helm-charts;
              git fetch https://gerrit.opencord.org/voltha-helm-charts ${volthaHelmChartsChange} && git checkout FETCH_HEAD
            fi
          """)
        }
      }
    }
    stage('Build patch') {
      when {
        expression {
          return params.GERRIT_PROJECT
        }
      }
      steps {
        sh """
          git clone https://\$GERRIT_HOST/\$GERRIT_PROJECT
          cd \$GERRIT_PROJECT
          git fetch https://\$GERRIT_HOST/\$GERRIT_PROJECT \$GERRIT_REFSPEC && git checkout FETCH_HEAD

          DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-build
          DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-push
        """
      }
    }
    stage('Deploy common infrastructure') {
      // includes monitoring, kafka, etcd
      steps {
        sh '''
          helm install kafka $HOME/teone/helm-charts/kafka --set replicaCount=${kafkaReplicas},replicas=${kafkaReplicas} --set persistence.enabled=false \
            --set zookeeper.replicaCount=${kafkaReplicas} --set zookeeper.persistence.enabled=false \
            --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default

          # the ETCD chart uses "auth" for reasons different than BBSim, so strip that away
          ETCD_FLAGS=$(echo ${extraHelmFlags} | sed -e 's/--set auth=false / /g' -e 's/--set auth=true / /g')
          ETCD_FLAGS+=" --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=${etcdReplicas}"
          ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
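          # For example (illustrative values only): with extraHelmFlags='--set auth=false --set defaults.log_level=WARN'
          # ETCD_FLAGS ends up roughly as
          # ' --set defaults.log_level=WARN --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=... --set memoryMode=... '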
          helm install --set replicas=${etcdReplicas} etcd $HOME/teone/helm-charts/etcd $ETCD_FLAGS

          if [ ${withMonitoring} = true ] ; then
            helm install nem-monitoring onf/nem-monitoring \
              -f $HOME/voltha-scale/grafana.yaml \
              --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
              --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
          fi
        '''
      }
    }
    stage('Deploy Voltha') {
      steps {
        timeout(time: 10, unit: 'MINUTES') {
          script {
            sh returnStdout: false, script: '''
              # start logging with kail

              mkdir -p $LOG_FOLDER

              list=($APPS_TO_LOG)
              for app in "${list[@]}"
              do
                echo "Starting logs for: ${app}"
                _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
              done
            '''
            sh returnStdout: false, script: """

              export EXTRA_HELM_FLAGS+=' '

              # BBSim custom image handling
              if [ '${bbsimImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'bbsim' ]; then
                IFS=: read -r bbsimRepo bbsimTag <<< '${bbsimImg.trim()}'
                EXTRA_HELM_FLAGS+="--set images.bbsim.repository=\$bbsimRepo,images.bbsim.tag=\$bbsimTag "
              fi
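              # Example (illustrative value): bbsimImg='myregistry/bbsim:master' is split by the
              # 'IFS=: read' above into repository 'myregistry/bbsim' and tag 'master'; the same
              # repo:tag handling is applied to all the custom image parameters below.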

              # VOLTHA custom image handling
              if [ '${rwCoreImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-go' ]; then
                IFS=: read -r rwCoreRepo rwCoreTag <<< '${rwCoreImg.trim()}'
                EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=\$rwCoreRepo,voltha.images.rw_core.tag=\$rwCoreTag "
              fi

              # ofAgent custom image handling
              if [ '${ofAgentImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'of-agent' ]; then
                IFS=: read -r ofAgentRepo ofAgentTag <<< '${ofAgentImg.trim()}'
                EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=\$ofAgentRepo,voltha.images.ofagent.tag=\$ofAgentTag "
              fi

              # OpenOLT custom image handling
              if [ '${openoltAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openolt-adapter' ]; then
                IFS=: read -r openoltAdapterRepo openoltAdapterTag <<< '${openoltAdapterImg.trim()}'
                EXTRA_HELM_FLAGS+="--set voltha-adapter-openolt.images.adapter_open_olt.repository=\$openoltAdapterRepo,voltha-adapter-openolt.images.adapter_open_olt.tag=\$openoltAdapterTag "
              fi

              # OpenONU custom image handling
              if [ '${openonuAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter' ]; then
                IFS=: read -r openonuAdapterRepo openonuAdapterTag <<< '${openonuAdapterImg.trim()}'
                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu.repository=\$openonuAdapterRepo,voltha-adapter-openonu.images.adapter_open_onu.tag=\$openonuAdapterTag "
              fi

              # OpenONU GO custom image handling
              if [ '${openonuAdapterGoImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter-go' ]; then
                IFS=: read -r openonuAdapterGoRepo openonuAdapterGoTag <<< '${openonuAdapterGoImg.trim()}'
                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu_go.repository=\$openonuAdapterGoRepo,voltha-adapter-openonu.images.adapter_open_onu_go.tag=\$openonuAdapterGoTag "
              fi

              # ONOS custom image handling
              if [ '${onosImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-onos' ]; then
                IFS=: read -r onosRepo onosTag <<< '${onosImg.trim()}'
                EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=\$onosRepo,onos-classic.image.tag=\$onosTag "
              fi

              # set BBSim parameters
              EXTRA_HELM_FLAGS+='--set enablePerf=true,pon=${pons},onu=${onus} '

              # disable the securityContext, this is a development cluster
              EXTRA_HELM_FLAGS+='--set securityContext.enabled=false '

              # No persistent-volume-claims in Atomix
              EXTRA_HELM_FLAGS+="--set onos-classic.atomix.persistence.enabled=false "

              echo "Installing with the following extra arguments:"
              echo $EXTRA_HELM_FLAGS

              # Use custom built images

              if [ '\$GERRIT_PROJECT' == 'voltha-go' ]; then
                EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=${dockerRegistry}/voltha/voltha-rw-core,voltha.images.rw_core.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'voltha-openolt-adapter' ]; then
                EXTRA_HELM_FLAGS+="--set voltha-openolt-adapter.images.adapter_open_olt.repository=${dockerRegistry}/voltha/voltha-openolt-adapter,voltha-openolt-adapter.images.adapter_open_olt.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter' ]; then
                EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter.images.adapter_open_onu.repository=${dockerRegistry}/voltha/voltha-openonu-adapter,voltha-openonu-adapter.images.adapter_open_onu.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter-go' ]; then
                EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter-go.images.adapter_open_onu_go.repository=${dockerRegistry}/voltha/voltha-openonu-adapter-go,voltha-openonu-adapter-go.images.adapter_open_onu_go.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'ofagent-go' ]; then
                EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=${dockerRegistry}/voltha/voltha-ofagent-go,voltha.images.ofagent.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'voltha-onos' ]; then
                EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=${dockerRegistry}/voltha/voltha-onos,onos-classic.image.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'bbsim' ]; then
                EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
              fi

              helm upgrade --install voltha-infra onf/voltha-infra \$EXTRA_HELM_FLAGS \
                --set onos-classic.replicas=${onosReplicas},onos-classic.atomix.replicas=${atomixReplicas} \
                --set etcd.enabled=false,kafka.enabled=false \
                --set global.log_level=${logLevel} \
                -f $WORKSPACE/voltha-helm-charts/examples/${workflow}-values.yaml \
                --version 0.1.13

              helm upgrade --install voltha1 onf/voltha-stack \$EXTRA_HELM_FLAGS \
                --set global.stack_name=voltha1 \
                --set global.voltha_infra_name=voltha-infra \
                --set global.voltha_infra_namespace=default \
                --set global.log_level=${logLevel} \
                ${ofAgentConnections(onosReplicas.toInteger(), "voltha-infra", "default")} \
                --set voltha.services.kafka.adapter.address=kafka.default.svc:9092 \
                --set voltha.services.kafka.cluster.address=kafka.default.svc:9092 \
                --set voltha.services.etcd.address=etcd.default.svc:2379 \
                --set voltha-adapter-openolt.services.kafka.adapter.address=kafka.default.svc:9092 \
                --set voltha-adapter-openolt.services.kafka.cluster.address=kafka.default.svc:9092 \
                --set voltha-adapter-openolt.services.etcd.address=etcd.default.svc:2379 \
                --set voltha-adapter-openonu.services.kafka.adapter.address=kafka.default.svc:9092 \
                --set voltha-adapter-openonu.services.kafka.cluster.address=kafka.default.svc:9092 \
                --set voltha-adapter-openonu.services.etcd.address=etcd.default.svc:2379 \
                --version 0.1.17

              for i in {0..${olts.toInteger() - 1}}; do
                stackId=1
                helm upgrade --install bbsim\$i onf/bbsim \$EXTRA_HELM_FLAGS \
                  --set olt_id="\${stackId}\${i}" \
                  --set onu=${onus},pon=${pons} \
                  --set global.log_level=${logLevel.toLowerCase()} \
                  -f $WORKSPACE/voltha-helm-charts/examples/${workflow}-values.yaml \
                  --version 4.2.0
              done
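              # With stackId=1 the loop above deploys one BBSim release per OLT: bbsim0 gets
              # olt_id "10", bbsim1 gets olt_id "11", and so on.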
            """
            sh """
              set +x

              echo -ne "\nWaiting for VOLTHA and ONOS to start..."
              voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
              onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
              while [[ \$voltha != 0 || \$onos != 0 ]]; do
                sleep 5
                echo -ne "."
                voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
                onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
              done
              echo -ne "\nVOLTHA and ONOS pods ready\n"
              kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l
              kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l
            """
            start_port_forward(olts)
          }
        }
      }
    }
    stage('Configuration') {
      steps {
        script {
          def tech_prof_directory = "XGS-PON"
          sh returnStdout: false, script: """
            # Setting link discovery
            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}

            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true

            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900

            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500

            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000

            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set ${logLevel} org.onosproject
            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set ${logLevel} org.opencord

            # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set DEBUG org.opencord.cordmcast
            # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set DEBUG org.onosproject.mcast
            # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set DEBUG org.opencord.igmpproxy
            # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set DEBUG org.opencord.olt

            kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false

            # Set Flows/Ports/Meters poll frequency
            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}

            if [ ${withFlows} = false ]; then
              sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
            fi

            if [ '${workflow}' = 'tt' ]; then
              etcd_container=\$(kubectl get pods --all-namespaces | grep etcd | awk 'NR==1{print \$2}')
              kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
              put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64')
              kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
              put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65')
              kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST.json \$etcd_container:/tmp/mcast.json
              put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66')
            fi
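            # The TT technology profiles loaded above are stored in etcd under
            # service/voltha/technology_profiles/XGS-PON/id: 64 for HSIA, 65 for VoIP and 66 for MCAST.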

            if [ ${withPcap} = true ] ; then
              # Start the tcp-dump in ofagent
              export OF_AGENT=\$(kubectl get pods -l app=ofagent -o name)
              kubectl exec \$OF_AGENT -- apk update
              kubectl exec \$OF_AGENT -- apk add tcpdump
              kubectl exec \$OF_AGENT -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
              _TAG=ofagent-tcpdump kubectl exec \$OF_AGENT -- tcpdump -nei eth0 -w out.pcap&

              # Start the tcp-dump in radius
              export RADIUS=\$(kubectl get pods -l app=radius -o name)
              kubectl exec \$RADIUS -- apt-get update
              kubectl exec \$RADIUS -- apt-get install -y tcpdump
              _TAG=radius-tcpdump kubectl exec \$RADIUS -- tcpdump -w out.pcap&

              # Start the tcp-dump in ONOS
              for i in \$(seq 0 \$((NUM_OF_ONOS - 1))); do
                INSTANCE="onos-onos-classic-\$i"
                kubectl exec \$INSTANCE -- apt-get update
                kubectl exec \$INSTANCE -- apt-get install -y tcpdump
                kubectl exec \$INSTANCE -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
                _TAG=\$INSTANCE kubectl exec \$INSTANCE -- /usr/bin/tcpdump -nei eth0 port 1812 -w out.pcap&
              done
            fi
          """
        }
      }
    }
    stage('Load MIB Template') {
      when {
        expression {
          return params.withMibTemplate
        }
      }
      steps {
        sh """
          # load MIB template
          wget https://raw.githubusercontent.com/opencord/voltha-openonu-adapter-go/master/templates/BBSM-12345123451234512345-00000000000001-v1.json
          cat BBSM-12345123451234512345-00000000000001-v1.json | kubectl exec -it \$(kubectl get pods |grep etcd | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/12345123451234512345/00000000000001
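          # Note: the etcd key segments (BBSM/12345123451234512345/00000000000001) match the fields
          # encoded in the name of the template file downloaded above.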
        """
      }
    }
    stage('Run Test') {
      steps {
        sh '''
          mkdir -p $WORKSPACE/RobotLogs
          cd $WORKSPACE/voltha-system-tests
          make vst_venv
        '''
        sh '''
          if [ ${withProfiling} = true ] ; then
            mkdir -p $LOG_FOLDER/pprof
            echo $PATH
            # Create a script that periodically collects pprof profiles from the VOLTHA components
            cat << EOF > $WORKSPACE/pprof.sh
timestamp() {
  date +"%T"
}

i=0
while [[ true ]]; do
  ((i++))
  ts=$(timestamp)
  go tool pprof -png http://127.0.0.1:6060/debug/pprof/heap > $LOG_FOLDER/pprof/rw-core-heap-\\$i-\\$ts.png
  go tool pprof -png http://127.0.0.1:6060/debug/pprof/goroutine > $LOG_FOLDER/pprof/rw-core-goroutine-\\$i-\\$ts.png
  curl -o $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof http://127.0.0.1:6060/debug/pprof/profile?seconds=10
  go tool pprof -png $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.png

  go tool pprof -png http://127.0.0.1:6061/debug/pprof/heap > $LOG_FOLDER/pprof/openolt-heap-\\$i-\\$ts.png
  go tool pprof -png http://127.0.0.1:6061/debug/pprof/goroutine > $LOG_FOLDER/pprof/openolt-goroutine-\\$i-\\$ts.png
  curl -o $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof http://127.0.0.1:6061/debug/pprof/profile?seconds=10
  go tool pprof -png $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.png

  go tool pprof -png http://127.0.0.1:6062/debug/pprof/heap > $LOG_FOLDER/pprof/ofagent-heap-\\$i-\\$ts.png
  go tool pprof -png http://127.0.0.1:6062/debug/pprof/goroutine > $LOG_FOLDER/pprof/ofagent-goroutine-\\$i-\\$ts.png
  curl -o $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof http://127.0.0.1:6062/debug/pprof/profile?seconds=10
  go tool pprof -png $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.png

  sleep 10
done
EOF

            _TAG="pprof"
            _TAG=$_TAG bash $WORKSPACE/pprof.sh &
          fi
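          # The collection above assumes the components' pprof endpoints are reachable on localhost:
          # 6060 for rw-core, 6061 for the openolt adapter and 6062 for ofagent (matching the output file names).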
        '''
        timeout(time: 15, unit: 'MINUTES') {
          sh '''
            ROBOT_PARAMS="--exitonfailure \
              -v olt:${olts} \
              -v pon:${pons} \
              -v onu:${onus} \
              -v workflow:${workflow} \
              -v withEapol:${withEapol} \
              -v withDhcp:${withDhcp} \
              -v withIgmp:${withIgmp} \
              --noncritical non-critical \
              -e igmp -e teardown "

            if [ ${withEapol} = false ] ; then
              ROBOT_PARAMS+="-e authentication "
            fi

            if [ ${withDhcp} = false ] ; then
              ROBOT_PARAMS+="-e dhcp "
            fi

            if [ ${provisionSubscribers} = false ] ; then
              # if we're not considering subscribers then we don't care about authentication and dhcp
              ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
            fi

            if [ ${withFlows} = false ] ; then
              ROBOT_PARAMS+="-i setup -i activation "
            fi

            cd $WORKSPACE/voltha-system-tests
            source ./vst_venv/bin/activate
            robot -d $WORKSPACE/RobotLogs \
              $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
          '''
        }
      }
    }
    stage('Run Igmp Tests') {
      environment {
        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/IgmpTests"
      }
      when {
        expression {
          return params.withIgmp
        }
      }
      steps {
        sh '''
          set +e
          mkdir -p $ROBOT_LOGS_DIR
          cd $WORKSPACE/voltha-system-tests
          make vst_venv
        '''
        timeout(time: 11, unit: 'MINUTES') {
          sh '''
            ROBOT_PARAMS="--exitonfailure \
              -v olt:${olts} \
              -v pon:${pons} \
              -v onu:${onus} \
              -v workflow:${workflow} \
              -v withEapol:${withEapol} \
              -v withDhcp:${withDhcp} \
              -v withIgmp:${withIgmp} \
              --noncritical non-critical \
              -i igmp \
              -e setup -e activation -e flow-before \
              -e authentication -e provision -e flow-after \
              -e dhcp -e teardown "
            cd $WORKSPACE/voltha-system-tests
            source ./vst_venv/bin/activate
            robot -d $ROBOT_LOGS_DIR \
              $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
          '''
        }
      }
    }
  }
  post {
    always {
      // collect result, done in the "post" step so it's executed even in the
      // event of a timeout in the tests
      sh '''

        # stop the kail processes
        list=($APPS_TO_LOG)
        for app in "${list[@]}"
        do
          echo "Stopping logs for: ${app}"
          _TAG="kail-$app"
          P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
          if [ -n "$P_IDS" ]; then
            echo $P_IDS
            for P_ID in $P_IDS; do
              kill -9 $P_ID
            done
          fi
        done

        if [ ${withPcap} = true ] ; then
          # stop ofAgent tcpdump
          P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
          if [ -n "\$P_ID" ]; then
            kill -9 \$P_ID
          fi

          # stop radius tcpdump
          P_ID="\$(ps e -ww -A | grep "_TAG=radius-tcpdump" | grep -v grep | awk '{print \$1}')"
          if [ -n "\$P_ID" ]; then
            kill -9 \$P_ID
          fi

          # stop onos tcpdump
          LIMIT=$(($NUM_OF_ONOS - 1))
          for i in $(seq 0 $LIMIT); do
            INSTANCE="onos-onos-classic-$i"
            P_ID="\$(ps e -ww -A | grep "_TAG=$INSTANCE" | grep -v grep | awk '{print \$1}')"
            if [ -n "\$P_ID" ]; then
              kill -9 \$P_ID
            fi
          done

          # copy the file
          export OF_AGENT=$(kubectl get pods -l app=ofagent | awk 'NR==2{print $1}') || true
          kubectl cp $OF_AGENT:out.pcap $LOG_FOLDER/ofagent.pcap || true

          export RADIUS=$(kubectl get pods -l app=radius | awk 'NR==2{print $1}') || true
          kubectl cp $RADIUS:out.pcap $LOG_FOLDER/radius.pcap || true

          LIMIT=$(($NUM_OF_ONOS - 1))
          for i in $(seq 0 $LIMIT); do
            INSTANCE="onos-onos-classic-$i"
            kubectl cp $INSTANCE:out.pcap $LOG_FOLDER/$INSTANCE.pcap || true
          done
        fi

        cd voltha-system-tests
        source ./vst_venv/bin/activate
        python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
        cat $WORKSPACE/execution-time.txt
      '''
      sh '''
        if [ ${withProfiling} = true ] ; then
          _TAG="pprof"
          P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
          if [ -n "$P_IDS" ]; then
            echo $P_IDS
            for P_ID in $P_IDS; do
              kill -9 $P_ID
            done
          fi
        fi
      '''
      plot([
        csvFileName: 'scale-test.csv',
        csvSeries: [
          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
        ],
        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
      ])
      step([$class: 'RobotPublisher',
        disableArchiveOutput: false,
        logFileName: '**/log*.html',
        otherFiles: '',
        outputFileName: '**/output*.xml',
        outputPath: 'RobotLogs',
        passThreshold: 100,
        reportFileName: '**/report*.html',
        unstableThreshold: 0]);
      // get all the logs from kubernetes PODs
      sh returnStdout: false, script: '''

        # store information on running charts
        helm ls > $LOG_FOLDER/helm-list.txt || true

        # store information on the running pods
        kubectl get pods -o wide > $LOG_FOLDER/pods.txt || true
        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true

        # copy the ONOS logs directly from the container to avoid the color codes
        printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true

        # get ONOS cfg from the 3 nodes
        printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl exec -it # -- ${karafHome}/bin/client cfg get > $LOG_FOLDER/#.cfg" || true

        # get radius logs out of the container
        kubectl cp $(kubectl get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER/radius.log || true
      '''
      // dump all the BBSim(s) ONU information
      sh '''
        BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
        IDS=($BBSIM_IDS)

        for bbsim in "${IDS[@]}"
        do
          kubectl exec -t $bbsim -- bbsimctl onu list > $LOG_FOLDER/$bbsim-device-list.txt || true
          kubectl exec -t $bbsim -- bbsimctl service list > $LOG_FOLDER/$bbsim-service-list.txt || true
          kubectl exec -t $bbsim -- bbsimctl olt resources GEM_PORT > $LOG_FOLDER/$bbsim-flows-gem-ports.txt || true
          kubectl exec -t $bbsim -- bbsimctl olt resources ALLOC_ID > $LOG_FOLDER/$bbsim-flows-alloc-ids.txt || true
          kubectl exec -t $bbsim -- bbsimctl olt pons > $LOG_FOLDER/$bbsim-pon-resources.txt || true
        done
      '''
      script {
        // first make sure the port-forward is still running,
        // sometimes Jenkins kills it regardless of the JENKINS_NODE_COOKIE=dontKillMe
        def running = sh (
          script: 'ps aux | grep port-forw | grep -E "onos|voltha" | grep -v grep | wc -l',
          returnStdout: true
        ).trim()
        // if any of the voltha-api, onos-rest, onos-ssh port-forwards are not there
        // kill all and restart
        if (running != "3") {
          start_port_forward(olts)
        }
      }
      // get ONOS debug infos
      sh '''

        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg get > $LOG_FOLDER/onos-cfg.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 netcfg > $LOG_FOLDER/onos-netcfg.txt

        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt

        if [ ${withFlows} = true ] ; then
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt
        fi

        if [ ${provisionSubscribers} = true ]; then
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt
        fi

        if [ ${withEapol} = true ] ; then
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt
        fi

        if [ ${withDhcp} = true ] ; then
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt
        fi

        if [ ${withIgmp} = true ] ; then
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 mcast-host-routes > $LOG_FOLDER/onos-mcast-host-routes.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 mcast-host-show > $LOG_FOLDER/onos-mcast-host-show.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 groups > $LOG_FOLDER/onos-groups.txt
        fi
      '''
      // collect etcd metrics
      sh '''
        mkdir -p $WORKSPACE/etcd-metrics
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-sum.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-bucket.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_wal_fsync_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-wal-fsync-time-bucket.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_network_peer_round_trip_time_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-network-peer-round-trip-time-seconds.json || true
      '''
      // get VOLTHA debug infos
      script {
        try {
          sh '''
            voltctl -m 8MB device list -o json > $LOG_FOLDER/device-list.json || true
            python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
            rm $LOG_FOLDER/device-list.json || true
            voltctl -m 8MB device list > $LOG_FOLDER/voltha-devices-list.txt || true

            printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
            printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true

            printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
            printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
          '''
        } catch(e) {
          sh '''
            echo "Can't get device list from voltctl"
          '''
        }
      }
      // get cpu usage by container
      sh '''
        if [ ${withMonitoring} = true ] ; then
          cd $WORKSPACE/voltha-system-tests
          source ./vst_venv/bin/activate
          sleep 60 # we have to wait for prometheus to collect all the information
          python tests/scale/sizing.py -o $WORKSPACE/plots || true
        fi
      '''
      archiveArtifacts artifacts: 'execution-time.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
    }
  }
}

def start_port_forward(olts) {
  sh """
    daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/voltha-infra-onos-classic-hs 8101:8101
    daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/voltha-infra-onos-classic-hs 8181:8181
    daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/voltha1-voltha-api 55555:55555

    bbsimRestPortFwd=50071
    for i in {0..${olts.toInteger() - 1}}; do
      daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/bbsim\${i} \${bbsimRestPortFwd}:50071
      ((bbsimRestPortFwd++))
    done
  """
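  // Resulting local endpoints (example with olts=2): ONOS SSH on 8101, ONOS REST on 8181, the VOLTHA
  // API on 55555, and one BBSim REST port per OLT starting at 50071 (bbsim0 -> 50071, bbsim1 -> 50072).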
800}