// Copyright 2019-present Open Networking Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// deploys VOLTHA and performs a scale test

// this function generates the correct parameters for ofAgent
// to connect to multiple ONOS instances
def ofAgentConnections(numOfOnos, releaseName, namespace) {
  def params = " "
  numOfOnos.times {
    params += "--set voltha.services.controller[${it}].address=${releaseName}-onos-classic-${it}.${releaseName}-onos-classic-hs.${namespace}.svc:6653 "
  }
  return params
}
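// Illustrative example (comment only, not executed): ofAgentConnections(2, "voltha-infra", "default")
// returns a string containing:
//   --set voltha.services.controller[0].address=voltha-infra-onos-classic-0.voltha-infra-onos-classic-hs.default.svc:6653
//   --set voltha.services.controller[1].address=voltha-infra-onos-classic-1.voltha-infra-onos-classic-hs.default.svc:6653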

pipeline {

  /* no label, executor is determined by JJB */
  agent {
    label "${params.buildNode}"
  }
  options {
    timeout(time: 60, unit: 'MINUTES')
  }
  environment {
    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
    KUBECONFIG="$HOME/.kube/config"
    VOLTCONFIG="$HOME/.volt/config-2.7" // voltha-2.7 does not have ingress and still relies on port-forwarding
    SSHPASS="karaf"
    VOLTHA_LOG_LEVEL="${logLevel}"
    NUM_OF_BBSIM="${olts}"
    NUM_OF_OPENONU="${openonuAdapterReplicas}"
    NUM_OF_ONOS="${onosReplicas}"
    NUM_OF_ATOMIX="${atomixReplicas}"
    EXTRA_HELM_FLAGS="${extraHelmFlags} " // note that the trailing space is required to separate the parameters from appends done later

    APPS_TO_LOG="etcd kafka onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server onos-config-loader"
    LOG_FOLDER="$WORKSPACE/logs"

    GERRIT_PROJECT="${GERRIT_PROJECT}"
  }

  stages {
    stage('Cleanup') {
      steps {
        timeout(time: 11, unit: 'MINUTES') {
          sh returnStdout: false, script: '''
            helm repo add onf https://charts.opencord.org
            helm repo update

            NAMESPACES="voltha1 voltha2 infra default"
            for NS in $NAMESPACES
            do
              for hchart in $(helm list -n $NS -q | grep -E -v 'docker-registry|kafkacat');
              do
                echo "Purging chart: ${hchart}"
                helm delete -n $NS "${hchart}"
              done
            done

            # wait for pods to be removed
            echo -ne "\nWaiting for PODs to be removed..."
            PODS=$(kubectl get pods --all-namespaces --no-headers | grep -v -E "kube|cattle|registry|fleet|ingress-nginx" | wc -l)
            while [[ $PODS != 0 ]]; do
              sleep 5
              echo -ne "."
              PODS=$(kubectl get pods --all-namespaces --no-headers | grep -v -E "kube|cattle|registry|fleet|ingress-nginx" | wc -l)
            done

            # remove orphaned port-forward from different namespaces
            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9

            cd $WORKSPACE
            rm -rf $WORKSPACE/*
          '''
        }
      }
    }
    stage('Clone voltha-system-tests') {
      steps {
        checkout([
          $class: 'GitSCM',
          userRemoteConfigs: [[
            url: "https://gerrit.opencord.org/voltha-system-tests",
            refspec: "${volthaSystemTestsChange}"
          ]],
          branches: [[ name: "${release}", ]],
          extensions: [
            [$class: 'WipeWorkspace'],
            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
          ],
        ])
        script {
          sh(script:"""
            if [ '${volthaSystemTestsChange}' != '' ] ; then
              cd $WORKSPACE/voltha-system-tests;
              git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
            fi
          """)
        }
      }
    }
    stage('Clone voltha-helm-charts') {
      steps {
        checkout([
          $class: 'GitSCM',
          userRemoteConfigs: [[
            url: "https://gerrit.opencord.org/voltha-helm-charts",
            refspec: "${volthaHelmChartsChange}"
          ]],
          branches: [[ name: "${release}", ]],
          extensions: [
            [$class: 'WipeWorkspace'],
            [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-helm-charts"],
            [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
          ],
        ])
        script {
          sh(script:"""
            if [ '${volthaHelmChartsChange}' != '' ] ; then
              cd $WORKSPACE/voltha-helm-charts;
              git fetch https://gerrit.opencord.org/voltha-helm-charts ${volthaHelmChartsChange} && git checkout FETCH_HEAD
            fi
          """)
        }
      }
    }
    stage('Build patch') {
      when {
        expression {
          return params.GERRIT_PROJECT
        }
      }
      steps {
        sh """
          git clone https://\$GERRIT_HOST/\$GERRIT_PROJECT
          cd \$GERRIT_PROJECT
          git fetch https://\$GERRIT_HOST/\$GERRIT_PROJECT \$GERRIT_REFSPEC && git checkout FETCH_HEAD

          DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-build
          DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-push
        """
      }
    }
    stage('Deploy common infrastructure') {
      // includes monitoring, kafka, etcd
      steps {
        sh '''
          helm install kafka $HOME/teone/helm-charts/kafka --set replicaCount=${kafkaReplicas},replicas=${kafkaReplicas} --set persistence.enabled=false \
            --set zookeeper.replicaCount=${kafkaReplicas} --set zookeeper.persistence.enabled=false \
            --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default

          # the etcd chart uses "auth" for reasons different from BBSim, so strip that flag away
          ETCD_FLAGS=$(echo ${extraHelmFlags} | sed -e 's/--set auth=false / /g' -e 's/--set auth=true / /g')
          ETCD_FLAGS+=" --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=${etcdReplicas}"
          ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
          helm install --set replicas=${etcdReplicas} etcd $HOME/teone/helm-charts/etcd $ETCD_FLAGS
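          # Illustrative example (comment only): if extraHelmFlags contained
          # "--set auth=false --set foo=bar", the sed above strips the auth flag so that
          # ETCD_FLAGS starts out as " --set foo=bar" before the etcd-specific settings
          # are appended.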

          if [ ${withMonitoring} = true ] ; then
            helm install nem-monitoring onf/nem-monitoring \
              -f $HOME/voltha-scale/grafana.yaml \
              --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
              --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
          fi
        '''
      }
    }
    stage('Deploy Voltha') {
      steps {
        timeout(time: 10, unit: 'MINUTES') {
          script {
            sh returnStdout: false, script: '''
              # start logging with kail

              mkdir -p $LOG_FOLDER

              list=($APPS_TO_LOG)
              for app in "${list[@]}"
              do
                echo "Starting logs for: ${app}"
                _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
              done
            '''
            sh returnStdout: false, script: """

              export EXTRA_HELM_FLAGS+=' '

              # BBSim custom image handling
              if [ '${bbsimImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'bbsim' ]; then
                IFS=: read -r bbsimRepo bbsimTag <<< '${bbsimImg.trim()}'
                EXTRA_HELM_FLAGS+="--set images.bbsim.repository=\$bbsimRepo,images.bbsim.tag=\$bbsimTag "
              fi
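              # Illustrative note (comment only): "IFS=: read -r repo tag" splits an image
              # reference on the first ':', so e.g. 'voltha/bbsim:master' yields
              # repo=voltha/bbsim and tag=master. The same pattern is reused for the custom
              # VOLTHA, ofAgent, adapter and ONOS images below.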

              # VOLTHA custom image handling
              if [ '${rwCoreImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-go' ]; then
                IFS=: read -r rwCoreRepo rwCoreTag <<< '${rwCoreImg.trim()}'
                EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=\$rwCoreRepo,voltha.images.rw_core.tag=\$rwCoreTag "
              fi

              # ofAgent custom image handling
              if [ '${ofAgentImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'of-agent' ]; then
                IFS=: read -r ofAgentRepo ofAgentTag <<< '${ofAgentImg.trim()}'
                EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=\$ofAgentRepo,voltha.images.ofagent.tag=\$ofAgentTag "
              fi

              # OpenOLT custom image handling
              if [ '${openoltAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openolt-adapter' ]; then
                IFS=: read -r openoltAdapterRepo openoltAdapterTag <<< '${openoltAdapterImg.trim()}'
                EXTRA_HELM_FLAGS+="--set voltha-adapter-openolt.images.adapter_open_olt.repository=\$openoltAdapterRepo,voltha-adapter-openolt.images.adapter_open_olt.tag=\$openoltAdapterTag "
              fi

              # OpenONU custom image handling
              if [ '${openonuAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter' ]; then
                IFS=: read -r openonuAdapterRepo openonuAdapterTag <<< '${openonuAdapterImg.trim()}'
                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu.repository=\$openonuAdapterRepo,voltha-adapter-openonu.images.adapter_open_onu.tag=\$openonuAdapterTag "
              fi

              # OpenONU GO custom image handling
              if [ '${openonuAdapterGoImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter-go' ]; then
                IFS=: read -r openonuAdapterGoRepo openonuAdapterGoTag <<< '${openonuAdapterGoImg.trim()}'
                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu_go.repository=\$openonuAdapterGoRepo,voltha-adapter-openonu.images.adapter_open_onu_go.tag=\$openonuAdapterGoTag "
              fi

              # ONOS custom image handling
              if [ '${onosImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-onos' ]; then
                IFS=: read -r onosRepo onosTag <<< '${onosImg.trim()}'
                EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=\$onosRepo,onos-classic.image.tag=\$onosTag "
              fi

              # set BBSim parameters
              EXTRA_HELM_FLAGS+='--set enablePerf=true,pon=${pons},onu=${onus} '

              # disable the securityContext, this is a development cluster
              EXTRA_HELM_FLAGS+='--set securityContext.enabled=false '

              # No persistent-volume-claims in Atomix
              EXTRA_HELM_FLAGS+="--set onos-classic.atomix.persistence.enabled=false "

              echo "Installing with the following extra arguments:"
              echo $EXTRA_HELM_FLAGS



              # Use custom built images

              if [ '\$GERRIT_PROJECT' == 'voltha-go' ]; then
                EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=${dockerRegistry}/voltha/voltha-rw-core,voltha.images.rw_core.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'voltha-openolt-adapter' ]; then
                EXTRA_HELM_FLAGS+="--set voltha-adapter-openolt.images.adapter_open_olt.repository=${dockerRegistry}/voltha/voltha-openolt-adapter,voltha-adapter-openolt.images.adapter_open_olt.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter' ]; then
                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu.repository=${dockerRegistry}/voltha/voltha-openonu-adapter,voltha-adapter-openonu.images.adapter_open_onu.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter-go' ]; then
                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu_go.repository=${dockerRegistry}/voltha/voltha-openonu-adapter-go,voltha-adapter-openonu.images.adapter_open_onu_go.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'ofagent-go' ]; then
                EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=${dockerRegistry}/voltha/voltha-ofagent-go,voltha.images.ofagent.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'voltha-onos' ]; then
                EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=${dockerRegistry}/voltha/voltha-onos,onos-classic.image.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'bbsim' ]; then
                EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
              fi

              helm upgrade --install voltha-infra onf/voltha-infra \$EXTRA_HELM_FLAGS \
                --set onos-classic.replicas=${onosReplicas},onos-classic.atomix.replicas=${atomixReplicas} \
                --set etcd.enabled=false,kafka.enabled=false \
                --set global.log_level=${logLevel} \
                -f $WORKSPACE/voltha-helm-charts/examples/${workflow}-values.yaml \
                --set onos-classic.onosSshPort=30115 --set onos-classic.onosApiPort=30120 \
                --version 0.1.13

              helm upgrade --install voltha1 onf/voltha-stack \$EXTRA_HELM_FLAGS \
                --set global.stack_name=voltha1 \
                --set global.voltha_infra_name=voltha-infra \
                --set global.voltha_infra_namespace=default \
                --set global.log_level=${logLevel} \
                ${ofAgentConnections(onosReplicas.toInteger(), "voltha-infra", "default")} \
                --set voltha.services.kafka.adapter.address=kafka.default.svc:9092 \
                --set voltha.services.kafka.cluster.address=kafka.default.svc:9092 \
                --set voltha.services.etcd.address=etcd.default.svc:2379 \
                --set voltha-adapter-openolt.services.kafka.adapter.address=kafka.default.svc:9092 \
                --set voltha-adapter-openolt.services.kafka.cluster.address=kafka.default.svc:9092 \
                --set voltha-adapter-openolt.services.etcd.address=etcd.default.svc:2379 \
                --set voltha-adapter-openonu.services.kafka.adapter.address=kafka.default.svc:9092 \
                --set voltha-adapter-openonu.services.kafka.cluster.address=kafka.default.svc:9092 \
                --set voltha-adapter-openonu.services.etcd.address=etcd.default.svc:2379 \
                --version 0.1.17


              for i in {0..${olts.toInteger() - 1}}; do
                stackId=1
                helm upgrade --install bbsim\$i onf/bbsim \$EXTRA_HELM_FLAGS \
                  --set olt_id="\${stackId}\${i}" \
                  --set onu=${onus},pon=${pons} \
                  --set global.log_level=${logLevel.toLowerCase()} \
                  -f $WORKSPACE/voltha-helm-charts/examples/${workflow}-values.yaml \
                  --version 4.2.0
              done
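              # Illustrative note (comment only): with stackId=1 the loop above deploys releases
              # bbsim0, bbsim1, ... whose olt_id values are "10", "11", ..., i.e. the stack id
              # concatenated with the loop index.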
            """
            sh """
              set +x

              echo -ne "\nWaiting for VOLTHA and ONOS to start..."
              voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
              onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
              while [[ \$voltha != 0 || \$onos != 0 ]]; do
                sleep 5
                echo -ne "."
                voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
                onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
              done
              echo -ne "\nVOLTHA and ONOS pods ready\n"
              kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l
              kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l
            """
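            // forward the VOLTHA API (55555) and the per-BBSim REST ports (50071, 50072, ...) locally,
            // since this voltha-2.7 deployment relies on port-forwarding rather than ingress
            // (see start_port_forward() at the bottom of this file)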
            start_port_forward(olts)
          }
        }
      }
    }
    stage('Configuration') {
      steps {
        script {
          def tech_prof_directory = "XGS-PON"
          sh returnStdout: false, script: """
            # Setting link discovery
            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}

            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true

            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900

            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500

            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000

            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set ${logLevel} org.onosproject
            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set ${logLevel} org.opencord

            # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.cordmcast
            # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.onosproject.mcast
            # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.igmpproxy
            # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.olt

            kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false

            # Set Flows/Ports/Meters poll frequency
            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}

            if [ ${withFlows} = false ]; then
              sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 app deactivate org.opencord.olt
            fi

            if [ '${workflow}' = 'tt' ]; then
              etcd_container=\$(kubectl get pods --all-namespaces | grep etcd | awk 'NR==1{print \$2}')
              kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
              put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64')
              kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
              put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65')
              kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST.json \$etcd_container:/tmp/mcast.json
              put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66')
            fi
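            # Note (descriptive comment only): the etcdctl put calls above load the TT HSIA,
            # VoIP and MCAST technology profiles into etcd under IDs 64, 65 and 66 of the
            # XGS-PON technology-profile tree.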

            if [ ${withPcap} = true ] ; then
              # Start the tcp-dump in ofagent
              export OF_AGENT=\$(kubectl get pods -l app=ofagent -o name)
              kubectl exec \$OF_AGENT -- apk update
              kubectl exec \$OF_AGENT -- apk add tcpdump
              kubectl exec \$OF_AGENT -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
              _TAG=ofagent-tcpdump kubectl exec \$OF_AGENT -- tcpdump -nei eth0 -w out.pcap&

              # Start the tcp-dump in radius
              export RADIUS=\$(kubectl get pods -l app=radius -o name)
              kubectl exec \$RADIUS -- apt-get update
              kubectl exec \$RADIUS -- apt-get install -y tcpdump
              _TAG=radius-tcpdump kubectl exec \$RADIUS -- tcpdump -w out.pcap&

              # Start the tcp-dump in ONOS
              for i in \$(seq 0 \$((NUM_OF_ONOS - 1))); do
                INSTANCE="onos-onos-classic-\$i"
                kubectl exec \$INSTANCE -- apt-get update
                kubectl exec \$INSTANCE -- apt-get install -y tcpdump
                kubectl exec \$INSTANCE -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
                _TAG=\$INSTANCE kubectl exec \$INSTANCE -- /usr/bin/tcpdump -nei eth0 port 1812 -w out.pcap&
              done
            fi
          """
        }
      }
    }
    stage('Load MIB Template') {
      when {
        expression {
          return params.withMibTemplate
        }
      }
      steps {
        sh """
          # load MIB template
          wget https://raw.githubusercontent.com/opencord/voltha-openonu-adapter-go/master/templates/BBSM-12345123451234512345-00000000000001-v1.json
          cat BBSM-12345123451234512345-00000000000001-v1.json | kubectl exec -it \$(kubectl get pods |grep etcd | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/12345123451234512345/00000000000001
        """
      }
    }
    stage('Run Test') {
      steps {
        sh '''
          mkdir -p $WORKSPACE/RobotLogs
          cd $WORKSPACE/voltha-system-tests
          make vst_venv
        '''
        sh '''
          if [ ${withProfiling} = true ] ; then
            mkdir -p $LOG_FOLDER/pprof
            echo $PATH
            # Create a small bash script that periodically collects pprof profiles (heap, goroutine, CPU) from the VOLTHA components
            cat << EOF > $WORKSPACE/pprof.sh
timestamp() {
  date +"%T"
}

i=0
while [[ true ]]; do
  ((i++))
  ts=\\$(timestamp)
  go tool pprof -png http://127.0.0.1:6060/debug/pprof/heap > $LOG_FOLDER/pprof/rw-core-heap-\\$i-\\$ts.png
  go tool pprof -png http://127.0.0.1:6060/debug/pprof/goroutine > $LOG_FOLDER/pprof/rw-core-goroutine-\\$i-\\$ts.png
  curl -o $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof http://127.0.0.1:6060/debug/pprof/profile?seconds=10
  go tool pprof -png $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.png

  go tool pprof -png http://127.0.0.1:6061/debug/pprof/heap > $LOG_FOLDER/pprof/openolt-heap-\\$i-\\$ts.png
  go tool pprof -png http://127.0.0.1:6061/debug/pprof/goroutine > $LOG_FOLDER/pprof/openolt-goroutine-\\$i-\\$ts.png
  curl -o $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof http://127.0.0.1:6061/debug/pprof/profile?seconds=10
  go tool pprof -png $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.png

  go tool pprof -png http://127.0.0.1:6062/debug/pprof/heap > $LOG_FOLDER/pprof/ofagent-heap-\\$i-\\$ts.png
  go tool pprof -png http://127.0.0.1:6062/debug/pprof/goroutine > $LOG_FOLDER/pprof/ofagent-goroutine-\\$i-\\$ts.png
  curl -o $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof http://127.0.0.1:6062/debug/pprof/profile?seconds=10
  go tool pprof -png $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.png

  sleep 10
done
EOF
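            # Note (assumption, comment only): the generated pprof.sh polls pprof endpoints for
            # rw-core (6060), the openolt adapter (6061) and ofagent (6062) on 127.0.0.1; it
            # assumes those ports have been made reachable locally (e.g. via port-forwards).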

            _TAG="pprof"
            _TAG=$_TAG bash $WORKSPACE/pprof.sh &
          fi
        '''
        timeout(time: 15, unit: 'MINUTES') {
          sh '''
            ROBOT_PARAMS="--exitonfailure \
              -v olt:${olts} \
              -v pon:${pons} \
              -v onu:${onus} \
              -v workflow:${workflow} \
              -v withEapol:${withEapol} \
              -v withDhcp:${withDhcp} \
              -v withIgmp:${withIgmp} \
              -v ONOS_SSH_PORT:30115 \
              -v ONOS_REST_PORT:30120 \
              --noncritical non-critical \
              -e igmp -e teardown "

            if [ ${withEapol} = false ] ; then
              ROBOT_PARAMS+="-e authentication "
            fi

            if [ ${withDhcp} = false ] ; then
              ROBOT_PARAMS+="-e dhcp "
            fi

            if [ ${provisionSubscribers} = false ] ; then
              # if we're not considering subscribers then we don't care about authentication and dhcp
              ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
            fi

            if [ ${withFlows} = false ] ; then
              ROBOT_PARAMS+="-i setup -i activation "
            fi

            cd $WORKSPACE/voltha-system-tests
            source ./vst_venv/bin/activate
            robot -d $WORKSPACE/RobotLogs \
              $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
          '''
        }
      }
    }
    stage('Run Igmp Tests') {
      environment {
        ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/IgmpTests"
      }
      when {
        expression {
          return params.withIgmp
        }
      }
      steps {
        sh '''
          set +e
          mkdir -p $ROBOT_LOGS_DIR
          cd $WORKSPACE/voltha-system-tests
          make vst_venv
        '''
        timeout(time: 11, unit: 'MINUTES') {
          sh '''
            ROBOT_PARAMS="--exitonfailure \
              -v olt:${olts} \
              -v pon:${pons} \
              -v onu:${onus} \
              -v workflow:${workflow} \
              -v withEapol:${withEapol} \
              -v withDhcp:${withDhcp} \
              -v withIgmp:${withIgmp} \
              --noncritical non-critical \
              -i igmp \
              -e setup -e activation -e flow-before \
              -e authentication -e provision -e flow-after \
              -e dhcp -e teardown "
            cd $WORKSPACE/voltha-system-tests
            source ./vst_venv/bin/activate
            robot -d $ROBOT_LOGS_DIR \
              $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
          '''
        }
      }
    }
  }
  post {
    always {
      // collect result, done in the "post" step so it's executed even in the
      // event of a timeout in the tests
      sh '''

        # stop the kail processes
        list=($APPS_TO_LOG)
        for app in "${list[@]}"
        do
          echo "Stopping logs for: ${app}"
          _TAG="kail-$app"
          P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
          if [ -n "$P_IDS" ]; then
            echo $P_IDS
            for P_ID in $P_IDS; do
              kill -9 $P_ID
            done
          fi
        done

        if [ ${withPcap} = true ] ; then
          # stop ofAgent tcpdump
          P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
          if [ -n "\$P_ID" ]; then
            kill -9 \$P_ID
          fi

          # stop radius tcpdump
          P_ID="\$(ps e -ww -A | grep "_TAG=radius-tcpdump" | grep -v grep | awk '{print \$1}')"
          if [ -n "\$P_ID" ]; then
            kill -9 \$P_ID
          fi

          # stop onos tcpdump
          LIMIT=$(($NUM_OF_ONOS - 1))
          for i in $(seq 0 $LIMIT); do
            INSTANCE="onos-onos-classic-$i"
            P_ID="\$(ps e -ww -A | grep "_TAG=$INSTANCE" | grep -v grep | awk '{print \$1}')"
            if [ -n "\$P_ID" ]; then
              kill -9 \$P_ID
            fi
          done

          # copy the file
          export OF_AGENT=$(kubectl get pods -l app=ofagent | awk 'NR==2{print $1}') || true
          kubectl cp $OF_AGENT:out.pcap $LOG_FOLDER/ofagent.pcap || true

          export RADIUS=$(kubectl get pods -l app=radius | awk 'NR==2{print $1}') || true
          kubectl cp $RADIUS:out.pcap $LOG_FOLDER/radius.pcap || true

          LIMIT=$(($NUM_OF_ONOS - 1))
          for i in $(seq 0 $LIMIT); do
            INSTANCE="onos-onos-classic-$i"
            kubectl cp $INSTANCE:out.pcap $LOG_FOLDER/$INSTANCE.pcap || true
          done
        fi

        cd voltha-system-tests
        source ./vst_venv/bin/activate
        python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
        cat $WORKSPACE/execution-time.txt
      '''
      sh '''
        if [ ${withProfiling} = true ] ; then
          _TAG="pprof"
          P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
          if [ -n "$P_IDS" ]; then
            echo $P_IDS
            for P_ID in $P_IDS; do
              kill -9 $P_ID
            done
          fi
        fi
      '''
      plot([
        csvFileName: 'scale-test.csv',
        csvSeries: [
          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
        ],
        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
      ])
      step([$class: 'RobotPublisher',
        disableArchiveOutput: false,
        logFileName: '**/log*.html',
        otherFiles: '',
        outputFileName: '**/output*.xml',
        outputPath: 'RobotLogs',
        passThreshold: 100,
        reportFileName: '**/report*.html',
        unstableThreshold: 0]);
      // get all the logs from kubernetes PODs
      sh returnStdout: false, script: '''

        # store information on running charts
        helm ls > $LOG_FOLDER/helm-list.txt || true

        # store information on the running pods
        kubectl get pods -o wide > $LOG_FOLDER/pods.txt || true
        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true

        # copy the ONOS logs directly from the container to avoid the color codes
        printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true

        # get ONOS cfg from the 3 nodes
        printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl exec -it # -- ${karafHome}/bin/client cfg get > $LOG_FOLDER/#.cfg" || true


        # get radius logs out of the container
        kubectl cp $(kubectl get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER/radius.log || true
      '''
      // dump all the BBSim(s) ONU information
      sh '''
        BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
        IDS=($BBSIM_IDS)

        for bbsim in "${IDS[@]}"
        do
          kubectl exec -t $bbsim -- bbsimctl onu list > $LOG_FOLDER/$bbsim-device-list.txt || true
          kubectl exec -t $bbsim -- bbsimctl service list > $LOG_FOLDER/$bbsim-service-list.txt || true
          kubectl exec -t $bbsim -- bbsimctl olt resources GEM_PORT > $LOG_FOLDER/$bbsim-flows-gem-ports.txt || true
          kubectl exec -t $bbsim -- bbsimctl olt resources ALLOC_ID > $LOG_FOLDER/$bbsim-flows-alloc-ids.txt || true
          kubectl exec -t $bbsim -- bbsimctl olt pons > $LOG_FOLDER/$bbsim-pon-resources.txt || true
        done
      '''
      script {
        // first make sure the port-forward is still running,
        // sometimes Jenkins kills it regardless of the JENKINS_NODE_COOKIE=dontKillMe
        def running = sh (
          script: 'ps aux | grep port-forw | grep -E "onos|voltha" | grep -v grep | wc -l',
          returnStdout: true
        ).trim()
        // if any of the expected port-forwards are missing, restart them
        if (running != "3") {
          start_port_forward(olts)
        }
      }
      // get ONOS debug infos
      sh '''

        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg get > $LOG_FOLDER/onos-cfg.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 netcfg > $LOG_FOLDER/onos-netcfg.txt

        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt

        if [ ${withFlows} = true ] ; then
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt
        fi

        if [ ${provisionSubscribers} = true ]; then
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt
        fi

        if [ ${withEapol} = true ] ; then
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt
        fi

        if [ ${withDhcp} = true ] ; then
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt
        fi

        if [ ${withIgmp} = true ] ; then
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-routes > $LOG_FOLDER/onos-mcast-host-routes.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-show > $LOG_FOLDER/onos-mcast-host-show.txt
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 groups > $LOG_FOLDER/onos-groups.txt
        fi
      '''
      // collect etcd metrics
      sh '''
        mkdir -p $WORKSPACE/etcd-metrics
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-sum.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-bucket.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_wal_fsync_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-wal-fsync-time-bucket.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_network_peer_round_trip_time_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-network-peer-round-trip-time-seconds.json || true

      '''
      // get VOLTHA debug infos
      script {
        try {
          sh '''
            voltctl -m 8MB device list -o json > $LOG_FOLDER/device-list.json || true
            python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
            rm $LOG_FOLDER/device-list.json || true
            voltctl -m 8MB device list > $LOG_FOLDER/voltha-devices-list.txt || true

            printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
            printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true

            printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
            printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
          '''
        } catch(e) {
          sh '''
            echo "Can't get device list from voltctl"
          '''
        }
      }
      // get cpu usage by container
      sh '''
        if [ ${withMonitoring} = true ] ; then
          cd $WORKSPACE/voltha-system-tests
          source ./vst_venv/bin/activate
          sleep 60 # we have to wait for prometheus to collect all the information
          python tests/scale/sizing.py -o $WORKSPACE/plots || true
        fi
      '''
      archiveArtifacts artifacts: 'execution-time.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
    }
  }
}

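// Forwards the VOLTHA API (55555) and one REST port per BBSim instance, starting at 50071.
// Illustrative example: with olts=2 the result is bbsim0 on 50071 and bbsim1 on 50072.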
def start_port_forward(olts) {
  sh """
    daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/voltha1-voltha-api 55555:55555

    bbsimRestPortFwd=50071
    for i in {0..${olts.toInteger() - 1}}; do
      daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/bbsim\${i} \${bbsimRestPortFwd}:50071
      ((bbsimRestPortFwd++))
    done
  """
}