// Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// deploys VOLTHA and performs a scale test

library identifier: 'cord-jenkins-libraries@master',
    retriever: modernSCM([
      $class: 'GitSCMSource',
      remote: 'https://gerrit.opencord.org/ci-management.git'
])

// this function generates the correct parameters for ofAgent
// to connect to multiple ONOS instances
def ofAgentConnections(numOfOnos, releaseName, namespace) {
  def params = " "
  numOfOnos.times {
    params += "--set voltha.services.controller[${it}].address=${releaseName}-onos-classic-${it}.${releaseName}-onos-classic-hs.${namespace}.svc:6653 "
  }
  return params
}
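// For example (hypothetical values), ofAgentConnections(2, "voltha-infra", "default") returns:
//   --set voltha.services.controller[0].address=voltha-infra-onos-classic-0.voltha-infra-onos-classic-hs.default.svc:6653
//   --set voltha.services.controller[1].address=voltha-infra-onos-classic-1.voltha-infra-onos-classic-hs.default.svc:6653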

pipeline {

  /* no label, executor is determined by JJB */
  agent {
    label "${params.buildNode}"
  }
  options {
    timeout(time: 60, unit: 'MINUTES')
  }
  environment {
    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
    KUBECONFIG="$HOME/.kube/config"
    VOLTCONFIG="$HOME/.volt/config"
    SSHPASS="karaf"
    VOLTHA_LOG_LEVEL="${logLevel}"
    NUM_OF_BBSIM="${olts}"
    NUM_OF_OPENONU="${openonuAdapterReplicas}"
    NUM_OF_ONOS="${onosReplicas}"
    NUM_OF_ATOMIX="${atomixReplicas}"
    EXTRA_HELM_FLAGS=" "

    APPS_TO_LOG="etcd kafka onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server onos-config-loader"
    LOG_FOLDER="$WORKSPACE/logs"

    GERRIT_PROJECT="${GERRIT_PROJECT}"
    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
  }

  stages {
    stage ('Cleanup') {
      steps {
        script {
          try {
            timeout(time: 5, unit: 'MINUTES') {
              sh returnStdout: false, script: '''
              cd $WORKSPACE
              rm -rf $WORKSPACE/*
              '''
              // remove the voltha-infra chart first:
              // if we don't, ONOS might get stuck because of all the events generated when BBSim goes down
              sh returnStdout: false, script: '''
              set +x
              helm del voltha-infra || true
              echo -ne "\nWaiting for ONOS to be removed..."
              onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
              while [[ $onos != 0 ]]; do
                onos=$(kubectl get pod -n default -l app=onos-classic --no-headers | wc -l)
                sleep 5
                echo -ne "."
              done
              '''
            }
          } catch(org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e) {
            // if we hit a timeout in the Cleanup phase ONOS most likely got stuck somewhere, thus force remove the pods
            sh '''
              kubectl get pods | grep Terminating | awk '{print $1}' | xargs kubectl delete pod --force --grace-period=0
            '''
          }
          timeout(time: 10, unit: 'MINUTES') {
            script {
              helmTeardown(["default"])
            }
            sh returnStdout: false, script: '''
              helm repo add onf https://charts.opencord.org
              helm repo update

              # remove all persistent volume claims
              kubectl delete pvc --all-namespaces --all
              PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
              while [[ \$PVCS != 0 ]]; do
                sleep 5
                PVCS=\$(kubectl get pvc --all-namespaces --no-headers | wc -l)
              done

              # remove orphaned port-forwards from different namespaces
              ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
            '''
          }
        }
      }
    }
    stage('Download Code') {
      steps {
        getVolthaCode([
          branch: "${release}",
          volthaSystemTestsChange: "${volthaSystemTestsChange}",
          volthaHelmChartsChange: "${volthaHelmChartsChange}",
        ])
      }
    }
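    // When GERRIT_PROJECT is set, the stage below rebuilds the patched component and pushes the
    // resulting images to the configured registry with the 'voltha-scale' tag, so that the deploy
    // stages further down can use them.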
    stage('Build patch') {
      when {
        expression {
          return params.GERRIT_PROJECT
        }
      }
      steps {
        sh """
        git clone https://\$GERRIT_HOST/\$GERRIT_PROJECT
        cd \$GERRIT_PROJECT
        git fetch https://\$GERRIT_HOST/\$GERRIT_PROJECT \$GERRIT_REFSPEC && git checkout FETCH_HEAD

        DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-build
        DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-push
        """
      }
    }
    stage('Deploy common infrastructure') {
      steps {
        sh '''
        if [ ${withMonitoring} = true ] ; then
          helm install nem-monitoring onf/nem-monitoring \
          -f $HOME/voltha-scale/grafana.yaml \
          --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
          --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
        fi
        '''
      }
    }
    stage('Deploy Voltha') {
      steps {
        timeout(time: 10, unit: 'MINUTES') {
          installVoltctl("${release}")
          script {
            sh returnStdout: false, script: '''
            # start logging with kail

            mkdir -p $LOG_FOLDER

            list=($APPS_TO_LOG)
            for app in "${list[@]}"
            do
              echo "Starting logs for: ${app}"
              _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
            done
            '''
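            // Build up EXTRA_HELM_FLAGS for the infra and stack charts. Custom images are passed
            // as "repository:tag" strings (e.g., hypothetically, bbsimImg='voltha/bbsim:master')
            // and are split on ':' below; components patched in the 'Build patch' stage are
            // instead pointed at the locally built 'voltha-scale' images.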
            def returned_flags = sh (returnStdout: true, script: """

              export EXTRA_HELM_FLAGS+=' '

              # BBSim custom image handling
              if [ '${bbsimImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'bbsim' ]; then
                IFS=: read -r bbsimRepo bbsimTag <<< '${bbsimImg.trim()}'
                EXTRA_HELM_FLAGS+="--set images.bbsim.repository=\$bbsimRepo,images.bbsim.tag=\$bbsimTag "
              fi

              # VOLTHA custom image handling
              if [ '${rwCoreImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-go' ]; then
                IFS=: read -r rwCoreRepo rwCoreTag <<< '${rwCoreImg.trim()}'
                EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=\$rwCoreRepo,voltha.images.rw_core.tag=\$rwCoreTag "
              fi

              # ofAgent custom image handling
              if [ '${ofAgentImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'of-agent' ]; then
                IFS=: read -r ofAgentRepo ofAgentTag <<< '${ofAgentImg.trim()}'
                EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=\$ofAgentRepo,voltha.images.ofagent.tag=\$ofAgentTag "
              fi

              # OpenOLT custom image handling
              if [ '${openoltAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openolt-adapter' ]; then
                IFS=: read -r openoltAdapterRepo openoltAdapterTag <<< '${openoltAdapterImg.trim()}'
                EXTRA_HELM_FLAGS+="--set voltha-adapter-openolt.images.adapter_open_olt.repository=\$openoltAdapterRepo,voltha-adapter-openolt.images.adapter_open_olt.tag=\$openoltAdapterTag "
              fi

              # OpenONU custom image handling
              if [ '${openonuAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter' ]; then
                IFS=: read -r openonuAdapterRepo openonuAdapterTag <<< '${openonuAdapterImg.trim()}'
                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu.repository=\$openonuAdapterRepo,voltha-adapter-openonu.images.adapter_open_onu.tag=\$openonuAdapterTag "
              fi

              # OpenONU GO custom image handling
              if [ '${openonuAdapterGoImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter-go' ]; then
                IFS=: read -r openonuAdapterGoRepo openonuAdapterGoTag <<< '${openonuAdapterGoImg.trim()}'
                EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu_go.repository=\$openonuAdapterGoRepo,voltha-adapter-openonu.images.adapter_open_onu_go.tag=\$openonuAdapterGoTag "
              fi

              # ONOS custom image handling
              if [ '${onosImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-onos' ]; then
                IFS=: read -r onosRepo onosTag <<< '${onosImg.trim()}'
                EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=\$onosRepo,onos-classic.image.tag=\$onosTag "
              fi

              # set BBSim parameters
              EXTRA_HELM_FLAGS+='--set enablePerf=true,pon=${pons},onu=${onus} '

              # disable the securityContext, this is a development cluster
              EXTRA_HELM_FLAGS+='--set securityContext.enabled=false '

              # No persistent-volume-claims in Atomix
              EXTRA_HELM_FLAGS+="--set onos-classic.atomix.persistence.enabled=false "

              # Use custom built images

              if [ '\$GERRIT_PROJECT' == 'voltha-go' ]; then
                EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=${dockerRegistry}/voltha/voltha-rw-core,voltha.images.rw_core.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'voltha-openolt-adapter' ]; then
                EXTRA_HELM_FLAGS+="--set voltha-openolt-adapter.images.adapter_open_olt.repository=${dockerRegistry}/voltha/voltha-openolt-adapter,voltha-openolt-adapter.images.adapter_open_olt.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter' ]; then
                EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter.images.adapter_open_onu.repository=${dockerRegistry}/voltha/voltha-openonu-adapter,voltha-openonu-adapter.images.adapter_open_onu.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter-go' ]; then
                EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter-go.images.adapter_open_onu_go.repository=${dockerRegistry}/voltha/voltha-openonu-adapter-go,voltha-openonu-adapter-go.images.adapter_open_onu_go.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'ofagent-go' ]; then
                EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=${dockerRegistry}/voltha/voltha-ofagent-go,ofagent-go.images.ofagent.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'voltha-onos' ]; then
                EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=${dockerRegistry}/voltha/voltha-onos,onos-classic.image.tag=voltha-scale "
              fi

              if [ '\$GERRIT_PROJECT' == 'bbsim' ]; then
                EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
              fi
              echo \$EXTRA_HELM_FLAGS

            """).trim()

            def extraHelmFlags = returned_flags
            // The added space before params.extraHelmFlags is required due to the .trim() above
            def infraHelmFlags =
              "--set global.log_level=${logLevel} " +
              "--set radius.enabled=${withEapol} " +
              "--set onos-classic.onosSshPort=30115 " +
              "--set onos-classic.onosApiPort=30120 " +
              extraHelmFlags + " " + params.extraHelmFlags

            println "Passing the following parameters to the VOLTHA infra deploy: ${infraHelmFlags}."

            // in a released version we always want to use the local version of the helm-charts
            def localCharts = true

            volthaInfraDeploy([
              workflow: workflow,
              infraNamespace: "default",
              extraHelmFlags: infraHelmFlags,
              localCharts: localCharts,
              onosReplica: onosReplicas,
              atomixReplica: atomixReplicas,
              kafkaReplica: kafkaReplicas,
              etcdReplica: etcdReplicas,
            ])

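            // Deploy the VOLTHA stack (named voltha1) on top of the shared infrastructure,
            // reusing the same Helm flag overrides plus the ingress settings below.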
            stackHelmFlags = " --set onu=${onus},pon=${pons} --set global.log_level=${logLevel.toLowerCase()} "
            stackHelmFlags += " --set voltha.ingress.enabled=true --set voltha.ingress.enableVirtualHosts=true --set voltha.fullHostnameOverride=voltha.scale1.dev "
            stackHelmFlags += extraHelmFlags + " " + params.extraHelmFlags

            volthaStackDeploy([
              bbsimReplica: olts.toInteger(),
              infraNamespace: "default",
              volthaNamespace: "default",
              stackName: "voltha1", // TODO support custom charts
              workflow: workflow,
              extraHelmFlags: stackHelmFlags,
              localCharts: localCharts,
              adaptersToWait: 0, // no need to wait for adapters, 2.8 is kafka based
              onosReplica: onosReplicas,
            ])
            sh """
              set +x

              echo -ne "\nWaiting for VOLTHA and ONOS to start..."
              voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
              onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
              while [[ \$voltha != 0 || \$onos != 0 ]]; do
                sleep 5
                echo -ne "."
                voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
                onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
              done
              echo -ne "\nVOLTHA and ONOS pods ready\n"
              kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l
              kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l
            """
            start_port_forward(olts)
          }
        }
      }
    }
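    // Tune ONOS for the scale run (link discovery, flow/group poll intervals, log levels),
    // push the TT technology profiles into etcd when needed, and optionally start tcpdump
    // captures in ofagent, radius and ONOS before the test runs.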
    stage('Configuration') {
      steps {
        script {
          def tech_prof_directory = "XGS-PON"
          sh returnStdout: false, script: """
          # Setting link discovery
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}

          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true


          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900

          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500

          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000

          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set ${logLevel} org.onosproject
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set ${logLevel} org.opencord

          # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.cordmcast
          # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.onosproject.mcast
          # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.igmpproxy
          # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.olt
          # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager

          kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false

          # Set Flows/Ports/Meters/Groups poll frequency
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.group.impl.OpenFlowGroupProvider groupPollInterval ${onosGroupInterval}
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.FlowObjectiveManager numThreads ${flowObjWorkerThreads}
          sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager objectiveTimeoutMs 300000

          if [ ${withFlows} = false ]; then
            sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 app deactivate org.opencord.olt
          fi

          if [ '${workflow}' = 'tt' ]; then
            etcd_container=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$2}')
            kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
            put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64')
            kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
            put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65')
            kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST-AdditionalBW-NA.json \$etcd_container:/tmp/mcast.json
            put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66')
          fi

          if [ ${withPcap} = true ] ; then
            # Start the tcp-dump in ofagent
            export OF_AGENT=\$(kubectl get pods -l app=ofagent -o name)
            kubectl exec \$OF_AGENT -- apk update
            kubectl exec \$OF_AGENT -- apk add tcpdump
            kubectl exec \$OF_AGENT -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
            _TAG=ofagent-tcpdump kubectl exec \$OF_AGENT -- tcpdump -nei eth0 -w out.pcap&

            # Start the tcp-dump in radius
            export RADIUS=\$(kubectl get pods -l app=radius -o name)
            kubectl exec \$RADIUS -- apt-get update
            kubectl exec \$RADIUS -- apt-get install -y tcpdump
            _TAG=radius-tcpdump kubectl exec \$RADIUS -- tcpdump -w out.pcap&

            # Start the tcp-dump in ONOS
            for i in \$(seq 0 \$ONOSES); do
              INSTANCE="onos-onos-classic-\$i"
              kubectl exec \$INSTANCE -- apt-get update
              kubectl exec \$INSTANCE -- apt-get install -y tcpdump
              kubectl exec \$INSTANCE -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
              _TAG=\$INSTANCE kubectl exec \$INSTANCE -- /usr/bin/tcpdump -nei eth0 port 1812 -w out.pcap&
            done
          fi
          """
        }
      }
    }
    stage('Load MIB Template') {
      when {
        expression {
          return params.withMibTemplate
        }
      }
      steps {
        sh """
        # load MIB template
        wget https://raw.githubusercontent.com/opencord/voltha-openonu-adapter-go/voltha-2.8/templates/BBSM-12345123451234512345-00000000000001-v1.json
        cat BBSM-12345123451234512345-00000000000001-v1.json | kubectl exec -it \$(kubectl get pods |grep etcd-0 | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/12345123451234512345/BBSM_IMG_00001
        """
      }
    }
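    // Run the Robot Framework scale suite; tags are excluded based on the job parameters so that
    // only the relevant parts of the suite (authentication, DHCP, subscriber provisioning, ...)
    // are executed. Optionally a background loop captures pprof profiles while the test runs.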
    stage('Run Test') {
      steps {
        sh '''
        mkdir -p $WORKSPACE/RobotLogs
        cd $WORKSPACE/voltha-system-tests
        make vst_venv
        '''
        sh '''
        if [ ${withProfiling} = true ] ; then
          mkdir -p $LOG_FOLDER/pprof
          echo $PATH
          # Create a script that periodically collects pprof profiles (heap, goroutine, CPU) from rw-core, openolt adapter and ofagent
          cat << EOF > $WORKSPACE/pprof.sh
timestamp() {
  date +"%T"
}

i=0
while [[ true ]]; do
  ((i++))
  ts=$(timestamp)
  go tool pprof -png http://127.0.0.1:6060/debug/pprof/heap > $LOG_FOLDER/pprof/rw-core-heap-\\$i-\\$ts.png
  go tool pprof -png http://127.0.0.1:6060/debug/pprof/goroutine > $LOG_FOLDER/pprof/rw-core-goroutine-\\$i-\\$ts.png
  curl -o $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof http://127.0.0.1:6060/debug/pprof/profile?seconds=10
  go tool pprof -png $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.png

  go tool pprof -png http://127.0.0.1:6061/debug/pprof/heap > $LOG_FOLDER/pprof/openolt-heap-\\$i-\\$ts.png
  go tool pprof -png http://127.0.0.1:6061/debug/pprof/goroutine > $LOG_FOLDER/pprof/openolt-goroutine-\\$i-\\$ts.png
  curl -o $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof http://127.0.0.1:6061/debug/pprof/profile?seconds=10
  go tool pprof -png $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.png

  go tool pprof -png http://127.0.0.1:6062/debug/pprof/heap > $LOG_FOLDER/pprof/ofagent-heap-\\$i-\\$ts.png
  go tool pprof -png http://127.0.0.1:6062/debug/pprof/goroutine > $LOG_FOLDER/pprof/ofagent-goroutine-\\$i-\\$ts.png
  curl -o $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof http://127.0.0.1:6062/debug/pprof/profile?seconds=10
  go tool pprof -png $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.png

  sleep 10
done
EOF

          _TAG="pprof"
          _TAG=$_TAG bash $WORKSPACE/pprof.sh &
        fi
        '''
        timeout(time: 15, unit: 'MINUTES') {
          sh '''
          ROBOT_PARAMS="--exitonfailure \
            -v olt:${olts} \
            -v pon:${pons} \
            -v onu:${onus} \
            -v ONOS_SSH_PORT:30115 \
            -v ONOS_REST_PORT:30120 \
            -v workflow:${workflow} \
            -v withEapol:${withEapol} \
            -v withDhcp:${withDhcp} \
            -v withIgmp:${withIgmp} \
            --noncritical non-critical \
            -e igmp -e teardown "

          if [ ${withEapol} = false ] ; then
            ROBOT_PARAMS+="-e authentication "
          fi

          if [ ${withDhcp} = false ] ; then
            ROBOT_PARAMS+="-e dhcp "
          fi

          if [ ${provisionSubscribers} = false ] ; then
            # if we're not considering subscribers then we don't care about authentication and dhcp
            ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
          fi

          if [ ${withFlows} = false ] ; then
            ROBOT_PARAMS+="-i setup -i activation "
          fi

          cd $WORKSPACE/voltha-system-tests
          source ./vst_venv/bin/activate
          robot -d $WORKSPACE/RobotLogs \
            $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
          '''
        }
      }
    }
  }
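  // Everything below runs even when the test times out: stop the log/pcap collectors,
  // gather ONOS/VOLTHA/etcd debug information, publish the Robot results and plot the scale numbers.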
  post {
    always {
      // collect result, done in the "post" step so it's executed even in the
      // event of a timeout in the tests
      sh '''

      # stop the kail processes
      list=($APPS_TO_LOG)
      for app in "${list[@]}"
      do
        echo "Stopping logs for: ${app}"
        _TAG="kail-$app"
        P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
        if [ -n "$P_IDS" ]; then
          echo $P_IDS
          for P_ID in $P_IDS; do
            kill -9 $P_ID
          done
        fi
      done

      if [ ${withPcap} = true ] ; then
        # stop ofAgent tcpdump
        P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
        if [ -n "\$P_ID" ]; then
          kill -9 \$P_ID
        fi

        # stop radius tcpdump
        P_ID="\$(ps e -ww -A | grep "_TAG=radius-tcpdump" | grep -v grep | awk '{print \$1}')"
        if [ -n "\$P_ID" ]; then
          kill -9 \$P_ID
        fi

        # stop onos tcpdump
        LIMIT=$(($NUM_OF_ONOS - 1))
        for i in $(seq 0 $LIMIT); do
          INSTANCE="onos-onos-classic-$i"
          P_ID="\$(ps e -ww -A | grep "_TAG=$INSTANCE" | grep -v grep | awk '{print \$1}')"
          if [ -n "\$P_ID" ]; then
            kill -9 \$P_ID
          fi
        done

        # copy the pcap files out of the containers
        export OF_AGENT=$(kubectl get pods -l app=ofagent | awk 'NR==2{print $1}') || true
        kubectl cp $OF_AGENT:out.pcap $LOG_FOLDER/ofagent.pcap || true

        export RADIUS=$(kubectl get pods -l app=radius | awk 'NR==2{print $1}') || true
        kubectl cp $RADIUS:out.pcap $LOG_FOLDER/radius.pcap || true

        LIMIT=$(($NUM_OF_ONOS - 1))
        for i in $(seq 0 $LIMIT); do
          INSTANCE="onos-onos-classic-$i"
          kubectl cp $INSTANCE:out.pcap $LOG_FOLDER/$INSTANCE.pcap || true
        done
      fi

      cd voltha-system-tests
      source ./vst_venv/bin/activate || true
      python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
      cat $WORKSPACE/execution-time.txt
      '''
      sh '''
      if [ ${withProfiling} = true ] ; then
        _TAG="pprof"
        P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
        if [ -n "$P_IDS" ]; then
          echo $P_IDS
          for P_ID in $P_IDS; do
            kill -9 $P_ID
          done
        fi
      fi
      '''
      plot([
        csvFileName: 'scale-test.csv',
        csvSeries: [
          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
        ],
        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
      ])
      step([$class: 'RobotPublisher',
        disableArchiveOutput: false,
        logFileName: '**/log*.html',
        otherFiles: '',
        outputFileName: '**/output*.xml',
        outputPath: 'RobotLogs',
        passThreshold: 100,
        reportFileName: '**/report*.html',
        onlyCritical: true,
        unstableThreshold: 0]);
      // get all the logs from kubernetes PODs
      sh returnStdout: false, script: '''

      # store information on running charts
      helm ls > $LOG_FOLDER/helm-list.txt || true

      # store information on the running pods
      kubectl get pods -o wide > $LOG_FOLDER/pods.txt || true
      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
      kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true

      # copy the ONOS logs directly from the container to avoid the color codes
      printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true

      # get ONOS cfg from the 3 nodes
      # kubectl exec -t voltha-infra-onos-classic-0 -- sh /root/onos/apache-karaf-4.2.9/bin/client cfg get > ~/voltha-infra-onos-classic-0-cfg.txt || true
      # kubectl exec -t voltha-infra-onos-classic-1 -- sh /root/onos/apache-karaf-4.2.9/bin/client cfg get > ~/voltha-infra-onos-classic-1-cfg.txt || true
      # kubectl exec -t voltha-infra-onos-classic-2 -- sh /root/onos/apache-karaf-4.2.9/bin/client cfg get > ~/voltha-infra-onos-classic-2-cfg.txt || true

      # kubectl exec -t voltha-infra-onos-classic-0 -- sh /root/onos/apache-karaf-4.2.9/bin/client obj-next-ids > ~/voltha-infra-onos-classic-0-next-objs.txt || true
      # kubectl exec -t voltha-infra-onos-classic-1 -- sh /root/onos/apache-karaf-4.2.9/bin/client obj-next-ids > ~/voltha-infra-onos-classic-1-next-objs.txt || true
      # kubectl exec -t voltha-infra-onos-classic-2 -- sh /root/onos/apache-karaf-4.2.9/bin/client obj-next-ids > ~/voltha-infra-onos-classic-2-next-objs.txt || true

      # get radius logs out of the container
      kubectl cp $(kubectl get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER/radius.log || true
      '''
      // dump all the BBSim(s) ONU information
      sh '''
      BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
      IDS=($BBSIM_IDS)

      for bbsim in "${IDS[@]}"
      do
        kubectl exec -t $bbsim -- bbsimctl onu list > $LOG_FOLDER/$bbsim-device-list.txt || true
        kubectl exec -t $bbsim -- bbsimctl service list > $LOG_FOLDER/$bbsim-service-list.txt || true
        kubectl exec -t $bbsim -- bbsimctl olt resources GEM_PORT > $LOG_FOLDER/$bbsim-flows-gem-ports.txt || true
        kubectl exec -t $bbsim -- bbsimctl olt resources ALLOC_ID > $LOG_FOLDER/$bbsim-flows-alloc-ids.txt || true
        kubectl exec -t $bbsim -- bbsimctl olt pons > $LOG_FOLDER/$bbsim-pon-resources.txt || true
      done
      '''
      script {
        // first make sure the port-forward is still running,
        // sometimes Jenkins kills it regardless of the JENKINS_NODE_COOKIE=dontKillMe
        def running = sh (
          script: 'ps aux | grep port-forw | grep -E "onos|voltha" | grep -v grep | wc -l',
          returnStdout: true
        ).trim()
        // if any of the voltha-api, onos-rest, onos-ssh port-forwards are not there
        // kill all and restart
        if (running != "3") {
          start_port_forward(olts)
        }
      }
      // get ONOS debug infos
      sh '''

      sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt
      sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt
      sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt
      sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt
      sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg get > $LOG_FOLDER/onos-cfg.txt
      sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 netcfg > $LOG_FOLDER/onos-netcfg.txt

      sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt
      sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt

      if [ ${withFlows} = true ] ; then
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt
      fi

      if [ ${provisionSubscribers} = true ]; then
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt
      fi

      if [ ${withEapol} = true ] ; then
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt
      fi

      if [ ${withDhcp} = true ] ; then
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt
      fi

      if [ ${withIgmp} = true ] ; then
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-routes > $LOG_FOLDER/onos-mcast-host-routes.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-show > $LOG_FOLDER/onos-mcast-host-show.txt
        sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 groups > $LOG_FOLDER/onos-groups.txt
      fi
      '''
      // collect etcd metrics
      sh '''
      mkdir -p $WORKSPACE/etcd-metrics
      curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
      curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
      curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
      curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
      curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-sum.json || true
      curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-bucket.json || true
      curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_wal_fsync_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-wal-fsync-time-bucket.json || true
      curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_network_peer_round_trip_time_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-network-peer-round-trip-time-seconds.json || true
      etcd_namespace=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$1}')
      etcd_container=\$(kubectl get pods --all-namespaces | grep etcd-0 | awk 'NR==1{print \$2}')
      kubectl exec -it -n \$etcd_namespace \$etcd_container -- etcdctl defrag --cluster || true
      kubectl exec -it -n \$etcd_namespace \$etcd_container -- etcdctl endpoint status -w table > $WORKSPACE/etcd-metrics/etcd-status-table.txt || true

      '''
      // get VOLTHA debug infos
      script {
        try {
          sh '''
          voltctl -m 8MB device list -o json > $LOG_FOLDER/device-list.json || true
          python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
          rm $LOG_FOLDER/device-list.json || true
          voltctl -m 8MB device list > $LOG_FOLDER/voltha-devices-list.txt || true

          printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
          printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true

          printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
          printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
          '''
        } catch(e) {
          sh '''
          echo "Can't get device list from voltctl"
          '''
        }
      }
      // get cpu usage by container
      sh '''
      if [ ${withMonitoring} = true ] ; then
        cd $WORKSPACE/voltha-system-tests
        source ./vst_venv/bin/activate || true
        sleep 60 # we have to wait for prometheus to collect all the information
        python scripts/sizing.py -o $WORKSPACE/plots || true
      fi
      '''
      archiveArtifacts artifacts: 'execution-time.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
    }
  }
}

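// Forward the REST API of each BBSim instance to the host, one local port per BBSim
// starting at 50071, so the test and the collection steps can reach them directly.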
def start_port_forward(olts) {
  sh """
  bbsimRestPortFwd=50071
  for i in {0..${olts.toInteger() - 1}}; do
    daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/bbsim\${i} \${bbsimRestPortFwd}:50071
    ((bbsimRestPortFwd++))
  done
  """
}