// Copyright 2019-present Open Networking Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Deploys VOLTHA using kind-voltha and performs a scale test

// NOTE we are importing the library even if it's global so that it's
// easier to change the keywords during a replay
library identifier: 'cord-jenkins-libraries@master',
    retriever: modernSCM([
      $class: 'GitSCMSource',
      remote: 'https://gerrit.opencord.org/ci-management.git'
])

def ofAgentConnections(numOfOnos, releaseName, namespace) {
  def params = " "
  numOfOnos.times {
    params += "--set voltha.services.controller[${it}].address=${releaseName}-onos-classic-${it}.${releaseName}-onos-classic-hs.${namespace}.svc:6653 "
  }
  return params
}
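// Illustrative example (not from a real run): ofAgentConnections(2, "voltha-infra", "infra") returns
//   " --set voltha.services.controller[0].address=voltha-infra-onos-classic-0.voltha-infra-onos-classic-hs.infra.svc:6653 --set voltha.services.controller[1].address=voltha-infra-onos-classic-1.voltha-infra-onos-classic-hs.infra.svc:6653 "
// so that ofAgent is configured with one controller endpoint per ONOS instance.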

pipeline {

  /* no label, executor is determined by JJB */
  agent {
    label "${params.buildNode}"
  }
  options {
    timeout(time: 120, unit: 'MINUTES')
  }
  environment {
    JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
    KUBECONFIG="$HOME/.kube/config"
    SSHPASS="karaf"
    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"

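    // components whose pod logs are tailed by kail during the run (see the "Start logging" stage)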
    APPS_TO_LOG="etcd kafka onos-onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server"
    LOG_FOLDER="$WORKSPACE/logs"
  }

  stages {
    stage ('Cleanup') {
      steps {
        timeout(time: 11, unit: 'MINUTES') {
          sh """
          # remove orphaned port-forward from different namespaces
          ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9
          """
          script {
            def namespaces = ["infra"]
            // FIXME we may have leftovers from more VOLTHA stacks (e.g. run1 had 10 stacks, run2 had 2 stacks)
            volthaStacks.toInteger().times {
              namespaces += "voltha${it + 1}"
            }
            helmTeardown(namespaces)
          }
          sh returnStdout: false, script: """
            helm repo add onf https://charts.opencord.org
            helm repo add cord https://charts.opencord.org
            helm repo update

            # remove all port-forward from different namespaces
            ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9
          """
        }
      }
    }
    stage('Download Code') {
      steps {
        getVolthaCode([
          branch: "${release}",
          volthaSystemTestsChange: "${volthaSystemTestsChange}",
          //volthaHelmChartsChange: "${volthaHelmChartsChange}",
        ])
      }
    }
    stage('Deploy common infrastructure') {
      // includes monitoring
      steps {
        sh '''
        if [ ${withMonitoring} = true ] ; then
          helm install -n infra nem-monitoring cord/nem-monitoring \
          -f $HOME/voltha-scale/grafana.yaml \
          --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
          --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
        fi
        '''
      }
    }
    stage('Deploy VOLTHA infrastructure') {
      steps {
        sh returnStdout: false, script: '''

        helm install kafka -n infra $HOME/teone/helm-charts/kafka --set replicaCount=${kafkaReplicas},replicas=${kafkaReplicas} --set persistence.enabled=false \
          --set zookeeper.replicaCount=${kafkaReplicas} --set zookeeper.persistence.enabled=false \
          --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default

        # the ETCD chart uses "auth" for reasons different from BBSim, so strip that away
        ETCD_FLAGS=$(echo ${extraHelmFlags} | sed -e 's/--set auth=false / /g' -e 's/--set auth=true / /g')
        ETCD_FLAGS+=" --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=${etcdReplicas}"
        ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
        helm install -n infra --set replicas=${etcdReplicas} etcd $HOME/teone/helm-charts/etcd $ETCD_FLAGS

        helm upgrade --install -n infra voltha-infra onf/voltha-infra \
          -f $WORKSPACE/voltha-helm-charts/examples/${workflow}-values.yaml \
          --set onos-classic.replicas=${onosReplicas},onos-classic.atomix.replicas=${atomixReplicas} \
          --set radius.enabled=${withEapol} \
          --set kafka.enabled=false \
          --set etcd.enabled=false
        '''
      }
    }
    stage('Deploy Voltha') {
      steps {
        deploy_voltha_stacks(params.volthaStacks)
      }
    }
    stage('Start logging') {
      steps {
        sh returnStdout: false, script: '''
        # start logging with kail
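        # kail (https://github.com/boz/kail) tails the logs of every pod matching the label selector;
        # the _TAG prefix marks each background process so the post step can find and stop it later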

        mkdir -p $LOG_FOLDER

        list=($APPS_TO_LOG)
        for app in "${list[@]}"
        do
          echo "Starting logs for: ${app}"
          _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
        done
        '''
      }
    }
    stage('Configuration') {
      steps {
        script {
          sh returnStdout: false, script: """

          # forward ETCD port
          _TAG=etcd-port-forward kubectl -n infra port-forward --address 0.0.0.0 service/etcd 9999:2379 > /dev/null 2>&1 &

          # forward ONOS ports
          _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101 > /dev/null 2>&1 &
          _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181 > /dev/null 2>&1 &

          # make sure the port-forwards have started before moving forward
          sleep 5
          """
          sh returnStdout: false, script: """
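          # ONOS configuration is applied through the karaf CLI on the forwarded port 8101;
          # sshpass -e reads the karaf password from the SSHPASS variable defined in the environment block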
          # TODO this needs to be repeated per stack
          # kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false

          # Setting link discovery
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000

          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set ${logLevel} org.onosproject
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set ${logLevel} org.opencord

          # Set Flows/Ports/Meters poll frequency
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}

          if [ ${withFlows} = false ]; then
            sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
          fi
          """
        }
      }
    }
    stage('Setup Test') {
      steps {
        sh '''
          mkdir -p $WORKSPACE/RobotLogs
          cd $WORKSPACE/voltha-system-tests
          make vst_venv
        '''
      }
    }
    stage('Run Test') {
      steps {
        test_voltha_stacks(params.volthaStacks)
      }
    }
  }
  post {
    always {
      // collect result, done in the "post" step so it's executed even in the
      // event of a timeout in the tests
      sh '''

        # stop the kail processes
        list=($APPS_TO_LOG)
        for app in "${list[@]}"
        do
          echo "Stopping logs for: ${app}"
          _TAG="kail-$app"
          P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
          if [ -n "$P_IDS" ]; then
            echo $P_IDS
            for P_ID in $P_IDS; do
              kill -9 $P_ID
            done
          fi
        done
      '''
      // compressing the logs to save space on Jenkins
      sh '''
        cd $LOG_FOLDER
        tar -czf logs.tar.gz *.log
        rm *.log
      '''
      plot([
        csvFileName: 'scale-test.csv',
        csvSeries: [
          [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
          [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
        ],
        group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (Stacks: ${params.volthaStacks}, OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
      ])
      step([$class: 'RobotPublisher',
        disableArchiveOutput: false,
        logFileName: 'RobotLogs/**/log.html',
        otherFiles: '',
        outputFileName: 'RobotLogs/**/output.xml',
        outputPath: '.',
        passThreshold: 100,
        reportFileName: 'RobotLogs/**/report.html',
        unstableThreshold: 0]);
      // get all the logs from kubernetes pods
      sh returnStdout: false, script: '''

        # store information on running charts
        helm ls --all-namespaces > $LOG_FOLDER/helm-list.txt || true

        # store information on the running pods
        kubectl get pods --all-namespaces -o wide > $LOG_FOLDER/pods.txt || true
        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
        kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true

        # copy the ONOS logs directly from the container to avoid the color codes
        printf '%s\n' $(kubectl -n \$INFRA_NS get pods -l app=onos-onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl -n \$INFRA_NS cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true

        # get radius logs out of the container
        kubectl -n \$INFRA_NS cp $(kubectl -n \$INFRA_NS get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER/radius.log || true
      '''
      // dump all the BBSim(s) ONU information
      script {
        for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
          stack_ns="voltha"+i
          sh """
          BBSIM_IDS=\$(kubectl -n ${stack_ns} get pods | grep bbsim | grep -v server | awk '{print \$1}')
          IDS=(\$BBSIM_IDS)

          for bbsim in "\${IDS[@]}"
          do
            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl onu list > $LOG_FOLDER/${stack_ns}/\$bbsim-device-list.txt || true
            kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl service list > $LOG_FOLDER/${stack_ns}/\$bbsim-service-list.txt || true
          done
          """
        }
      }
      // get ONOS debug info
      sh '''

        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt || true
        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt || true
        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt || true
        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt || true

        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt || true
        sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt || true

        if [ ${withFlows} = true ] ; then
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt || true
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt || true
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt || true
        fi

        if [ ${provisionSubscribers} = true ]; then
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt || true
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt || true
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt || true
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt || true
        fi

        if [ ${withEapol} = true ] ; then
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt || true
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt || true
        fi

        if [ ${withDhcp} = true ] ; then
          sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt || true
        fi
      '''
      // collect etcd metrics
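      // the queries below go to the Prometheus HTTP API (/api/v1/query); 10.90.0.101:31301 is assumed to be
      // the NodePort where the monitoring stack installed in "Deploy common infrastructure" is exposed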
      sh '''
        mkdir -p $WORKSPACE/etcd-metrics
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
        curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
      '''
      // get VOLTHA debug info
      script {
        for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
          stack_ns="voltha"+i
          voltcfg="~/.volt/config-voltha"+i
          try {
            sh """

            _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1 &

            voltctl -m 8MB device list -o json > $LOG_FOLDER/${stack_ns}/device-list.json || true
            python -m json.tool $LOG_FOLDER/${stack_ns}/device-list.json > $LOG_FOLDER/${stack_ns}/voltha-devices-list.json || true
            rm $LOG_FOLDER/${stack_ns}/device-list.json || true
            voltctl -m 8MB device list > $LOG_FOLDER/${stack_ns}/voltha-devices-list.txt || true

            DEVICE_LIST=
            printf '%s\n' \$(voltctl -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/${stack_ns}/voltha-device-flows-#.txt" || true
            printf '%s\n' \$(voltctl -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/${stack_ns}/voltha-device-ports-#.txt" || true

            printf '%s\n' \$(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-flows-#.txt" || true
            printf '%s\n' \$(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice port list # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-ports-#.txt" || true

            # remove VOLTHA port-forward
            ps aux | grep port-forw | grep voltha-api | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9
            """
          } catch(e) {
            sh '''
            echo "Can't get device list from voltctl"
            '''
          }
        }
      }
      // get cpu usage by container
      sh '''
        if [ ${withMonitoring} = true ] ; then
          cd $WORKSPACE/voltha-system-tests
          source ./vst_venv/bin/activate
          sleep 60 # we have to wait for prometheus to collect all the information
          python tests/scale/sizing.py -o $WORKSPACE/plots || true
        fi
      '''
      archiveArtifacts artifacts: 'kind-voltha/install-*.log,execution-time-*.txt,logs/**/*.txt,logs/**/*.tar.gz,RobotLogs/**/*,plots/*,etcd-metrics/*'
    }
  }
}

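// Deploys one VOLTHA stack per iteration, each in its own voltha${i} namespace, all sharing the
// infrastructure (Kafka, etcd, ONOS) installed in the "infra" namespace above.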
def deploy_voltha_stacks(numberOfStacks) {
  for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
    stage("Deploy VOLTHA stack " + i) {
      // ${logLevel}
      def extraHelmFlags = "${extraHelmFlags} --set global.log_level=${logLevel},enablePerf=true,onu=${onus},pon=${pons} "
      extraHelmFlags += " --set securityContext.enabled=false,atomix.persistence.enabled=false "

      // FIXME having to set all of these values is annoying, is there a better solution?
      def volthaHelmFlags = extraHelmFlags +
        "--set voltha.services.kafka.adapter.address=kafka.infra.svc:9092 " +
        "--set voltha.services.kafka.cluster.address=kafka.infra.svc:9092 " +
        "--set voltha.services.etcd.address=etcd.infra.svc:2379 " +
        "--set voltha-adapter-openolt.services.kafka.adapter.address=kafka.infra.svc:9092 " +
        "--set voltha-adapter-openolt.services.kafka.cluster.address=kafka.infra.svc:9092 " +
        "--set voltha-adapter-openolt.services.etcd.address=etcd.infra.svc:2379 " +
        "--set voltha-adapter-openonu.services.kafka.adapter.address=kafka.infra.svc:9092 " +
        "--set voltha-adapter-openonu.services.kafka.cluster.address=kafka.infra.svc:9092 " +
        "--set voltha-adapter-openonu.services.etcd.address=etcd.infra.svc:2379 " +
        ofAgentConnections(onosReplicas.toInteger(), "voltha-infra", "infra")

      volthaStackDeploy([
        bbsimReplica: olts.toInteger(),
        infraNamespace: "infra",
        volthaNamespace: "voltha${i}",
        stackName: "voltha${i}",
        stackId: i,
        workflow: workflow,
        extraHelmFlags: volthaHelmFlags
      ])
    }
  }
}

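// Runs the scale test suite once per stack; the voltha-api port-forward is re-created for each stack,
// so a single voltconfig pointing at 127.0.0.1:55555 can be reused across stacks.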
def test_voltha_stacks(numberOfStacks) {
  for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
    stage("Test VOLTHA stack " + i) {
      timeout(time: 15, unit: 'MINUTES') {
        sh """

        # we are restarting the voltha-api port-forward for each stack, no need to have a different voltconfig file
        voltctl -s 127.0.0.1:55555 config > $HOME/.volt/config
        export VOLTCONFIG=$HOME/.volt/config

        _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1 &

        ROBOT_PARAMS="-v stackId:${i} \
          -v olt:${olts} \
          -v pon:${pons} \
          -v onu:${onus} \
          -v workflow:${workflow} \
          -v withEapol:${withEapol} \
          -v withDhcp:${withDhcp} \
          -v withIgmp:${withIgmp} \
          --noncritical non-critical \
          -e igmp \
          -e teardown "
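        # Robot Framework flags: -e excludes tests by tag, -i limits the run to the given tags,
        # so the suite is trimmed below according to the job parameters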

        if [ ${withEapol} = false ] ; then
          ROBOT_PARAMS+="-e authentication "
        fi

        if [ ${withDhcp} = false ] ; then
          ROBOT_PARAMS+="-e dhcp "
        fi

        if [ ${provisionSubscribers} = false ] ; then
          # if we're not considering subscribers then we don't care about authentication and dhcp
          ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
        fi

        if [ ${withFlows} = false ] ; then
          ROBOT_PARAMS+="-i setup -i activation "
        fi

        cd $WORKSPACE/voltha-system-tests
        source ./vst_venv/bin/activate
        robot -d $WORKSPACE/RobotLogs/voltha${i} \
          \$ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot

        # collect results
        python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/voltha${i}/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time-voltha${i}.txt || true
        cat $WORKSPACE/execution-time-voltha${i}.txt
        """
        sh """
        # remove VOLTHA port-forward
        ps aux | grep port-forw | grep voltha-api | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 2>&1 > /dev/null
        """
      }
    }
  }
}