// Copyright 2017-2023 Open Networking Foundation (ONF) and the ONF Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// used to deploy VOLTHA and configure ONOS physical PODs
// NOTE we are importing the library even if it's global so that it's
// easier to change the keywords during a replay
// Load the shared cord-jenkins-libraries, which provides the custom steps used
// below (getVolthaCode, helmTeardown, createKubernetesCluster, loadToKind,
// volthaDeploy, getVolthaImageFlags, buildVolthaComponent, getPodsInfo).
library identifier: 'cord-jenkins-libraries@master',
    retriever: modernSCM([
        $class: 'GitSCMSource',
        remote: 'https://gerrit.opencord.org/ci-management.git'
])

// Kubernetes namespaces for the infra (ONOS/kafka/etcd) and VOLTHA components,
// and the name of the kind cluster reused across builds on this node.
def infraNamespace = "infra"
def volthaNamespace = "voltha"
def clusterName = "kind-ci"
// Build-and-test pipeline for the Tucson physical POD: builds the patch under
// review, deploys VOLTHA on a local kind cluster, configures the physical ONOS
// POD (tech profiles, sadis, OLT software) and runs the robot E2E suites.
pipeline {
    /* no label, executor is determined by JJB */
    agent {
        label "${params.buildNode}"
    }
    options {
        timeout(time: 120, unit: 'MINUTES')
    }
    environment {
        PATH = "$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
        KUBECONFIG = "$HOME/.kube/kind-${clusterName}"
        VOLTCONFIG = "$HOME/.volt/config"
        // NOTE(review): this interpolates ${workflow} while the stages below use
        // ${workFlow}; confirm both parameters are defined by JJB with the same
        // value, otherwise the kail per-app logs and the combined log are written
        // to different folders.
        LOG_FOLDER = "$WORKSPACE/${workflow}/"
        // components whose pod logs are individually captured with kail
        APPS_TO_LOG = "etcd kafka onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server onos-config-loader"
    }
    stages {
        // Check out voltha-system-tests, voltha-helm-charts and the gerrit
        // patch under test.
        stage('Download Code') {
            steps {
                getVolthaCode([
                    branch: "${branch}",
                    gerritProject: "${gerritProject}",
                    gerritRefspec: "${gerritRefspec}",
                    volthaSystemTestsChange: "${volthaSystemTestsChange}",
                    volthaHelmChartsChange: "${volthaHelmChartsChange}",
                ])
            }
        }
        // Clone the POD configuration repo and load the deployment descriptor
        // matching the requested workflow into `deployment_config`.
        stage('Parse deployment configuration file') {
            steps {
                sh returnStdout: true, script: "rm -rf ${configBaseDir}"
                sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
                script {
                    if (params.workflow.toUpperCase() == "TT") {
                        error("The Tucson POD does not support TT workflow at the moment")
                    }
                    if ( params.workflow.toUpperCase() == "DT" ) {
                        deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
                    }
                    // NOTE(review): unreachable while the guard above errors out
                    // on TT; kept for the day TT is supported on this POD.
                    else if ( params.workflow.toUpperCase() == "TT" ) {
                        deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
                    }
                    else {
                        deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
                    }
                }
            }
        }
        // Remove any helm releases and orphaned port-forwards left over from a
        // previous run on this node.
        stage('Clean up') {
            steps {
                timeout(15) {
                    script {
                        helmTeardown(["default", infraNamespace, volthaNamespace])
                    }
                    timeout(1) {
                        sh returnStdout: false, script: '''
                        # remove orphaned port-forward from different namespaces
                        ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
                        '''
                    }
                }
            }
        }
        stage('Build patch') {
            steps {
                // NOTE that the correct patch has already been checked out
                // during the getVolthaCode step
                buildVolthaComponent("${gerritProject}")
            }
        }
        // Create the kind cluster only if one with the expected name does not
        // already exist (it is reused across builds).
        stage('Create K8s Cluster') {
            steps {
                script {
                    def clusterExists = sh returnStdout: true, script: """
                    kind get clusters | grep ${clusterName} | wc -l
                    """
                    if (clusterExists.trim() == "0") {
                        createKubernetesCluster([branch: "${branch}", nodes: 3, name: clusterName])
                    }
                }
            }
        }
        stage('Load image in kind nodes') {
            steps {
                loadToKind()
            }
        }
        // Deploy VOLTHA + infra via helm, then start background log capture and
        // port-forwards for the voltha API, etcd and kafka.
        stage('Install Voltha') {
            steps {
                timeout(20) {
                    script {
                        imageFlags = getVolthaImageFlags(gerritProject)
                        // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
                        def localCharts = false
                        if (volthaHelmChartsChange != "" || gerritProject == "voltha-helm-charts" || branch != "master") {
                            localCharts = true
                        }
                        def flags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml ${imageFlags} "
                        // NOTE temporary workaround expose ONOS node ports (pod-config needs to be updated to contain these values)
                        flags = flags + "--set onos-classic.onosSshPort=30115 " +
                            "--set onos-classic.onosApiPort=30120 " +
                            "--set onos-classic.onosOfPort=31653 " +
                            "--set onos-classic.individualOpenFlowNodePorts=true " + extraHelmFlags
                        volthaDeploy([
                            workflow: workFlow.toLowerCase(),
                            extraHelmFlags: flags,
                            localCharts: localCharts,
                            kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
                            onosReplica: 3,
                            atomixReplica: 3,
                            kafkaReplica: 3,
                            etcdReplica: 3,
                        ])
                    }
                    // start logging
                    sh """
                    rm -rf $WORKSPACE/${workFlow}/
                    mkdir -p $WORKSPACE/${workFlow}
                    _TAG=kail-${workFlow} kail -n infra -n voltha > $WORKSPACE/${workFlow}/onos-voltha-combined.log &
                    """
                    sh returnStdout: false, script: '''
                    # start logging with kail

                    mkdir -p $LOG_FOLDER

                    list=($APPS_TO_LOG)
                    for app in "${list[@]}"
                    do
                      echo "Starting logs for: ${app}"
                      _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
                    done
                    '''
                    // keep the forwards alive for the whole build; the
                    // JENKINS_NODE_COOKIE prevents Jenkins' process reaper from
                    // killing them between stages
                    sh """
                    JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
                    JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
                    JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
                    ps aux | grep port-forward
                    """
                    getPodsInfo("$WORKSPACE")
                }
            }
        }
        stage('Deploy Kafka Dump Chart') {
            steps {
                script {
                    // helm v2 and v3 have different install syntax; detect which
                    // one is on this node
                    sh returnStdout: false, script: """
                    helm repo add cord https://charts.opencord.org
                    helm repo update
                    if helm version -c --short|grep v2 -q; then
                        helm install -n voltha-kafka-dump cord/voltha-kafka-dump
                    else
                        helm install voltha-kafka-dump cord/voltha-kafka-dump
                    fi
                    """
                }
            }
        }
        // Push a non-default technology profile straight into etcd.
        stage('Push Tech-Profile') {
            when {
                expression { params.profile != "Default" }
            }
            steps {
                sh returnStdout: false, script: """
                etcd_container=\$(kubectl get pods -n voltha | grep voltha-etcd-cluster | awk 'NR==1{print \$1}')
                kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json voltha/\$etcd_container:/tmp/flexpod.json
                kubectl exec -it \$etcd_container -n voltha -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/XGS-PON/64'
                """
            }
        }

        // Raise ONOS log levels for the relevant apps and POST the
        // workflow-specific sadis configuration to the ONOS REST API.
        stage('Push Sadis-config') {
            steps {
                sh returnStdout: false, script: """
                ssh-keygen -R [${deployment_config.nodes[0].ip}]:30115
                ssh-keyscan -p 30115 -H ${deployment_config.nodes[0].ip} >> ~/.ssh/known_hosts
                sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.dhcpl2relay"
                sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.aaa"
                sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.olt"
                sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager"
                sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager"

                if [[ "${workFlow.toUpperCase()}" == "DT" ]]; then
                    curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-DT.json
                elif [[ "${workFlow.toUpperCase()}" == "TT" ]]; then
                    curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-TT.json
                else
                    # this is the ATT case, rename the file in *-sadis-ATT.json so that we can avoid special cases and just load the file
                    curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis.json
                fi
                """
            }
        }
        // Purge and reinstall the BAL package on every OLT, waiting for dpkg to
        // confirm removal/installation before moving on.
        stage('Reinstall OLT software') {
            when {
                expression { params.reinstallOlt }
            }
            steps {
                script {
                    deployment_config.olts.each { olt ->
                        sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --remove asfvolt16 && dpkg --purge asfvolt16'"
                        waitUntil {
                            olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
                            return olt_sw_present.toInteger() == 0
                        }
                        // older release branch ships a different OLT debian package
                        if ( params.branch == 'voltha-2.3' ) {
                            oltDebVersion = oltDebVersionVoltha23
                        } else {
                            oltDebVersion = oltDebVersionMaster
                        }
                        sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --install ${oltDebVersion}'"
                        waitUntil {
                            olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
                            return olt_sw_present.toInteger() == 1
                        }
                        if ( olt.fortygig ) {
                            // If the OLT is connected to a 40G switch interface, set the NNI port to be downgraded
                            sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'echo port ce128 sp=40000 >> /broadcom/qax.soc ; /opt/bcm68620/svk_init.sh'"
                        }
                    }
                }
            }
        }

        // Reboot every OLT and wait until the openolt agent reports ONU
        // discovery indications before continuing.
        stage('Restart OLT processes') {
            steps {
                script {
                    deployment_config.olts.each { olt ->
                        sh returnStdout: false, script: """
                        ssh-keyscan -H ${olt.ip} >> ~/.ssh/known_hosts
                        sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; reboot'
                        sleep 120
                        """
                        waitUntil {
                            onu_discovered = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'grep \"onu discover indication\" /var/log/openolt.log | wc -l'"
                            return onu_discovered.toInteger() > 0
                        }
                    }
                }
            }
        }
        // Select the robot suite/tags for the workflow and run it; failures are
        // tolerated here (|| true) and judged by RobotPublisher in post.
        stage('Run E2E Tests') {
            steps {
                script {
                    // different workflows need different make targets and different robot files
                    if ( params.workflow.toUpperCase() == "DT" ) {
                        robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
                        robotFile = "Voltha_DT_PODTests.robot"
                        makeTarget = "voltha-dt-test"
                        robotFunctionalKeyword = "-i functionalDt"
                        robotDataplaneKeyword = "-i dataplaneDt"
                    }
                    else if ( params.workflow.toUpperCase() == "TT" ) {
                        // TODO the TT tests have diffent tags, address once/if TT is support on the Tucson POD
                        robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
                        robotFile = "Voltha_TT_PODTests.robot"
                        makeTarget = "voltha-tt-test"
                        robotFunctionalKeyword = "-i functionalTt"
                        robotDataplaneKeyword = "-i dataplaneTt"
                    }
                    else {
                        robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
                        robotFile = "Voltha_PODTests.robot"
                        makeTarget = "voltha-test"
                        robotFunctionalKeyword = "-i functional"
                        robotDataplaneKeyword = "-i dataplane"
                    }
                }
                sh returnStdout: false, script: """
                mkdir -p $WORKSPACE/RobotLogs

                export ROBOT_CONFIG_FILE="$WORKSPACE/${robotConfigFile}"
                export ROBOT_MISC_ARGS="${params.extraRobotArgs} --removekeywords wuks -d $WORKSPACE/RobotLogs -v container_log_dir:$WORKSPACE "
                export ROBOT_FILE="${robotFile}"

                # If the Gerrit comment contains a line with "functional tests" then run the full
                # functional test suite. This covers tests tagged either 'sanity' or 'functional'.
                # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
                REGEX="functional tests"
                if [[ "${gerritComment}" =~ \$REGEX ]]; then
                    ROBOT_MISC_ARGS+="${robotFunctionalKeyword} "
                fi
                # Likewise for dataplane tests
                REGEX="dataplane tests"
                if [[ "${gerritComment}" =~ \$REGEX ]]; then
                    ROBOT_MISC_ARGS+="${robotDataplaneKeyword}"
                fi

                ROBOT_MISC_ARGS+=" -v NAMESPACE:${volthaNamespace} -v INFRA_NAMESPACE:${infraNamespace}"
                make -C $WORKSPACE/voltha-system-tests ${makeTarget} || true
                """
            }
        }
    }
    post {
        always {
            // stop the combined kail logger and compress its output
            sh """
            P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${workFlow}" | grep -v grep | awk '{print \$1}')"
            if [ -n "\$P_IDS" ]; then
                echo \$P_IDS
                for P_ID in \$P_IDS; do
                    kill -9 \$P_ID
                done
            fi
            gzip $WORKSPACE/${workFlow}/onos-voltha-combined.log || true
            """
            sh '''
            # stop the kail processes
            list=($APPS_TO_LOG)
            for app in "${list[@]}"
            do
                echo "Stopping logs for: ${app}"
                _TAG="kail-$app"
                P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
                if [ -n "$P_IDS" ]; then
                    echo $P_IDS
                    for P_ID in $P_IDS; do
                        kill -9 $P_ID
                    done
                fi
            done
            '''
            // publish robot results: build passes only at 100% pass rate
            step([$class: 'RobotPublisher',
                disableArchiveOutput: false,
                logFileName: 'RobotLogs/log*.html',
                otherFiles: '',
                outputFileName: 'RobotLogs/output*.xml',
                outputPath: '.',
                passThreshold: 100,
                reportFileName: 'RobotLogs/report*.html',
                unstableThreshold: 0,
                onlyCritical: true]);
            archiveArtifacts artifacts: '**/*.txt,**/*.gz,*.gz,**/*.log'
        }
    }
}

// refs/changes/06/24206/5