// Copyright 2017-present Open Networking Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This pipeline is used to deploy VOLTHA and configure ONOS on physical PODs.

// NOTE we are importing the library even if it's global so that it's
// easier to change the keywords during a replay
library identifier: 'cord-jenkins-libraries@master',
    retriever: modernSCM([
      $class: 'GitSCMSource',
      remote: 'https://gerrit.opencord.org/ci-management.git'
])

def infraNamespace = "infra"
def volthaNamespace = "voltha"
def clusterName = "kind-ci"

pipeline {
  /* no label, executor is determined by JJB */
  agent {
    label "${params.buildNode}"
  }
  options {
    timeout(time: 120, unit: 'MINUTES')
  }
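  // KUBECONFIG points kubectl/helm at the kind cluster used by this job,
  // VOLTCONFIG at the voltctl configuration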
  environment {
    PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
    KUBECONFIG="$HOME/.kube/kind-${clusterName}"
    VOLTCONFIG="$HOME/.volt/config"
  }
  stages {
    stage('Download Code') {
      steps {
        getVolthaCode([
          branch: "${branch}",
          gerritProject: "${gerritProject}",
          gerritRefspec: "${gerritRefspec}",
          volthaSystemTestsChange: "${volthaSystemTestsChange}",
          volthaHelmChartsChange: "${volthaHelmChartsChange}",
        ])
      }
    }
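    // clone the repository that contains the POD deployment configuration and
    // load the YAML file matching the selected workflow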
    stage('Parse deployment configuration file') {
      steps {
        sh returnStdout: true, script: "rm -rf ${configBaseDir}"
        sh returnStdout: true, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
        script {

          if (params.workflow.toUpperCase() == "TT") {
            error("The Tucson POD does not support the TT workflow at the moment")
          }

          if (params.workflow.toUpperCase() == "DT") {
            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
          }
          else if (params.workflow.toUpperCase() == "TT") {
            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
          }
          else {
            deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
          }
        }
      }
    }
    stage('Clean up') {
      steps {
        timeout(15) {
          script {
            helmTeardown(["default", infraNamespace, volthaNamespace])
          }
          timeout(1) {
            sh returnStdout: false, script: '''
            # remove orphaned port-forwards from different namespaces
            ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
            '''
          }
        }
      }
    }
    stage('Build patch') {
      steps {
        // NOTE that the correct patch has already been checked out
        // during the getVolthaCode step
        buildVolthaComponent("${gerritProject}")
      }
    }
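    // create the kind cluster only if one with the expected name does not exist yet,
    // so that it can be reused across runs on the same node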
    stage('Create K8s Cluster') {
      steps {
        script {
          def clusterExists = sh returnStdout: true, script: """
          kind get clusters | grep ${clusterName} | wc -l
          """
          if (clusterExists.trim() == "0") {
            createKubernetesCluster([nodes: 3, name: clusterName])
          }
        }
      }
    }
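    // make the container images built from the patch available to the kind nodes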
    stage('Load image in kind nodes') {
      steps {
        loadToKind()
      }
    }
    stage('Install Voltha') {
      steps {
        timeout(20) {
          script {
            imageFlags = getVolthaImageFlags(gerritProject)
            // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
            def localCharts = false
            if (volthaHelmChartsChange != "" || gerritProject == "voltha-helm-charts") {
              localCharts = true
            }
            def flags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml ${imageFlags} "
            // NOTE: temporary workaround to expose the ONOS node ports (the pod-config needs to be updated to contain these values)
            flags = flags + "--set onos-classic.onosSshPort=30115 " +
              "--set onos-classic.onosApiPort=30120 " +
              "--set onos-classic.onosOfPort=31653 " +
              "--set onos-classic.individualOpenFlowNodePorts=true " + extraHelmFlags
            volthaDeploy([
              workflow: workFlow.toLowerCase(),
              extraHelmFlags: flags,
              localCharts: localCharts,
              kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
              onosReplica: 3,
              atomixReplica: 3,
              kafkaReplica: 3,
              etcdReplica: 3,
            ])
          }
          // start logging
          sh """
          rm -rf $WORKSPACE/${workFlow}/
          mkdir -p $WORKSPACE/${workFlow}
          _TAG=kail-${workFlow} kail -n infra -n voltha > $WORKSPACE/${workFlow}/onos-voltha-combined.log &
          """
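          // expose the VOLTHA API, etcd and Kafka on the host via port-forward so that the tests can reach them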
          sh """
          JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
          JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
          JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
          ps aux | grep port-forward
          """
          getPodsInfo("$WORKSPACE")
        }
      }
    }
    stage('Deploy Kafka Dump Chart') {
      steps {
        script {
          sh returnStdout: false, script: """
          helm repo add cord https://charts.opencord.org
          helm repo update
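          # helm v2 takes the release name via -n, helm v3 takes it as a positional argument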
          if helm version -c --short|grep v2 -q; then
            helm install -n voltha-kafka-dump cord/voltha-kafka-dump
          else
            helm install voltha-kafka-dump cord/voltha-kafka-dump
          fi
          """
        }
      }
    }
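    // when a non-default Technology Profile is selected, push it into etcd
    // under the XGS-PON technology profile path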
    stage('Push Tech-Profile') {
      when {
        expression { params.profile != "Default" }
      }
      steps {
        sh returnStdout: false, script: """
        etcd_container=\$(kubectl get pods -n voltha | grep voltha-etcd-cluster | awk 'NR==1{print \$1}')
        kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json voltha/\$etcd_container:/tmp/flexpod.json
        kubectl exec -it \$etcd_container -n voltha -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/XGS-PON/64'
        """
      }
    }

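    // raise the log level of the relevant ONOS apps and push the workflow-specific
    // SADIS configuration through the ONOS REST API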
    stage('Push Sadis-config') {
      steps {
        sh returnStdout: false, script: """
        ssh-keygen -R [${deployment_config.nodes[0].ip}]:30115
        ssh-keyscan -p 30115 -H ${deployment_config.nodes[0].ip} >> ~/.ssh/known_hosts
        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.dhcpl2relay"
        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.aaa"
        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.olt"
        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager"
        sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager"

        if [[ "${workFlow.toUpperCase()}" == "DT" ]]; then
          curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-DT.json
        elif [[ "${workFlow.toUpperCase()}" == "TT" ]]; then
          curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-TT.json
        else
          # this is the ATT case; rename the file to *-sadis-ATT.json so that we can avoid special cases and just load the file
          curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis.json
        fi
        """
      }
    }
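    // optionally wipe and reinstall the OLT debian package (asfvolt16),
    // picking the version that matches the branch under test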
    stage('Reinstall OLT software') {
      when {
        expression { params.reinstallOlt }
      }
      steps {
        script {
          deployment_config.olts.each { olt ->
            sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --remove asfvolt16 && dpkg --purge asfvolt16'"
            waitUntil {
              olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
              return olt_sw_present.toInteger() == 0
            }
            if (params.branch == 'voltha-2.3') {
              oltDebVersion = oltDebVersionVoltha23
            } else {
              oltDebVersion = oltDebVersionMaster
            }
            sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --install ${oltDebVersion}'"
            waitUntil {
              olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
              return olt_sw_present.toInteger() == 1
            }
            if (olt.fortygig) {
              // If the OLT is connected to a 40G switch interface, downgrade the NNI port speed to 40G
              sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'echo port ce128 sp=40000 >> /broadcom/qax.soc ; /opt/bcm68620/svk_init.sh'"
            }
          }
        }
      }
    }

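    // reboot the OLTs with clean logs and wait until the openolt agent reports an ONU discovery indication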
    stage('Restart OLT processes') {
      steps {
        script {
          deployment_config.olts.each { olt ->
            sh returnStdout: false, script: """
            ssh-keyscan -H ${olt.ip} >> ~/.ssh/known_hosts
            sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; reboot'
            sleep 120
            """
            waitUntil {
              onu_discovered = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'grep \"onu discover indication\" /var/log/openolt.log | wc -l'"
              return onu_discovered.toInteger() > 0
            }
          }
        }
      }
    }
    stage('Run E2E Tests') {
      steps {
        script {
          // different workflows need different make targets and different robot files
          if (params.workflow.toUpperCase() == "DT") {
            robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
            robotFile = "Voltha_DT_PODTests.robot"
            makeTarget = "voltha-dt-test"
            robotFunctionalKeyword = "-i functionalDt"
            robotDataplaneKeyword = "-i dataplaneDt"
          }
          else if (params.workflow.toUpperCase() == "TT") {
            // TODO the TT tests have different tags, address once/if TT is supported on the Tucson POD
            robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
            robotFile = "Voltha_TT_PODTests.robot"
            makeTarget = "voltha-tt-test"
            robotFunctionalKeyword = "-i functionalTt"
            robotDataplaneKeyword = "-i dataplaneTt"
          }
          else {
            robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
            robotFile = "Voltha_PODTests.robot"
            makeTarget = "voltha-test"
            robotFunctionalKeyword = "-i functional"
            robotDataplaneKeyword = "-i dataplane"
          }
        }
        sh returnStdout: false, script: """
        mkdir -p $WORKSPACE/RobotLogs

        export ROBOT_CONFIG_FILE="$WORKSPACE/${robotConfigFile}"
        export ROBOT_MISC_ARGS="${params.extraRobotArgs} --removekeywords wuks -d $WORKSPACE/RobotLogs -v container_log_dir:$WORKSPACE "
        export ROBOT_FILE="${robotFile}"

        # If the Gerrit comment contains a line with "functional tests" then run the full
        # functional test suite. This covers tests tagged either 'sanity' or 'functional'.
        # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
        REGEX="functional tests"
        if [[ "${gerritComment}" =~ \$REGEX ]]; then
          ROBOT_MISC_ARGS+="${robotFunctionalKeyword} "
        fi
        # Likewise for dataplane tests
        REGEX="dataplane tests"
        if [[ "${gerritComment}" =~ \$REGEX ]]; then
          ROBOT_MISC_ARGS+="${robotDataplaneKeyword}"
        fi

        make -C $WORKSPACE/voltha-system-tests ${makeTarget} || true
        """
      }
    }
  }
  post {
    always {
      // stop logging
      sh """
      P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${workFlow}" | grep -v grep | awk '{print \$1}')"
      if [ -n "\$P_IDS" ]; then
        echo \$P_IDS
        for P_ID in \$P_IDS; do
          kill -9 \$P_ID
        done
      fi
      gzip $WORKSPACE/${workFlow}/onos-voltha-combined.log || true
      """
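      // publish the Robot Framework results and archive the collected logs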
      step([$class: 'RobotPublisher',
        disableArchiveOutput: false,
        logFileName: 'RobotLogs/log*.html',
        otherFiles: '',
        outputFileName: 'RobotLogs/output*.xml',
        outputPath: '.',
        passThreshold: 100,
        reportFileName: 'RobotLogs/report*.html',
        unstableThreshold: 0]);
      archiveArtifacts artifacts: '**/*.txt,**/*.gz,*.gz'
    }
  }
}

// refs/changes/06/24206/5