// Copyright 2019-present Open Networking Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

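// Scripted Jenkins pipeline that deploys the CORD/COMAC platform onto an
// existing Kubernetes cluster and verifies each component as it comes up.
// Assumption: buildNode, branch, cordRepoUrl, configBaseDir,
// configDeploymentDir, configKubernetesDir, configToscaDir, configFileName,
// and notificationEmail are supplied as parameters by the Jenkins job that
// loads this pipeline.
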
node ("${buildNode}") {
  timeout (100) {
    try {
      // Start Jenkins steps from here
      stage ("Parse deployment configuration files") {
        sh returnStdout: true, script: "rm -rf helm-charts helm-repo-tools ${configBaseDir}"
        sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/helm-repo-tools"
        sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/helm-charts"
        sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
        deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
      }

      // Define KUBECONFIG & HELMCONFIG environment variables for use in later steps
      env.KUBECONFIG = "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
      env.HELMCONFIG = "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.yml"

      stage('Clean up') {
        timeout(10) {
          // Force the helm client to redeploy the tiller (server) pod;
          // otherwise later helm calls may fail on version-compatibility issues.
          sh returnStdout: true, script: "helm init --upgrade --force-upgrade"

          timeout(1) {
            waitUntil {
              tillerpod_running = sh returnStdout: true, script: """
              kubectl -n kube-system get pods | grep tiller-deploy | grep Running | wc -l
              """
              return tillerpod_running.toInteger() == 1
            }
          }

          // Delete all charts except the persistent infrastructure ones
          // (docker-registry, mavenrepo, ponnet)
          sh returnStdout: true, script: """
          for hchart in \$(helm list -q | grep -E -v 'docker-registry|mavenrepo|ponnet');
          do
            echo "Purging chart: \${hchart}"
            helm delete --purge "\${hchart}"
          done
          """

          timeout(3) {
            waitUntil {
              charts_deleted = sh returnStdout: true, script: """
              helm ls -q | grep -E -v 'docker-registry|mavenrepo|ponnet' | wc -l
              """
              return charts_deleted.toInteger() == 0
            }
          }

          timeout(3) {
            waitUntil {
              allpods_deleted = sh returnStdout: true, script: """
              kubectl get pods --all-namespaces --no-headers |\
              grep -E -v 'kube-system|docker-registry|mavenrepo|ponnet|test' | wc -l
              """
              return allpods_deleted.toInteger() == 0
            }
          }
        }
      }

      stage('Add Helm repositories') {
        sh returnStdout: true, script: """
        helm repo add cord https://charts.opencord.org
        helm repo add incubator http://storage.googleapis.com/kubernetes-charts-incubator
        helm repo update
        """

        timeout(1) {
          waitUntil {
            tillerpod_running = sh returnStdout: true, script: """
            kubectl -n kube-system get pods | grep tiller-deploy | grep Running | wc -l
            """
            return tillerpod_running.toInteger() == 1
          }
        }

        timeout(1) {
          waitUntil {
            cord_repo_present = sh returnStdout: true, script: """
            helm repo list | grep cord | wc -l
            """
            return cord_repo_present.toInteger() == 1
          }
        }
      }

      stage('Install CORD Platform') {
        sh returnStdout: true, script: """
        helm install -f $HELMCONFIG -n cord-platform --version 7.0.0 cord/cord-platform
        """
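        // (Helm v2 here: "-n" is shorthand for "--name", the release name,
        // not a Kubernetes namespace.)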

        timeout(1) {
          waitUntil {
            cord_helm_installed = sh returnStdout: true, script: """
            helm ls | grep -i cord-platform | wc -l
            """
            return cord_helm_installed.toInteger() == 1
          }
        }
      }

      stage('Wait for etcd-operator to be installed') {
        timeout(10) {
          waitUntil {
            etcd_operator_installed = sh returnStdout: true, script: """
            kubectl get pods | grep -i etcd-operator | grep -i running | wc -l
            """
            crd_present = sh returnStdout: true, script: """
            kubectl get crd | grep -i etcd | wc -l
            """
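            // Assumption: the chart deploys three etcd-operator pods (cluster,
            // backup and restore operators) and registers three etcd CRDs,
            // hence the expected sum of 6 below.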
            return etcd_operator_installed.toInteger() + crd_present.toInteger() == 6
          }
        }
      }

      stage('Verify that CORD Kafka service is ready') {
        timeout(3) {
          waitUntil {
            kafka_instances_running = sh returnStdout: true, script: """
            kubectl get pods | grep cord-platform-kafka | grep -i running | grep 1/1 | wc -l
            """
            return kafka_instances_running.toInteger() == 1
          }
        }
      }

      stage('Verify that CORD Zookeeper service is ready') {
        timeout(3) {
          waitUntil {
            zookeeper_instances_running = sh returnStdout: true, script: """
            kubectl get pods | grep cord-platform-zookeeper | grep -i running | grep 1/1 | wc -l
            """
            return zookeeper_instances_running.toInteger() == 1
          }
        }
      }

      stage('Verify that XOS is ready') {
        timeout(3) {
          waitUntil {
            xos_core_running = sh returnStdout: true, script: """
            kubectl get pods | grep -i xos | grep -i running | grep 1/1 | wc -l
            """
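            // Assumption: the cord-platform chart brings up six xos-* pods
            // (core, GUI, TOSCA loader, etc.); adjust the count if the chart
            // composition changes.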
            return xos_core_running.toInteger() == 6
          }
        }
      }

      stage('Verify that NEM is ready') {
        // prometheus
        timeout(10) {
          waitUntil {
            prometheus_running = sh returnStdout: true, script: """
            kubectl get pods | grep -i prometheus | grep -v Running | wc -l
            """
            return prometheus_running.toInteger() == 0
          }
        }
        // grafana
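        // (the grafana pod is assumed to run two containers, grafana itself
        // plus a dashboard sidecar, hence the 2/2 readiness check)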
        timeout(10) {
          waitUntil {
            grafana_running = sh returnStdout: true, script: """
            kubectl get pods | grep -i grafana | grep -i running | grep 2/2 | wc -l
            """
            return grafana_running.toInteger() == 1
          }
        }

        // kpi-exporter
        timeout(10) {
          waitUntil {
            kpiexporter_running = sh returnStdout: true, script: """
            kubectl get pods | grep -i kpi-exporter | grep -v Running | wc -l
            """
            return kpiexporter_running.toInteger() == 0
          }
        }
      }

      stage('Verify that Logging is ready') {
        // elasticsearch
        timeout(10) {
          waitUntil {
            elasticsearch_running = sh returnStdout: true, script: """
            kubectl get pods | grep -i elasticsearch | grep -v Running | wc -l
            """
            return elasticsearch_running.toInteger() == 0
          }
        }

        // kibana
        timeout(10) {
          waitUntil {
            kibana_running = sh returnStdout: true, script: """
            kubectl get pods | grep -i kibana | grep -i running | grep 1/1 | wc -l
            """
            return kibana_running.toInteger() == 1
          }
        }

        // logstash
        timeout(10) {
          waitUntil {
            logstash_running = sh returnStdout: true, script: """
            kubectl get pods | grep -i logstash | grep -i running | grep 1/1 | wc -l
            """
            return logstash_running.toInteger() == 1
          }
        }
      }

      stage('Install COMAC-Platform') {
        sh returnStdout: true, script: """
        kubectl delete crd network-attachment-definitions.k8s.cni.cncf.io || true
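        # Note: the SR-IOV device PCI address (04:00.0) is site-specific; it
        # must match the NIC installed in the target nodes.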
        helm install -n comac-platform \
          --set mcord-setup.sriov_vfio.devicepci=04:00.0 \
          --set mcord-setup.sriov_netdevice.devicepci=04:00.0 \
          --version 0.0.2 cord/comac-platform
        """
      }

      stage('Verify and redeploy OMEC-CNI if problems occur') {
        // verification
        timeout(1) {
          waitUntil {
            network_definition_crd_exist = sh returnStdout: true, script: """
            kubectl get crd -o json | jq -r '.items[].spec | select(.group=="k8s.cni.cncf.io").names.kind' |\
            grep -E 'NetworkAttachmentDefinition' | wc -l
            """
            return network_definition_crd_exist.toInteger() == 1
          }
        }

        // verification: one mcord-setup (SR-IOV) pod should be running on each machine
        timeout (1) {
          waitUntil {
            num_sriov_pods = sh returnStdout: true, script: """
            kubectl get pods -n kube-system | grep sriov | wc -l
            """
            return num_sriov_pods.toInteger() == deployment_config.nodes.size()
          }
        }

        // redeployment: pods stuck in Init on "MountVolume.SetUp failed" are
        // force-deleted so that they get recreated
        max_retry_index = 100
        for (int i = 0; i < max_retry_index; i++) {
          // warming-up period
          sh returnStdout: true, script: "sleep 30"

          sh script: """
          for pod in \$(kubectl get pods -n kube-system | grep sriov | grep Init | awk '{print \$1}');
          do
            echo \$pod is initializing
            num_err_msgs=\$(kubectl describe pods \$pod -n kube-system | tail -4 | grep -E 'MountVolume.SetUp failed' | wc -l)
            if [ \$num_err_msgs -gt 0 ]; then
              kubectl delete pod \$pod -n kube-system --force --grace-period=0
            fi
          done
          """

          sriov_all_ready = sh returnStdout: true, script: """
          kubectl get pods -n kube-system | grep sriov | grep Running | wc -l
          """
          if (sriov_all_ready.toInteger() == deployment_config.nodes.size()) {
            break
          }
        }

        // verification: tillerpod
        // Sometimes the tiller pod is up but not yet ready to accept deployments;
        // use "helm ls" to make sure it is actually serving.
        timeout(1) {
          waitUntil {
            helm_client_working = sh returnStatus: true, script: "helm ls"
            return helm_client_working == 0
          }
        }

        // Cooling-down period: wait 10 seconds before starting to deploy services
        sh returnStdout: true, script: "sleep 10"
      }

      stage('Wait for etcd-cluster to be installed') {
        timeout(10) {
          waitUntil {
            etcd_cluster_running = sh returnStdout: true, script: """
            kubectl get pods | grep etcd-cluster | grep -i running | grep 1/1 | wc -l
            """
            return etcd_cluster_running.toInteger() == 1
          }
        }
      }

      stage('Verify that M-CORD Profile is ready') {
        timeout(10) {
          waitUntil {
            mcord_tosca_completed = sh returnStdout: true, script: """
            kubectl get pods | grep -i comac-platform-mcord-tosca-loader | grep -i completed | wc -l
            """
            return mcord_tosca_completed.toInteger() == 1
          }
        }
      }

      stage('Verify that base-kubernetes is ready') {
        timeout(5) {
          waitUntil {
            base_kubernetes_tosca_completed = sh returnStdout: true, script: """
            kubectl get pods | grep -i comac-platform-base-kubernetes-tosca-loader | grep -i completed | wc -l
            """
            return base_kubernetes_tosca_completed.toInteger() == 1
          }
        }
      }

      stage("Deploy M-CORD Services") {
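        // The MME PLMN is configured digit by digit below via
        // mme.conf.mme.mcc/mnc (MCC 302, MNC 720); enb.host points the MME
        // at the eNodeB address.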
        sh returnStatus: true, script: """
        helm install -f $HELMCONFIG --set mme.type=openmme \
          --set mme.conf.mme.mcc.dig1=3 --set mme.conf.mme.mcc.dig2=0 --set mme.conf.mme.mcc.dig3=2 \
          --set mme.conf.mme.mnc.dig1=7 --set mme.conf.mme.mnc.dig2=2 --set mme.conf.mme.mnc.dig3=0 \
          --set enb.host=119.0.0.10 -n mcord-services cord/mcord-services
        """

        timeout (3) {
          waitUntil {
            // We should have 5 statefulsets: hss, hssdb, mme, spgwc, spgwu
            mcord_services_running = sh returnStdout: true, script: """
            kubectl get statefulset.apps -o json | jq '.items[].metadata.name' | grep -v cord-platform | wc -l
            """
            return mcord_services_running.toInteger() == 5
          }
        }
      }

      stage("Deploy CDN-Remote Services") {
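        // The Ant Media server is pinned to the first node's IP and the test
        // stream is fixed at 360p (stream_name/video_quality below).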
        sh returnStatus: true, script: """
        helm install -f $HELMCONFIG -n cdn-remote --set remote_streaming.antmedia_ip=${deployment_config.nodes[0].ip} \
        --set stream_name=360 --set remote_streaming.video_quality=360 cord/cdn-remote
        """

        timeout (3) {
          waitUntil {
            // Are the Ant Media server and FFmpeg containers ready?
            cdn_remote_service_running = sh returnStdout: true, script: """
            kubectl get statefulset.apps -o json | jq '.items[].metadata.name' | grep -E 'antmedia|remote-streaming' | wc -l
            """
            return cdn_remote_service_running.toInteger() == 2
          }
        }
      }

      stage("Deploy CDN-Local Services") {
        // Is SPGW-U ready?
        timeout(3) {
          waitUntil {
            spgwu_running = sh returnStdout: true, script: """
            kubectl get pods | grep spgwu | grep Running | grep 1/1 | wc -l
            """
            return spgwu_running.toInteger() == 1
          }
        }

        cdn_local_service_deployed = sh returnStatus: true, script: """
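        # discover the SPGW-U pod's IPv4 address on the SGI network and hand
        # it to the cdn-local chart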
        spgwu_ip=\$(kubectl exec -it spgwu-0 -- ip -4 addr show dev sgi-net | grep inet | awk '{print \$2}' | awk -F '/' '{print \$1}');
        helm install -f $HELMCONFIG -n cdn-local --set remote_ip=${deployment_config.nodes[0].ip} --set stream_name=360 \
        --set spgwu_sgiip=\$spgwu_ip cord/cdn-local
        """
        if (cdn_local_service_deployed != 0) {
          error "cdn-local helm install failed"
        }

        timeout(3) {
          waitUntil {
            // Is NGINX ready?
            cdn_local_service_running = sh returnStdout: true, script: """
            kubectl get statefulset.apps -o json | jq '.items[].metadata.name' | grep -E 'nginx-rtmp' | wc -l
            """
            return cdn_local_service_running.toInteger() == 1
          }
        }
      }

      // To-do: SEBA profile and VNF deployment code will be added

      if ( params.configurePod ) {
        dir ("${configBaseDir}/${configToscaDir}/mcord") {
          stage('Configure MCORD - Fabric') {
            timeout(1) {
              waitUntil {
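                // POST the fabric TOSCA file to the XOS TOSCA engine (exposed
                // on NodePort 30007) and succeed once it reports created models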
                out_fabric = sh returnStdout: true, script: """
                curl -s -H "xos-username:admin@opencord.org" -H "xos-password:letmein" -X POST \
                --data-binary @${configFileName}-fabric-cavium.yaml http://${deployment_config.nodes[0].ip}:30007/run |\
                grep -i "created models" | wc -l
                """
                return out_fabric.toInteger() == 1
              }
            }
          }
        }
      }

      currentBuild.result = 'SUCCESS'
    } catch (err) {
      println err.message
      currentBuild.result = 'FAILURE'
      step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${notificationEmail}", sendToIndividuals: false])
    }
    echo "RESULT: ${currentBuild.result}"
  }
}