// Copyright 2017-present Open Networking Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

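// This is a Jenkins scripted pipeline. The following variables are expected to be
// supplied as job parameters or environment bindings (names taken from their use
// below): TestNodeName, branch, cordRepoUrl, configBaseDir, configDeploymentDir,
// configKubernetesDir, configToscaDir, configFileName, configurePod and
// notificationEmail.

// Several stages below repeat the same "poll kubectl until N matching pods are
// Running" pattern. A helper along the lines of the sketch below could factor that
// out; the name and signature are illustrative only, it is not wired into the
// stages that follow, and it must be called inside a node context so the sh step
// has an executor.
def waitForRunningPods(String pattern, int expected, int minutes) {
    timeout(minutes) {
        waitUntil {
            // Count pods matching the pattern that are Running and fully ready (1/1)
            def count = sh returnStdout: true, script: "kubectl get pods | grep '${pattern}' | grep -i running | grep '1/1' | wc -l"
            return count.trim().toInteger() == expected
        }
    }
}
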
node ("${TestNodeName}") {
    timeout (100) {
        try {
            stage ("Parse deployment configuration files") {
                sh returnStdout: true, script: "rm -rf helm-charts helm-repo-tools ${configBaseDir}"
                sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/helm-repo-tools"
                sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/helm-charts"
                sh returnStdout: true, script: "git clone -b ${branch} ${cordRepoUrl}/${configBaseDir}"
                deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
            }

            // Define the KUBECONFIG and HELMCONFIG environment variables used by the steps below
            env.KUBECONFIG = "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
            env.HELMCONFIG = "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.yml"

            stage('Clean up') {
                timeout(10) {
                    // Force the helm client to re-initialize and upgrade the tiller server pod,
                    // otherwise later helm calls may fail on a client/server version compatibility issue.
                    sh returnStdout: true, script: "helm init --upgrade --force-upgrade"

                    timeout(1) {
                        waitUntil {
                            tillerpod_running = sh returnStdout: true, script: """
                                kubectl -n kube-system get pods | grep tiller-deploy | grep Running | wc -l
                                """
                            return tillerpod_running.toInteger() == 1
                        }
                    }

                    // Delete all charts except the infrastructure ones (docker-registry, mavenrepo, ponnet)
                    sh returnStdout: true, script: """
                        for hchart in \$(helm list -q | grep -E -v 'docker-registry|mavenrepo|ponnet');
                        do
                            echo "Purging chart: \${hchart}"
                            helm delete --purge "\${hchart}"
                        done
                        """

                    timeout(3) {
                        waitUntil {
                            charts_deleted = sh returnStdout: true, script: """
                                helm ls -q | grep -E -v 'docker-registry|mavenrepo|ponnet' | wc -l
                                """
                            return charts_deleted.toInteger() == 0
                        }
                    }

                    timeout(3) {
                        waitUntil {
                            allpods_deleted = sh returnStdout: true, script: """
                                kubectl get pods --all-namespaces --no-headers |\
                                grep -E -v 'kube-system|docker-registry|mavenrepo|ponnet|test' | wc -l
                                """
                            return allpods_deleted.toInteger() == 0
                        }
                    }
                }
            }

            stage('Add Helm repositories') {
                sh returnStdout: true, script: """
                    helm repo add cord https://charts.opencord.org
                    helm repo add incubator http://storage.googleapis.com/kubernetes-charts-incubator
                    helm repo update
                    """

                timeout(1) {
                    waitUntil {
                        tillerpod_running = sh returnStdout: true, script: """
                            kubectl -n kube-system get pods | grep tiller-deploy | grep Running | wc -l
                            """
                        return tillerpod_running.toInteger() == 1
                    }
                }

                timeout(1) {
                    waitUntil {
                        cord_repo_present = sh returnStdout: true, script: """
                            helm repo list | grep cord | wc -l
                            """
                        return cord_repo_present.toInteger() == 1
                    }
                }
            }

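            // The stable/etcd-operator chart is installed first; the checks below match what
            // that chart is expected to deploy: three operator pods and the three etcd CRDs
            // (EtcdBackup, EtcdCluster, EtcdRestore). Only then is a single-member etcd
            // cluster created, exposed on NodePort 32379.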
            stage('Install etcd-cluster') {
                sh returnStdout: true, script: """
                    helm install -f $HELMCONFIG --version 0.8.3 -n etcd-operator stable/etcd-operator
                    """

                timeout(1) {
                    waitUntil {
                        etcd_operator_running = sh returnStdout: true, script: """
                            kubectl get pods | grep etcd-operator | grep -i running | grep 1/1 | wc -l
                            """
                        return etcd_operator_running.toInteger() == 3
                    }
                }

                timeout(1) {
                    waitUntil {
                        etcd_operator_crd_present = sh returnStdout: true, script: """
                            kubectl get crd -o json | jq -r '.items[].spec | select(.group=="etcd.database.coreos.com").names.kind' |\
                            grep -E 'EtcdBackup|EtcdCluster|EtcdRestore' | wc -l
                            """
                        return etcd_operator_crd_present.toInteger() == 3
                    }
                }

                sh returnStdout: true, script: """
                    helm install -f $HELMCONFIG --set etcdNodePort=32379 --set clusterSize=1 -n etcd cord/etcd-cluster
                    """

                timeout(3) {
                    waitUntil {
                        etcd_cluster_running = sh returnStdout: true, script: """
                            kubectl get pods | grep etcd-cluster | grep -i running | grep 1/1 | wc -l
                            """
                        return etcd_cluster_running.toInteger() == 1
                    }
                }
            }

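            // The mcord-setup chart installs the SR-IOV CNI and SR-IOV network device plugin.
            // Any pre-existing NetworkAttachmentDefinition CRD is deleted first (errors ignored)
            // to avoid conflicting with the one the chart creates; the wait below confirms the
            // CRD is registered again.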
            dir ("helm-charts") {
                stage('Install SR-IOV CNI and SR-IOV Network Device Plugin') {
                    sh returnStdout: true, script: """
                        kubectl delete crd network-attachment-definitions.k8s.cni.cncf.io || true
                        helm install -f $HELMCONFIG -n mcord-setup cord/mcord-setup
                        """

                    timeout(1) {
                        waitUntil {
                            network_definition_crd_exist = sh returnStdout: true, script: """
                                kubectl get crd -o json | jq -r '.items[].spec | select(.group=="k8s.cni.cncf.io").names.kind' |\
                                grep -E 'NetworkAttachmentDefinition' | wc -l
                                """
                            return network_definition_crd_exist.toInteger() == 1
                        }
                    }

                    // Sometimes the tiller pod is up but not yet ready to accept deployments;
                    // use "helm ls" to make sure it is actually serving requests.
                    timeout(1) {
                        waitUntil {
                            helm_client_working = sh returnStatus: true, script: "helm ls"
                            return helm_client_working == 0
                        }
                    }

                    // Wait a few seconds before starting to deploy the services
                    sh returnStdout: true, script: "sleep 3"
                }
            }

| 170 | stage("Deploy M-CORD Services") { |
| 171 | mcord_services_deployed = sh returnStatus: true, script: """ |
| 172 | helm install -f $HELMCONFIG -n mcord-services cord/mcord-services |
| 173 | """ |
| 174 | return mcord_services_deployed == 0 |
| 175 | |
| 176 | timeout (3) { |
| 177 | waitUntil { |
| 178 | // We should have 5 statefulsets: hss, hssdb, mme, spgwc, spgwu |
| 179 | mcord_services_running = sh returnStdout: true, script: """ |
| 180 | kubectl get statefulset.apps -o json | jq '.items | length' |
| 181 | """ |
| 182 | return mcord_services_running.toInteger() == 5 |
| 183 | } |
| 184 | } |
| 185 | } |
| 186 | |
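            // A single Kafka broker with a single ZooKeeper node is enough here, so replica
            // counts and persistence are turned down; the wait below expects two Running
            // cord-kafka pods (the broker and its ZooKeeper).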
            stage('Install CORD Kafka') {
                sh returnStdout: true, script: """
                    helm install --version 0.8.8 --set configurationOverrides."offsets\\.topic\\.replication\\.factor"=1 \
                        --set configurationOverrides."log\\.retention\\.hours"=4 \
                        --set configurationOverrides."log\\.message\\.timestamp\\.type"="LogAppendTime" \
                        --set replicas=1 --set persistence.enabled=false --set zookeeper.replicaCount=1 \
                        --set zookeeper.persistence.enabled=false -n cord-kafka incubator/kafka
                    """

                timeout(3) {
                    waitUntil {
                        kafka_instances_running = sh returnStdout: true, script: """
                            kubectl get pods | grep cord-kafka | grep -i running | grep 1/1 | wc -l
                            """
                        return kafka_instances_running.toInteger() == 2
                    }
                }
            }

| 205 | |
| 206 | stage('Install Monitoring Infrastructure') { |
| 207 | timeout(3) { |
| 208 | sh returnStdout: true, script: """ |
| 209 | helm install -f $HELMCONFIG -n nem-monitoring cord/nem-monitoring --version 1.0.1 |
| 210 | helm-repo-tools/wait_for_pods.sh |
| 211 | """ |
| 212 | } |
| 213 | } |
| 214 | |
| 215 | stage('Install ONOS') { |
| 216 | sh returnStdout: true, script: """ |
| 217 | helm install -n onos -f $HELMCONFIG cord/onos --version 1.1.0 |
| 218 | """ |
| 219 | |
| 220 | timeout(3) { |
| 221 | waitUntil { |
| 222 | onos_completed = sh returnStdout: true, script: """ |
| 223 | kubectl get pods | grep -i onos | grep -i running | grep 1/1 | wc -l |
| 224 | """ |
| 225 | return onos_completed.toInteger() == 1 |
| 226 | } |
| 227 | } |
| 228 | } |
| 229 | |
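            // The xos-core chart brings up the XOS core services; the check below waits for
            // six xos-* pods to reach Running 1/1, matching what this chart version deploys.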
            stage('Install xos-core') {
                sh returnStdout: true, script: """
                    helm install -f $HELMCONFIG -n xos-core cord/xos-core --version 2.3.3
                    """

                timeout(3) {
                    waitUntil {
                        xos_core_running = sh returnStdout: true, script: """
                            kubectl get pods | grep -i xos | grep -i running | grep 1/1 | wc -l
                            """
                        return xos_core_running.toInteger() == 6
                    }
                }
            }

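            // The mcord profile (and base-kubernetes below) are onboarded by tosca-loader
            // jobs; a tosca-loader pod in the "Completed" state indicates the corresponding
            // TOSCA recipes have been loaded.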
            stage('Install M-CORD Profile') {
                sh returnStdout: true, script: """
                    helm install -f $HELMCONFIG -n mcord cord/mcord --version 1.0.11
                    """

                timeout(5) {
                    waitUntil {
                        mcord_tosca_completed = sh returnStdout: true, script: """
                            kubectl get pods | grep -i mcord-tosca-loader | grep -i completed | wc -l
                            """
                        return mcord_tosca_completed.toInteger() == 1
                    }
                }
            }

            stage('Install base-kubernetes') {
                sh returnStdout: true, script: """
                    helm install -f $HELMCONFIG -n base-kubernetes cord/base-kubernetes --version 1.0.2
                    """

                timeout(5) {
                    waitUntil {
                        base_kubernetes_tosca_completed = sh returnStdout: true, script: """
                            kubectl get pods | grep -i base-kubernetes-tosca-loader | grep -i completed | wc -l
                            """
                        return base_kubernetes_tosca_completed.toInteger() == 1
                    }
                }
            }

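            // Optional pod configuration: POST the per-pod fabric TOSCA recipe to the XOS
            // TOSCA endpoint (NodePort 30007 on the first node from the deployment config)
            // and treat a "created models" response as success.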
            if ( params.configurePod ) {
                dir ("${configBaseDir}/${configToscaDir}/mcord") {
                    stage('Configure MCORD - Fabric') {
                        timeout(1) {
                            waitUntil {
                                out_fabric = sh returnStdout: true, script: """
                                    curl -s -H "xos-username:admin@opencord.org" -H "xos-password:letmein" -X POST \
                                        --data-binary @${configFileName}-fabric.yaml http://${deployment_config.nodes[0].ip}:30007/run |\
                                    grep -i "created models" | wc -l
                                    """
                                return out_fabric.toInteger() == 1
                            }
                        }
                    }
                }
            }
            currentBuild.result = 'SUCCESS'
        } catch (err) {
            println err.message
            currentBuild.result = 'FAILURE'
            step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${notificationEmail}", sendToIndividuals: false])
        }
        echo "RESULT: ${currentBuild.result}"
    }
}