[VOL-3780] Upgrading per-patchset validation to use the new charts

Change-Id: I1858f22032dd7b00215f3af0b2ffb038d1615cc2
diff --git a/vars/buildVolthaComponent.groovy b/vars/buildVolthaComponent.groovy
new file mode 100644
index 0000000..c0e4163
--- /dev/null
+++ b/vars/buildVolthaComponent.groovy
@@ -0,0 +1,20 @@
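+// usage example (a sketch of how a Jenkinsfile might invoke this step;
+// 'voltha-go' is a hypothetical gerrit project name, used only for illustration):
+//
+//   buildVolthaComponent('voltha-go')
+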
+def call(String project) {
+  // project is the gerrit project name
+
+  if (project != 'voltha-system-tests' &&
+    project != 'voltha-helm-charts' &&
+    project != '') {
+
+    sh """
+    make -C $WORKSPACE/${project} DOCKER_REPOSITORY=voltha/ DOCKER_TAG=citest docker-build
+    """
+  } else {
+    println "The project ${project} does not require to be built."
+  }
+
+}
diff --git a/vars/createKubernetesCluster.groovy b/vars/createKubernetesCluster.groovy
new file mode 100644
index 0000000..fccbaed
--- /dev/null
+++ b/vars/createKubernetesCluster.groovy
@@ -0,0 +1,74 @@
+// sets up a kubernetes cluster (using kind)
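+//
+// usage (a sketch; both parameters are optional and shown with their defaults.
+// note that the node layout is currently fixed by the inline kind.cfg below):
+//
+//   createKubernetesCluster([nodes: 1, name: "kind-ci"])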
+
+def call(Map config) {
+    // note that I can't define this outside the function as there's no global scope in Groovy
+    def defaultConfig = [
+      nodes: 1,
+      name: "kind-ci"
+    ]
+
+    if (!config) {
+        config = [:]
+    }
+
+    def cfg = defaultConfig + config
+
+    println "Deploying Kind cluster with the following parameters: ${cfg}."
+
+    // TODO support different configs
+    def data = """
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+- role: worker
+- role: worker
+    """
+    writeFile(file: 'kind.cfg', text: data)
+
+    // TODO skip cluster creation if cluster is already there
+    sh """
+      mkdir -p $WORKSPACE/bin
+
+      # download kind (should we add it to the base image?)
+      curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.9.0/kind-linux-amd64
+      chmod +x ./kind
+      mv ./kind $WORKSPACE/bin/kind
+
+      # install voltctl
+      HOSTOS="\$(uname -s | tr "[:upper:]" "[:lower:]")"
+      HOSTARCH="\$(uname -m | tr "[:upper:]" "[:lower:]")"
+      if [ "\$HOSTARCH" == "x86_64" ]; then
+          HOSTARCH="amd64"
+      fi
+      curl -Lo ./voltctl https://github.com/opencord/voltctl/releases/download/v1.3.1/voltctl-1.3.1-\$HOSTOS-\$HOSTARCH
+      chmod +x ./voltctl
+      mv ./voltctl $WORKSPACE/bin/
+
+      # start the kind cluster
+      kind create cluster --name ${cfg.name} --config kind.cfg
+
+      # remove NoSchedule taint from nodes
+      for MNODE in \$(kubectl get node --selector='node-role.kubernetes.io/master' -o json | jq -r '.items[].metadata.name'); do
+          kubectl taint node "\$MNODE" node-role.kubernetes.io/master:NoSchedule-
+      done
+
+      mkdir -p $HOME/.volt
+      voltctl -s localhost:55555 config > $HOME/.volt/config
+
+      mkdir -p $HOME/.kube
+      kind get kubeconfig --name ${cfg.name} > $HOME/.kube/config
+
+      # add helm repositories
+      helm repo add onf https://charts.opencord.org
+      helm repo update
+
+      # download kail
+      bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
+  """
+}
diff --git a/vars/getVolthaCode.groovy b/vars/getVolthaCode.groovy
new file mode 100644
index 0000000..815e719
--- /dev/null
+++ b/vars/getVolthaCode.groovy
@@ -0,0 +1,119 @@
+// TODO the 3 stages are very similar, most of the code can be shared
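+//
+// usage (a sketch; the project name and refspec below are hypothetical values,
+// used only for illustration. all parameters default to the values listed in
+// defaultConfig):
+//
+//   getVolthaCode([
+//     branch: "master",
+//     gerritProject: "voltha-go",
+//     gerritRefspec: "refs/changes/00/12300/1",
+//   ])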
+
+def call(Map config) {
+
+  def defaultConfig = [
+    branch: "master",
+    gerritProject: "",
+    gerritRefspec: "",
+    volthaSystemTestsChange: "",
+    volthaHelmChartsChange: "",
+  ]
+
+  if (!config) {
+      config = [:]
+  }
+
+  def cfg = defaultConfig + config
+
+  println "Downloading VOLTHA code with the following parameters: ${cfg}."
+
+  stage('Download Patch') {
+    // We always download those repos in the next stages; if the patch under test
+    // is in one of them, just check out the patch there, no need to clone it again.
+    if (cfg.gerritProject != 'voltha-system-tests' &&
+      cfg.gerritProject != 'voltha-helm-charts' &&
+      cfg.gerritProject != '') {
+      checkout([
+        $class: 'GitSCM',
+        userRemoteConfigs: [[
+          url: "https://gerrit.opencord.org/${cfg.gerritProject}",
+        ]],
+        branches: [[ name: "${cfg.branch}", ]],
+        extensions: [
+          [$class: 'WipeWorkspace'],
+          [$class: 'RelativeTargetDirectory', relativeTargetDir: "${cfg.gerritProject}"],
+          [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
+        ],
+      ])
+      sh """
+        pushd $WORKSPACE/${cfg.gerritProject}
+        git fetch https://gerrit.opencord.org/${cfg.gerritProject} ${cfg.gerritRefspec} && git checkout FETCH_HEAD
+
+        echo "Currently on commit: \n"
+        git log -1 --oneline
+        popd
+      """
+    }
+  }
+  stage('Clone voltha-system-tests') {
+    checkout([
+      $class: 'GitSCM',
+      userRemoteConfigs: [[
+        url: "https://gerrit.opencord.org/voltha-system-tests",
+      ]],
+      branches: [[ name: "${cfg.branch}", ]],
+      extensions: [
+        [$class: 'WipeWorkspace'],
+        [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
+        [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
+      ],
+    ])
+    if (cfg.volthaSystemTestsChange != '' && cfg.gerritProject != 'voltha-system-tests') {
+      sh """
+        cd $WORKSPACE/voltha-system-tests
+        git fetch https://gerrit.opencord.org/voltha-system-tests ${cfg.volthaSystemTestsChange} && git checkout FETCH_HEAD
+      """
+    }
+    else if (cfg.gerritProject == 'voltha-system-tests') {
+      sh """
+        pushd $WORKSPACE/${cfg.gerritProject}
+        git fetch https://gerrit.opencord.org/${cfg.gerritProject} ${cfg.gerritRefspec} && git checkout FETCH_HEAD
+
+        echo "Currently on commit: \n"
+        git log -1 --oneline
+        popd
+      """
+    }
+  }
+  stage('Clone voltha-helm-charts') {
+    checkout([
+      $class: 'GitSCM',
+      userRemoteConfigs: [[
+        url: "https://gerrit.opencord.org/voltha-helm-charts",
+      ]],
+      branches: [[ name: "master", ]],
+      extensions: [
+        [$class: 'WipeWorkspace'],
+        [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-helm-charts"],
+        [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
+      ],
+    ])
+    if (cfg.volthaHelmChartsChange != '' && cfg.gerritProject != 'voltha-helm-charts') {
+      sh """
+        cd $WORKSPACE/voltha-helm-charts
+        git fetch https://gerrit.opencord.org/voltha-helm-charts ${cfg.volthaHelmChartsChange} && git checkout FETCH_HEAD
+      """
+    }
+    else if (cfg.gerritProject == 'voltha-helm-charts') {
+      sh """
+        pushd $WORKSPACE/${cfg.gerritProject}
+        git fetch https://gerrit.opencord.org/${cfg.gerritProject} ${cfg.gerritRefspec} && git checkout FETCH_HEAD
+
+        echo "Currently on commit: \n"
+        git log -1 --oneline
+        popd
+      """
+    }
+  }
+}
diff --git a/vars/hello.groovy b/vars/hello.groovy
deleted file mode 100644
index b40894b..0000000
--- a/vars/hello.groovy
+++ /dev/null
@@ -1,6 +0,0 @@
-// vars/sayHello.groovy
-def call(String name = 'human') {
-    // Any valid steps can be called from this code, just like in other
-    // Scripted Pipeline
-    echo "Hello, ${name}."
-}
diff --git a/vars/helmTeardown.groovy b/vars/helmTeardown.groovy
new file mode 100644
index 0000000..3dff0ab
--- /dev/null
+++ b/vars/helmTeardown.groovy
@@ -0,0 +1,34 @@
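+// usage (a sketch; both arguments are optional, defaults shown here. charts
+// whose names match an entry in the excludes list are left running):
+//
+//   helmTeardown(['default'], ['docker-registry'])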
+
+def call(List namespaces = ['default'], List excludes = ['docker-registry']) {
+
+    println "Tearing down charts in namespaces: ${namespaces.join(', ')}."
+
+    def exc = excludes.join("|")
+    for(int i = 0;i<namespaces.size();i++) {
+        def n = namespaces[i]
+        sh """
+          for hchart in \$(helm list -n ${n} -q | grep -E -v '${exc}');
+          do
+              echo "Purging chart: \${hchart}"
+              helm delete -n ${n} "\${hchart}"
+          done
+        """
+    }
+
+    println "Waiting for pods to be removed from namespaces: ${namespaces.join(', ')}."
+    for(int i = 0;i<namespaces.size();i++) {
+        def n = namespaces[i]
+        sh """
+        set +x
+        PODS=\$(kubectl get pods -n ${n} --no-headers | wc -l)
+        while [[ \$PODS != 0 ]]; do
+        sleep 5
+        PODS=\$(kubectl get pods -n ${n} --no-headers | wc -l)
+        done
+        """
+    }
+}
diff --git a/vars/loadToKind.groovy b/vars/loadToKind.groovy
new file mode 100644
index 0000000..520aee2
--- /dev/null
+++ b/vars/loadToKind.groovy
@@ -0,0 +1,34 @@
+// loads all the images tagged as citest into a Kind cluster
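+//
+// usage (a sketch; "name" must match the cluster created by createKubernetesCluster,
+// and the worker node names used below assume the two-worker layout from kind.cfg):
+//
+//   loadToKind([name: "kind-ci"])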
+
+def call(Map config) {
+  def defaultConfig = [
+    name: "kind-ci"
+  ]
+
+  if (!config) {
+      config = [:]
+  }
+
+  def cfg = defaultConfig + config
+
+  def images = sh (
+    script: 'docker images -f "reference=**/*citest" --format "{{.Repository}}"',
+    returnStdout: true
+  ).trim()
+
+  def list = images.split("\n")
+
+  for (int i = 0; i < list.size(); i++) {
+    def image = list[i]
+    println "Loading image ${image} on Kind cluster ${cfg.name}"
+
+    sh """
+      kind load docker-image ${image}:citest --name ${cfg.name} --nodes ${cfg.name}-worker,${cfg.name}-worker2
+    """
+  }
+}
diff --git a/vars/volthaDeploy.groovy b/vars/volthaDeploy.groovy
new file mode 100644
index 0000000..eb9faca
--- /dev/null
+++ b/vars/volthaDeploy.groovy
@@ -0,0 +1,34 @@
+// this keyword deploys a single VOLTHA stack together with its infrastructure
+// If you need to deploy different configurations use the volthaInfraDeploy and volthaStackDeploy keywords directly
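+//
+// usage (a sketch; every parameter is optional and defaults to the values in
+// the defaultConfig map below):
+//
+//   volthaDeploy([workflow: "att", bbsimReplica: 1])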
+
+def call(Map config) {
+    // note that I can't define this outside the function as there's no global scope in Groovy
+    def defaultConfig = [
+      onosReplica: 1,
+      atomixReplica: 1,
+      kafkaReplica: 1,
+      etcdReplica: 1,
+      bbsimReplica: 1,
+      infraNamespace: "infra",
+      volthaNamespace: "voltha",
+      workflow: "att",
+      extraHelmFlags: "",
+    ]
+
+    if (!config) {
+        config = [:]
+    }
+
+    def cfg = defaultConfig + config
+
+    println "Deploying VOLTHA with the following parameters: ${cfg}."
+
+    volthaInfraDeploy(cfg)
+
+    volthaStackDeploy(cfg)
+}
diff --git a/vars/volthaInfraDeploy.groovy b/vars/volthaInfraDeploy.groovy
new file mode 100644
index 0000000..ecea1ad
--- /dev/null
+++ b/vars/volthaInfraDeploy.groovy
@@ -0,0 +1,41 @@
+// usage
+//
+// stage('test stage') {
+//   steps {
+//     volthaInfraDeploy([
+//       onosReplica: 3
+//     ])
+//   }
+// }
+
+
+def call(Map config) {
+    // NOTE use params or directly extraHelmFlags?
+    def defaultConfig = [
+      onosReplica: 1,
+      atomixReplica: 1,
+      kafkaReplica: 1,
+      etcdReplica: 1,
+      infraNamespace: "infra",
+      workflow: "att",
+      extraHelmFlags: "",
+    ]
+
+    if (!config) {
+        config = [:]
+    }
+
+    def cfg = defaultConfig + config
+
+    println "Deploying VOLTHA Infra with the following parameters: ${cfg}."
+
+    sh """
+    kubectl create namespace ${cfg.infraNamespace} || true
+    kubectl create configmap -n ${cfg.infraNamespace} kube-config "--from-file=kube_config=$KUBECONFIG"  || true
+    """
+    // TODO support multiple replicas
+    sh """
+    helm upgrade --install --create-namespace -n ${cfg.infraNamespace} voltha-infra onf/voltha-infra ${cfg.extraHelmFlags} \
+          -f $WORKSPACE/voltha-helm-charts/examples/${cfg.workflow}-values.yaml
+    """
+}
diff --git a/vars/volthaStackDeploy.groovy b/vars/volthaStackDeploy.groovy
new file mode 100644
index 0000000..78d90bf
--- /dev/null
+++ b/vars/volthaStackDeploy.groovy
@@ -0,0 +1,67 @@
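+// usage (a sketch; deploys one VOLTHA stack against an already-deployed infra.
+// the stackName and bbsimReplica values here are illustrative):
+//
+//   volthaStackDeploy([stackName: "voltha1", bbsimReplica: 2])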
+
+def call(Map config) {
+    // note that I can't define this outside the function as there's no global scope in Groovy
+    def defaultConfig = [
+      onosReplica: 1,
+      atomixReplica: 1,
+      kafkaReplica: 1,
+      etcdReplica: 1,
+      bbsimReplica: 1,
+      infraNamespace: "infra",
+      volthaNamespace: "voltha",
+      stackName: "voltha",
+      workflow: "att",
+      extraHelmFlags: "",
+    ]
+
+    if (!config) {
+        config = [:]
+    }
+
+    def cfg = defaultConfig + config
+
+    println "Deploying VOLTHA Stack with the following parameters: ${cfg}."
+
+    sh """
+    helm upgrade --install --create-namespace -n ${cfg.volthaNamespace} ${cfg.stackName} onf/voltha-stack ${cfg.extraHelmFlags} \
+          --set global.stack_name=${cfg.stackName} \
+          --set global.voltha_infra_name=voltha-infra \
+          --set global.voltha_infra_namespace=${cfg.infraNamespace}
+    """
+
+    for(int i = 0;i<cfg.bbsimReplica;i++) {
+      // TODO differentiate olt_id between different stacks
+       sh """
+         helm upgrade --install --create-namespace -n ${cfg.volthaNamespace} bbsim${i} onf/bbsim ${cfg.extraHelmFlags} \
+         --set olt_id="1${i}" \
+         -f $WORKSPACE/voltha-helm-charts/examples/${cfg.workflow}-values.yaml
+       """
+    }
+
+    println "Wait for VOLTHA Stack ${cfg.stackName} to start"
+
+    sh """
+        set +x
+        voltha=\$(kubectl get pods -n ${cfg.volthaNamespace} -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
+        while [[ \$voltha != 0 ]]; do
+          sleep 5
+          voltha=\$(kubectl get pods -n ${cfg.volthaNamespace} -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
+        done
+    """
+
+    // also make sure that the ONOS config is loaded
+    println "Wait for ONOS Config loader to complete"
+
+    sh """
+        set +x
+        config=\$(kubectl get jobs.batch -n ${cfg.infraNamespace} --no-headers | grep "0/" | wc -l)
+        while [[ \$config != 0 ]]; do
+          sleep 5
+          config=\$(kubectl get jobs.batch -n ${cfg.infraNamespace} --no-headers | grep "0/" | wc -l)
+        done
+    """
+}