Misc fixes
jjb/pipeline/voltha/voltha-2.12/bbsim-tests.groovy
--------------------------------------------------
o Refactor pgrep/pkill/ps calls into standalone Groovy functions.
o Increase proc kill timeout to 5 seconds; it should eventually be
  removed altogether (see the note after this list).
o Cleanup(): use pgrep/pkill in place of ps | kill.
  + Verify that port-forwarding processes terminate.
o Use local vars to remove duplicate inlined file paths.
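
Note on the proc kill timeout above: Jenkins' timeout() step counts in
minutes by default, so a bare timeout(5) is five minutes, not five
seconds; expressing seconds requires an explicit unit. A minimal sketch
(illustration only, not part of this change):

    timeout(time: 5, unit: 'SECONDS') {
        // pgrep/pkill based cleanup steps
    }
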
vars/createKubernetesCluster.groovy
-----------------------------------
o npm-groovy-lint cleanups.
o Display more banners to improve log readability.
o Set perms on ~/{.volt,.kube}/config to fix a write problem observed in the logs.
Change-Id: I6dc3789721cbb6af7e01a274d9441e725444aa5c
diff --git a/jjb/pipeline/voltha/voltha-2.12/bbsim-tests.groovy b/jjb/pipeline/voltha/voltha-2.12/bbsim-tests.groovy
index 45349d9..0023c04 100644
--- a/jjb/pipeline/voltha/voltha-2.12/bbsim-tests.groovy
+++ b/jjb/pipeline/voltha/voltha-2.12/bbsim-tests.groovy
@@ -69,6 +69,24 @@
}
// -----------------------------------------------------------------------
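+// Intent: Display a full-format listing of processes matching <proc>;
+//         a harmless no-op (|| true) when nothing matches.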
+// -----------------------------------------------------------------------
+void pgrep_proc(String proc)
+{
+ println("** Running: pgrep --list-full ${proc}")
+ sh("""pgrep --list-full "${proc}" || true""")
+ return
+}
+
+// -----------------------------------------------------------------------
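+// Intent: Kill any running processes matching <proc>, echoing each process killed.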
+// -----------------------------------------------------------------------
+void pkill_proc(String proc)
+{
+ println("** Running: pkill ${proc}")
+ sh("""if [[ \$(pgrep --count "${proc}") -gt 0 ]]; then pkill --echo "${proc}"; fi""")
+ return
+}
+
+// -----------------------------------------------------------------------
// Intent:
// -----------------------------------------------------------------------
void execute_test(testTarget, workflow, testLogging, teardown, testSpecificHelmFlags='')
@@ -98,51 +116,29 @@
script {
helmTeardown(['default', infraNamespace, volthaNamespace])
}
+ } // timeout
- // -----------------------------------------------------------------------
- // Verify pgrep/pkill behavior before replacing ps | kill -9
- // -----------------------------------------------------------------------
- script {
- println('''
+ timeout(5) {
+ script {
+ String iam = getIam('Cleanup')
+ println("${iam}: ENTER")
-** -----------------------------------------------------------------------
-** pgrep process list for port-forward (pre-pkill)
-** -----------------------------------------------------------------------
-''')
- sh('''pgrep --list-full port-forward || true''')
- }
+ // remove orphaned port-forward from different namespaces
+ String proc = 'port-forw'
+ pgrep_proc(proc)
+ pkill_proc(proc)
- // Comment timeout() if we hang (fix it VS mask problem)
- // timeout(1) {
- // -----------------------------------------------------------------------
- // -----------------------------------------------------------------------
-sh(returnStdout:true, script: '''
- sync
- cat <<EOM
-
-** -----------------------------------------------------------------------
-** remove orphaned port-forward from different namespacse
-** -----------------------------------------------------------------------
-EOM
- [[ $(pgrep --count port-forward) -gt 0 ]] && pkill --echo 'port-forward'
- ''')
-
- // -----------------------------------------------------------------------
- // -----------------------------------------------------------------------
- script {
- println('''
-
-** -----------------------------------------------------------------------
-** pgrep process list for port-forward (post-pkill)
-** -----------------------------------------------------------------------
-''')
- sh('''pgrep --list-full port-forward || true''')
- }
-
- } // timeout(15)
- } // teardown()
- // timeout(1)
- } // stage(cleanup)
+ // Sanity check: verify the port-forward processes have terminated
+ sh("""
+if [[ \$(pgrep --count "${proc}") -gt 0 ]]; then
+    echo "ERROR: Detected zombie port-forwarding processes"
+    pgrep --list-full "${proc}" || true
+fi
+""")
+ println("${iam}: LEAVE")
+ } // script
+ } // timeout
+ } // teardown
+ } // stage
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
@@ -174,24 +170,12 @@
--set $dashargs
fi
''')
-
- /*
- sh '''
- helm repo add onf https://charts.opencord.org
- helm repo update
-
- echo -e "\nwithMonitoring=[$withMonitoring]"
- if [ ${withMonitoring} = true ] ; then
- helm install nem-monitoring onf/nem-monitoring \
- --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
- --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
- fi
- '''
- */
}
}
// -----------------------------------------------------------------------
+ // [TODO] Check onos_log output
+ // [TODO] kail-startup pgrep/pkill
// -----------------------------------------------------------------------
stage('Deploy Voltha')
{
@@ -201,9 +185,12 @@
{
script
{
- sh("""
+ String iam = getIam('Deploy Voltha')
+ String combinedLog = "${logsDir}/onos-voltha-startup-combined.log"
+sh("""
mkdir -p ${logsDir}
onos_log="${logsDir}/onos-voltha-startup-combined.log"
+ touch "$onos_log"
echo "** kail-startup ENTER: \$(date)" > "$onos_log"
# Intermixed output (tee -a &) may get confusing but let's see
@@ -250,13 +237,13 @@
testSpecificHelmFlags
].join(' ')
- println("** localHelmFlags = ${localHelmFlags}")
+ println("** ${iam} localHelmFlags = ${localHelmFlags}")
if (gerritProject != '') {
localHelmFlags = "${localHelmFlags} " + getVolthaImageFlags("${gerritProject}")
}
- println('volthaDeploy: ENTER')
+ println("** ${iam}: ENTER")
volthaDeploy([
infraNamespace: infraNamespace,
volthaNamespace: volthaNamespace,
@@ -267,7 +254,7 @@
bbsimReplica: olts.toInteger(),
dockerRegistry: registry,
])
- println('volthaDeploy: LEAVE')
+ println("** ${iam}: LEAVE")
} // script
// -----------------------------------------------------------------------
@@ -276,13 +263,7 @@
// Grep runs the risk of terminating stray commands (??-good <=> bad-??)
// -----------------------------------------------------------------------
script {
- println('''
-
-** -----------------------------------------------------------------------
-** pgrep process list for kail-startup (WIP)
-** -----------------------------------------------------------------------
-''')
- sh('''pgrep --list-full kail-startup || true''')
+ pgrep_proc('kail-startup')
println('''
@@ -293,10 +274,10 @@
sh('''ps e -ww -A | grep "_TAG=kail-startup"''')
}
- // -----------------------------------------------------------------------
+ // -----------------------------------------------------------------------
// stop logging
// -----------------------------------------------------------------------
- sh """
+ sh("""
P_IDS="\$(ps e -ww -A | grep "_TAG=kail-startup" | grep -v grep | awk '{print \$1}')"
if [ -n "\$P_IDS" ]; then
echo \$P_IDS
@@ -304,15 +285,26 @@
kill -9 \$P_ID
done
fi
- cd ${logsDir}
- echo "** kail-startup LEAVE: \$(date)" >> "${logsDir}/onos-voltha-startup-combined.log"
+""")
+ sh("""
+cat <<EOM
+
+** -----------------------------------------------------------------------
+** Combine and compress voltha startup log(s)
+** -----------------------------------------------------------------------
+EOM
+ pushd "${logsDir}" || { echo "ERROR: pushd $logsDir failed"; exit 1; }
gzip -k onos-voltha-startup-combined.log
rm onos-voltha-startup-combined.log
- """
- }
+ popd
+ """)
+ }
- sh """
+
+ // -----------------------------------------------------------------------
+ // -----------------------------------------------------------------------
+ sh """
JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
@@ -326,11 +318,12 @@
fi
ps aux | grep port-forward
"""
+ // [TODO] pgrep_proc('port-forward')
- // setting ONOS log level
- script
- {
- println('** setOnosLogLevels: ENTER')
+
+ // setting ONOS log level
+ script {
+ println('** setOnosLogLevels: ENTER')
setOnosLogLevels([
onosNamespace: infraNamespace,
apps: [
@@ -343,23 +336,37 @@
],
logLevel: logLevel
])
- println('** setOnosLogLevels: LEAVE')
+ println('** setOnosLogLevels: LEAVE')
} // script
} // if (teardown)
} // stage('Deploy Voltha')
+ // -----------------------------------------------------------------------
+ // -----------------------------------------------------------------------
stage("Run test ${testTarget} on workflow ${workflow}")
{
sh """
echo -e "\n** Monitor using mem_consumption.py ?"
+
if [ ${withMonitoring} = true ] ; then
+ cat <<EOM
+
+** -----------------------------------------------------------------------
+** Monitoring memory usage with mem_consumption.py
+** -----------------------------------------------------------------------
+EOM
mkdir -p "$WORKSPACE/voltha-pods-mem-consumption-${workflow}"
cd "$WORKSPACE/voltha-system-tests"
- make venv-activate-script
+
+ echo '** Installing python virtualenv'
+ make venv-activate-patched
+
set +u && source .venv/bin/activate && set -u
# Collect initial memory consumption
python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption-${workflow} -a 0.0.0.0:31301 -n ${volthaNamespace}
fi
+
+ echo -e '** Monitor memory consumption: LEAVE\n'
"""
sh """
@@ -385,11 +392,21 @@
echo -e '** Gather robot Framework logs: LEAVE\n'
"""
+ // -----------------------------------------------------------------------
+ // -----------------------------------------------------------------------
sh """
echo -e '** Monitor pod-mem-consumption: ENTER'
if [ ${withMonitoring} = true ] ; then
+ cat <<EOM
+
+** -----------------------------------------------------------------------
+** Monitoring pod-memory-consumption using mem_consumption.py
+** -----------------------------------------------------------------------
+EOM
cd "$WORKSPACE/voltha-system-tests"
- make venv-activate-script
+
+ echo '** Installing python virtualenv'
+ make venv-activate-patched
set +u && source .venv/bin/activate && set -u
# Collect memory consumption of voltha pods once all the tests are complete
python scripts/mem_consumption.py -o $WORKSPACE/voltha-pods-mem-consumption-${workflow} -a 0.0.0.0:31301 -n ${volthaNamespace}
@@ -515,7 +532,7 @@
'kail',
].join(' ')
- println(" ** Running: ${cmd}:\n")
+ println(" ** Running: ${cmd}")
sh("${cmd}")
} // script
} // steps
@@ -538,8 +555,8 @@
'install-command-kind',
].join(' ')
- println(" ** Running: ${cmd}:\n")
- sh("${cmd}")
+ println(" ** Running: ${cmd}")
+ sh("${cmd}")
} // script
} // steps
} // stage
@@ -550,19 +567,18 @@
{
steps
{
- script
+ script
{
def clusterExists = sh(
- returnStdout: true,
- script: """kind get clusters | grep "${clusterName}" | wc -l"""
- )
+ returnStdout: true,
+ script: """kind get clusters | grep "${clusterName}" | wc -l""")
if (clusterExists.trim() == '0')
- {
- createKubernetesCluster([nodes: 3, name: clusterName])
+ {
+ createKubernetesCluster([nodes: 3, name: clusterName])
}
- } // script
- } // steps
+ } // script
+ } // steps
} // stage('Create K8s Cluster')
// -----------------------------------------------------------------------
@@ -571,32 +587,31 @@
{
// if the project is voltctl, override the downloaded one with the built one
when {
- expression { return gerritProject == 'voltctl' }
+ expression { return gerritProject == 'voltctl' }
}
// Hmmmm(?) where did the voltctl download happen ?
// Likely Makefile but would be helpful to document here.
steps
{
- println("${iam} Running: installVoltctl($branch)")
- installVoltctl("$branch")
+ println("${iam} Running: installVoltctl($branch)")
+ installVoltctl("$branch")
} // steps
} // stage
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
- stage('voltctl [DEBUG]')
- {
- steps
- {
- println("${iam} Display umask")
- sh('umask')
-
- println("${iam} Checking voltctl config permissions")
- sh('/bin/ls -ld ~/.volt || true')
-
- println("${iam} Running find")
- sh('/bin/ls -l ~/.volt')
+ stage('voltctl [DEBUG]') {
+ steps {
+ script {
+ String iam = getIam('execute_test')
+
+ println("${iam} Display umask")
+ sh('umask')
+
+ println("${iam} Checking voltctl config permissions")
+ sh('/bin/ls -ld ~/.volt ~/.volt/* || true')
+ } // script
} // steps
} // stage
@@ -613,14 +628,15 @@
} // stage
// -----------------------------------------------------------------------
+ // [TODO] verify testing output
// -----------------------------------------------------------------------
stage('Parse and execute tests')
{
steps {
script {
- // Announce ourselves for log usability
- String iam = getIam('execute_test')
- println("${iam}: ENTER")
+ // Announce ourselves for log usability
+ String iam = getIam('execute_test')
+ println("${iam}: ENTER")
def tests = readYaml text: testTargets
@@ -642,11 +658,9 @@
String testLogging = (logging) ? 'True' : 'False'
print("""
-
** -----------------------------------------------------------------------
** Executing test ${target} on workflow ${workflow} with logging ${testLogging} and extra flags ${flags}"
** -----------------------------------------------------------------------
-
)
try {
diff --git a/vars/createKubernetesCluster.groovy b/vars/createKubernetesCluster.groovy
index a0df895..9653840 100644
--- a/vars/createKubernetesCluster.groovy
+++ b/vars/createKubernetesCluster.groovy
@@ -36,7 +36,7 @@
// note that I can't define this outside the function as there's no global scope in Groovy
def defaultConfig = [
- branch: "master",
+ branch: "master", // TODO: why is "master" the default branch ?
nodes: 1,
name: "kind-ci"
]
@@ -50,7 +50,7 @@
println "Deploying Kind cluster with the following parameters: ${cfg}."
// TODO support different configs
- def data = """
+ def data = '''
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
@@ -74,10 +74,10 @@
hostPort: 30115
- containerPort: 30120
hostPort: 30120
- """
+'''
writeFile(file: 'kind.cfg', text: data)
- // TODO skip cluster creation if cluster is already there
+ // TODO: Skip kind install; the make install-kind-command target has already done it
sh """
mkdir -p "$WORKSPACE/bin"
@@ -91,6 +91,12 @@
installVoltctl("${cfg.branch}")
sh """
+cat <<EOM
+
+** -----------------------------------------------------------------------
+** Starting kind cluster
+** -----------------------------------------------------------------------
+EOM
# start the kind cluster
kind create cluster --name ${cfg.name} --config kind.cfg
@@ -99,13 +105,36 @@
kubectl taint node "\$MNODE" node-role.kubernetes.io/master:NoSchedule-
done
- mkdir -p $HOME/.volt
- voltctl -s localhost:55555 config > $HOME/.volt/config
+ ## ----------------------------------------------------------------------
+ ## This logic is problematic: when run on a node processing concurrent
+ ## jobs, each job overwrites these files and corrupts the config used
+ ## by the other running job.
+ ## ----------------------------------------------------------------------
+ ## Future enhancement: the optimal answer would be to create and use
+ ## configs from a job-specific temp/config directory (see sketch below).
+ ## ----------------------------------------------------------------------
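+ ## A rough sketch of that enhancement (assumption: kubectl honors
+ ## KUBECONFIG and voltctl honors VOLTCONFIG), keeping per-job configs
+ ## inside the job workspace instead of $HOME:
+ ##   export KUBECONFIG="$WORKSPACE/.kube/config"
+ ##   export VOLTCONFIG="$WORKSPACE/.volt/config"
+ ## ----------------------------------------------------------------------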
- mkdir -p $HOME/.kube
- kind get kubeconfig --name ${cfg.name} > $HOME/.kube/config
+ umask 022
- # install kail
+ echo
+ echo "** Generate ~/.volt/config"
+ mkdir -p "$HOME/.volt"
+ chmod -R u+w,go-rwx "$HOME/.volt"
+ chmod u=rwx "$HOME/.volt"
+ voltctl -s localhost:55555 config > "$HOME/.volt/config"
+
+ echo
+ echo "** Generate ~/.kube/config"
+ mkdir -p "$HOME/.kube"
+ chmod -R u+w,go-rwx "$HOME/.kube"
+ chmod u=rwx "$HOME/.kube"
+ kind get kubeconfig --name ${cfg.name} > "$HOME/.kube/config"
+
+ echo
+ echo "** Display ~/.kube and ~/.volt"
+ /bin/ls -l "$HOME/.kube" "$HOME/.volt"
+
+ echo
+ echo "Install Kail"
make -C "$WORKSPACE/voltha-system-tests" KAIL_PATH="$WORKSPACE/bin" kail
"""