Merge "Add ntt-workflow-driver and voltha-epononu-adapter"
diff --git a/jjb/defaults.yaml b/jjb/defaults.yaml
index 4956258..77b33a3 100644
--- a/jjb/defaults.yaml
+++ b/jjb/defaults.yaml
@@ -35,7 +35,7 @@
artifact-num-to-keep: 30
# How long to keep builds and artifacts for jobs that generate a lot of artifacts
- big-build-days-to-keep: 5
+ big-build-days-to-keep: 10
big-artifact-num-to-keep: 5
# list of artifacts to archive
diff --git a/jjb/pipeline/voltha-scale-multi-stack.groovy b/jjb/pipeline/voltha-scale-multi-stack.groovy
index 1bc7b5b..d8d9be9 100644
--- a/jjb/pipeline/voltha-scale-multi-stack.groovy
+++ b/jjb/pipeline/voltha-scale-multi-stack.groovy
@@ -93,10 +93,14 @@
kill -9 \$P_ID
fi
- for hchart in \$(helm list -q | grep -E -v 'docker-registry|kafkacat');
+ NAMESPACES="voltha1 voltha2 infra default"
+ for NS in \$NAMESPACES
do
- echo "Purging chart: \${hchart}"
- helm delete "\${hchart}"
+ for hchart in \$(helm list -n \$NS -q | grep -E -v 'docker-registry|kafkacat');
+ do
+ echo "Purging chart: \${hchart}"
+ helm delete -n \$NS "\${hchart}"
+ done
done
bash /home/cord/voltha-scale/wait_for_pods.sh
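The purge above relies on Helm 3 releases being namespace-scoped, so each stack namespace has to be walked explicitly. A minimal standalone sketch of the same cleanup, assuming kubectl/helm already point at the test cluster, would be:

    #!/usr/bin/env bash
    # Sketch only: same per-namespace purge as the pipeline step above.
    NAMESPACES="voltha1 voltha2 infra default"
    for ns in $NAMESPACES; do
      for release in $(helm list -n "$ns" -q | grep -E -v 'docker-registry|kafkacat'); do
        echo "Purging chart: $release (namespace $ns)"
        helm delete -n "$ns" "$release"
      done
    done
    # sanity check: only the excluded charts should remain
    helm ls --all-namespaces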
@@ -198,6 +202,9 @@
sh returnStdout: false, script: '''
cd $WORKSPACE/kind-voltha/
+ ETCD_CHART=$HOME/teone/helm-charts/etcd
+ KAFKA_CHART=$HOME/teone/helm-charts/kafka
+
# KAFKA config
NUM_OF_KAFKA=${kafkaReplicas}
EXTRA_HELM_FLAGS+=' --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default '
@@ -256,9 +263,7 @@
kubectl -n \$INFRA_NS exec \$INSTANCE -- bash /root/onos/${karafHome}/bin/client log:set ${logLevel} org.onosproject
kubectl -n \$INFRA_NS exec \$INSTANCE -- bash /root/onos/${karafHome}/bin/client log:set ${logLevel} org.opencord
- kubectl -n \$INFRA_NS exec \$INSTANCE -- bash /root/onos/${karafHome}/bin/client log:set DEBUG org.opencord.olt
-
- kubectl -n \$INFRA_NS exec \$INSTANCE -- bash /root/onos/${karafHome}/bin/client log:set TRACE org.onosproject.net.meter.impl
+ kubectl -n \$INFRA_NS exec \$INSTANCE -- bash /root/onos/${karafHome}/bin/client log:set DEBUG org.opencord.dhcpl2relay
done
# Set Flows/Ports/Meters poll frequency
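With this change the dedicated olt DEBUG and meter TRACE overrides are dropped in favour of DEBUG on org.opencord.dhcpl2relay. A hedged way to confirm the effective level on each ONOS instance, assuming the standard Karaf client log:get command and the same $INFRA_NS/${karafHome} layout used by the job:

    # Sketch only: verify the level just set on every ONOS pod.
    INFRA_NS=infra                      # assumption: infra namespace used by the job
    KARAF_HOME=apache-karaf-4.2.9       # assumption: placeholder for ${karafHome}
    for pod in $(kubectl -n "$INFRA_NS" get pods -l app=onos-onos-classic -o name); do
      kubectl -n "$INFRA_NS" exec "${pod#pod/}" -- \
        bash "/root/onos/$KARAF_HOME/bin/client" log:get org.opencord.dhcpl2relay
    done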
@@ -451,18 +456,18 @@
])
step([$class: 'RobotPublisher',
disableArchiveOutput: false,
- logFileName: 'RobotLogs/log.html',
+ logFileName: 'RobotLogs/**/log.html',
otherFiles: '',
- outputFileName: 'RobotLogs/output.xml',
+ outputFileName: 'RobotLogs/**/output.xml',
outputPath: '.',
passThreshold: 100,
- reportFileName: 'RobotLogs/report.html',
+ reportFileName: 'RobotLogs/**/report.html',
unstableThreshold: 0]);
// get all the logs from kubernetes PODs
sh returnStdout: false, script: '''
# store information on running charts
- helm ls > $LOG_FOLDER/helm-list.txt || true
+ helm ls --all-namespaces > $LOG_FOLDER/helm-list.txt || true
# store information on the running pods
kubectl get pods -o wide > $LOG_FOLDER/pods.txt || true
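The ** globs let RobotPublisher pick up one result set per stack instead of a single top-level file. The layout those globs are expected to match (directory names here are illustrative only) would look like:

    # Sketch only: per-stack Robot Framework results the globs above should match.
    find RobotLogs -maxdepth 2 -name 'output.xml'
    # RobotLogs/voltha1/output.xml
    # RobotLogs/voltha2/output.xml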
@@ -470,23 +475,27 @@
kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
# copy the ONOS logs directly from the container to avoid the color codes
- printf '%s\n' $(kubectl get pods -l app=onos-onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
+ printf '%s\n' $(kubectl -n \$INFRA_NS get pods -l app=onos-onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I# bash -c "kubectl -n \$INFRA_NS cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
# get radius logs out of the container
- kubectl cp $(kubectl get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER//radius.log || true
+ kubectl -n \$INFRA_NS cp $(kubectl -n \$INFRA_NS get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER//radius.log || true
'''
// dump all the BBSim(s) ONU information
- sh '''
- # TODO parametrize for stacks
- BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
- IDS=($BBSIM_IDS)
+ script {
+ for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
+ stack_ns="voltha"+i
+ sh """
+ BBSIM_IDS=\$(kubectl -n ${stack_ns} get pods | grep bbsim | grep -v server | awk '{print \$1}')
+ IDS=(\$BBSIM_IDS)
- for bbsim in "${IDS[@]}"
- do
- kubectl exec -t $bbsim -- bbsimctl onu list > $LOG_FOLDER/$bbsim-device-list.txt || true
- kubectl exec -t $bbsim -- bbsimctl service list > $LOG_FOLDER/$bbsim-service-list.txt || true
- done
- '''
+ for bbsim in "\${IDS[@]}"
+ do
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl onu list > $LOG_FOLDER/\$bbsim-device-list.txt || true
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl service list > $LOG_FOLDER/\$bbsim-service-list.txt || true
+ done
+ """
+ }
+ }
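The BBSim dump is now generated per stack by looping params.volthaStacks times and targeting the volthaN namespaces. The same traversal as a plain-bash sketch, with the stack count hard-coded purely for illustration:

    # Sketch only: per-stack BBSim ONU/service dump, mirroring the Groovy loop above.
    VOLTHA_STACKS=2                     # assumption: stands in for params.volthaStacks
    LOG_FOLDER=${LOG_FOLDER:-/tmp/logs} # assumption: same log folder the job exports
    for i in $(seq 1 "$VOLTHA_STACKS"); do
      ns="voltha${i}"
      for bbsim in $(kubectl -n "$ns" get pods | grep bbsim | grep -v server | awk '{print $1}'); do
        kubectl -n "$ns" exec -t "$bbsim" -- bbsimctl onu list     > "$LOG_FOLDER/${bbsim}-device-list.txt"  || true
        kubectl -n "$ns" exec -t "$bbsim" -- bbsimctl service list > "$LOG_FOLDER/${bbsim}-service-list.txt" || true
      done
    done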
// get ONOS debug infos
sh '''
@@ -530,24 +539,29 @@
'''
// get VOLTHA debug infos
script {
- try {
- sh '''
- # TODO parametrize for multiple stacks
- voltctl -m 8MB device list -o json > $LOG_FOLDER/device-list.json || true
- python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
- rm $LOG_FOLDER/device-list.json || true
- voltctl -m 8MB device list > $LOG_FOLDER/voltha-devices-list.txt || true
+ for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
+ stack_ns="voltha"+i
+ voltcfg="~/.volt/config-voltha"+i
+ println stack_ns
+ try {
+ sh """
+ voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list -o json > $LOG_FOLDER/device-list.json || true
+ python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
+ rm $LOG_FOLDER/device-list.json || true
+ voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list > $LOG_FOLDER/voltha-devices-list.txt || true
- printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
- printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true
+ DEVICE_LIST=
+ printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list | grep olt | awk '{print \$1}') | xargs -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
+ printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device list | grep olt | awk '{print \$1}') | xargs -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true
- printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
- printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs -I# bash -c "voltctl -m 8MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
- '''
- } catch(e) {
- sh '''
- echo "Can't get device list from voltclt"
- '''
+ printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice list -q) | xargs -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
+ printf '%s\n' \$(voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice list -q) | xargs -I# bash -c "voltctl -c $HOME/.volt/config-${stack_ns} -m 8MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
+ """
+ } catch(e) {
+ sh '''
+ echo "Can't get device list from voltclt"
+ '''
+ }
}
}
// get cpu usage by container
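Each stack now gets its own voltctl config file, so the debug collection above always talks to the right VOLTHA instance. Exercising one stack's config by hand, assuming the job has written ~/.volt/config-volthaN for each stack:

    # Sketch only: query a single stack with its dedicated voltctl config.
    STACK=1                                         # assumption: first stack
    VOLTCONFIG="$HOME/.volt/config-voltha${STACK}"  # assumption: written by the deployment step
    voltctl -c "$VOLTCONFIG" -m 8MB device list
    voltctl -c "$VOLTCONFIG" -m 8MB logicaldevice list -q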
diff --git a/jjb/pipeline/voltha-scale-test.groovy b/jjb/pipeline/voltha-scale-test.groovy
index b83b3b3..ac6af3f 100644
--- a/jjb/pipeline/voltha-scale-test.groovy
+++ b/jjb/pipeline/voltha-scale-test.groovy
@@ -91,15 +91,23 @@
kill -9 \$P_ID
fi
- for hchart in \$(helm list -q | grep -E -v 'docker-registry|kafkacat');
+ NAMESPACES="voltha1 voltha2 infra default"
+ for NS in \$NAMESPACES
do
- echo "Purging chart: \${hchart}"
- helm delete "\${hchart}"
+ for hchart in \$(helm list -n \$NS -q | grep -E -v 'docker-registry|kafkacat');
+ do
+ echo "Purging chart: \${hchart}"
+ helm delete -n \$NS "\${hchart}"
+ done
done
+
bash /home/cord/voltha-scale/wait_for_pods.sh
test -e $WORKSPACE/kind-voltha/voltha && cd $WORKSPACE/kind-voltha && ./voltha down
+ # remove orphaned port-forwards from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs kill -9
+
cd $WORKSPACE
rm -rf $WORKSPACE/*
"""
@@ -177,8 +185,7 @@
// includes monitoring, kafka, etcd
steps {
sh '''
- helm repo add bitnami https://charts.bitnami.com/bitnami
- helm install kafka bitnami/kafka --set replicaCount=${kafkaReplicas} --set persistence.enabled=false \
+ helm install kafka $HOME/teone/helm-charts/kafka --set replicaCount=${kafkaReplicas} --set persistence.enabled=false \
--set zookeeper.replicaCount=${kafkaReplicas} --set zookeeper.persistence.enabled=false \
--set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default
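Kafka (and etcd below) are now installed from charts kept under $HOME/teone/helm-charts instead of being pulled from the public Bitnami repo on every run. The jobs only assume those directories exist; one plausible way to pre-seed them, a sketch assuming they are plain mirrors of the Bitnami charts:

    # Sketch only: pre-fetch local copies of the Bitnami charts used by the job.
    mkdir -p "$HOME/teone/helm-charts"
    helm repo add bitnami https://charts.bitnami.com/bitnami
    helm repo update
    helm pull bitnami/kafka --untar --untardir "$HOME/teone/helm-charts"
    helm pull bitnami/etcd  --untar --untardir "$HOME/teone/helm-charts"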
@@ -186,7 +193,7 @@
ETCD_FLAGS=$(echo ${extraHelmFlags} | sed -e 's/--set auth=false / /g') | sed -e 's/--set auth=true / /g'
ETCD_FLAGS+=" --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=${etcdReplicas}"
ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
- helm install -f $WORKSPACE/kind-voltha/values.yaml --set replicas=${etcdReplicas} etcd bitnami/etcd $ETCD_FLAGS
+ helm install -f $WORKSPACE/kind-voltha/values.yaml --set replicas=${etcdReplicas} etcd $HOME/teone/helm-charts/etcd $ETCD_FLAGS
if [ ${withMonitoring} = true ] ; then
helm install nem-monitoring cord/nem-monitoring \
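For reference, the etcd flag assembly that feeds the install above, written as a self-contained sketch with both sed filters kept inside the command substitution (auth overrides are stripped before the scale-specific settings are appended; the sample extraHelmFlags value is illustrative):

    # Sketch only: how the etcd flags are assembled before the helm install.
    extraHelmFlags="--set auth=false --set global.log_level=WARN"   # illustrative value
    etcdReplicas=3
    inMemoryEtcdStorage=true
    ETCD_FLAGS=$(echo "${extraHelmFlags}" | sed -e 's/--set auth=false / /g' -e 's/--set auth=true / /g')
    ETCD_FLAGS+=" --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=${etcdReplicas}"
    ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
    echo "$ETCD_FLAGS"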