update to reference latest images
- updated image references to the latest images from the reorganized git repos
- added several utility scripts for testing, log collection, and etcd maintenance
diff --git a/.gitignore b/.gitignore
index 3f497b2..db581da 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,6 @@
install.log
*.swp
*.log
+logger/*
+voltha-debug-dump-minimal*
+voltha-system-tests
diff --git a/minimal-values.yaml b/minimal-values.yaml
index 7d439d3..62e6038 100644
--- a/minimal-values.yaml
+++ b/minimal-values.yaml
@@ -46,21 +46,21 @@
# adapter_open_onu:
# repository: voltha/voltha-openonu-adapter
# tag: 2.1.0
-# adapter_simulated_olt:
-# repository: voltha/voltha-adapter-simulated-olt
-# tag: 2.1.1
-# adapter_simulated_onu:
-# repository: voltha/voltha-adapter-simulated-onu
-# tag: 2.1.1
+ adapter_simulated_olt:
+ repository: voltha/voltha-adapter-simulated-olt
+ tag: master
+ adapter_simulated_onu:
+ repository: voltha/voltha-adapter-simulated-onu
+ tag: master
bbsim:
repository: voltha/voltha-bbsim
tag: voltha-2.1
-# afrouter:
-# repository: voltha/voltha-afrouter
-# tag: 2.1.1
-# afrouterd:
-# repository: voltha/voltha-afrouterd
-# tag: 2.1.1
+ afrouter:
+ repository: voltha/voltha-afrouter
+ tag: master
+ afrouterd:
+ repository: voltha/voltha-afrouterd
+ tag: master
# cli:
# repository: voltha/voltha-cli
# tag: 2.1.1
diff --git a/scripts/e2e-repeat.sh b/scripts/e2e-repeat.sh
new file mode 100755
index 0000000..2cd87b7
--- /dev/null
+++ b/scripts/e2e-repeat.sh
@@ -0,0 +1,151 @@
+#!/bin/bash
+# Copyright 2019 Ciena Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script repeatedly invokes the e2e system test against a VOLTHA instance
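+#
+# It expects to run from the directory containing the ./voltha script: it
+# brings VOLTHA up with BBSim, runs the voltha-system-tests "sanity-kind"
+# target in a loop, tears BBSim down between successful runs, dumps
+# diagnostics and tears everything down on failure, and prints a running
+# summary (pass/fail counts, MTBF, and etcd key-count / db-size / RSS growth).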
+
+set -o pipefail
+
+if [ ! -r voltha-system-tests ]; then
+ git clone http://gerrit.opencord.org/voltha-system-tests voltha-system-tests
+fi
+
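+# delta A B: print the absolute difference of two IEC-formatted values,
+# e.g. "delta 3M 1M" prints 2097152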
+delta() {
+ local LEFT=$(echo $1 | numfmt --from=iec)
+ local RIGHT=$(echo $2 | numfmt --from=iec)
+ local V=$(expr $LEFT - $RIGHT)
+ echo ${V#-}
+}
+
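+# average V1 V2 ...: print the integer mean of the given values; with more
+# than three samples the minimum and maximum are dropped first (a simple
+# trimmed mean) so a single outlier run does not skew the reported numbers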
+average() {
+ local MIN=0
+ local MAX=0
+ local COUNT=0
+ local SUM=0
+ for V in $*; do
+ COUNT=$(expr $COUNT + 1)
+ SUM=$(expr $SUM + $V)
+ if [ $COUNT -eq 1 ]; then
+ MIN=$V
+ MAX=$V
+ else
+ if [ $V -lt $MIN ]; then
+ MIN=$V
+ elif [ $V -gt $MAX ]; then
+ MAX=$V
+ fi
+ fi
+ done
+ if [ $COUNT -gt 3 ]; then
+ SUM=$(expr $SUM - $MIN - $MAX)
+ COUNT=$(expr $COUNT - 2)
+ fi
+ if [ $COUNT -lt 1 ]; then
+ echo 0
+ else
+ echo $(expr $SUM / $COUNT)
+ fi
+}
+
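+# WAIT_FOR_DOWN and TERM are consumed by the ./voltha script; the intent here
+# appears to be to make "./voltha down" block until teardown completes and to
+# keep interactive terminal output out of the captured logs.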
+export WAIT_FOR_DOWN=y
+export TERM=
+
+COMPLETED=0
+COUNT_OK=0
+COUNT_FAIL=0
+COUNT_SINCE_FAIL=0
+FAILURE_LIST=()
+RSS_DIFFS=()
+KEY_DIFFS=()
+SIZE_DIFFS=()
+LOG=$(mktemp)
+while true; do
+    RUN=$(expr $COMPLETED + 1)
+    RUN_TS=$(date -u +%Y-%m-%dT%H:%M:%SZ)
+    echo "START RUN $RUN @ $RUN_TS" | tee -a $LOG
+ DEPLOY_K8S=y WITH_BBSIM=y ./voltha up
+ # because BBSIM needs time
+ sleep 60
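+    # Record the etcd key count, database size, and etcd RSS before the test
+    # run so growth can be compared against the post-run values below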
+ ETCD=$(kubectl -n voltha get pods | grep etcd-cluster | awk '{print $1}')
+ BEFORE_KEY_COUNT=$(kubectl -n voltha exec -ti $ETCD \
+ -- sh -c 'ETCDCTL_API=3 etcdctl get --command-timeout=60s --from-key=true --keys-only . | sed -e "/^$/d" | wc -l | tr -d "\r\n"')
+ BEFORE_SIZE=$(numfmt --to=iec \
+ $(kubectl -n voltha exec -ti $ETCD \
+ -- sh -c 'ETCDCTL_API=3 etcdctl endpoint status -w json' | tr -d '\r\n' | jq .[].Status.dbSize))
+ BEFORE_RSS=$(ps -eo rss,pid,cmd | grep /usr/local/bin/etcd | grep -v grep | cut -d\ -f1 | numfmt --to=iec)
+ (cd voltha-system-tests; make sanity-kind 2>&1 | tee $LOG)
+ FAIL=$?
+ AFTER_KEY_COUNT=$(kubectl -n voltha exec -ti $ETCD \
+ -- sh -c 'ETCDCTL_API=3 etcdctl get --command-timeout=60s --from-key=true --keys-only . | sed -e "/^$/d" | wc -l | tr -d "\r\n"')
+ AFTER_SIZE=$(numfmt --to=iec \
+ $(kubectl -n voltha exec -ti $ETCD \
+ -- sh -c 'ETCDCTL_API=3 etcdctl endpoint status -w json' | tr -d '\r\n' | jq .[].Status.dbSize))
+ AFTER_RSS=$(ps -eo rss,pid,cmd | grep /usr/local/bin/etcd | grep -v grep | cut -d\ -f1 | numfmt --to=iec)
+ if [ $FAIL -eq 0 ]; then
+ COUNT_OK=$(expr $COUNT_OK + 1)
+ COUNT_SINCE_FAIL=$(expr $COUNT_SINCE_FAIL + 1)
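+        # On success only BBSim is removed here; "./voltha up" at the top of
+        # the next iteration (WITH_BBSIM=y) deploys it again, while the rest
+        # of the VOLTHA stack keeps running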
+ helm delete --purge bbsim
+ while [ $(kubectl get --all-namespaces pods,svc 2>&1 | grep -c bbsim) -gt 0 ]; do
+ sleep 3
+ done
+ else
+ COUNT_FAIL=$(expr $COUNT_FAIL + 1)
+ FAILURE_LIST+=($COUNT_SINCE_FAIL)
+ COUNT_SINCE_FAIL=0
+ DUMP_FROM=$RUN_TS ./voltha dump
+ ./voltha down
+ fi
+ echo "END RUN: $RUN @ $(date -u +%Y-%m-%dT%H:%M:%SZ)" | tee -a $LOG
+ COMPLETED=$(expr $COMPLETED + 1)
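+    # MTBF here is the mean number of successful runs between failures,
+    # computed from the success-streak lengths recorded at each failure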
+ MTBF=0
+ if [ ${#FAILURE_LIST[@]} -gt 0 ]; then
+ for V in ${FAILURE_LIST[@]}; do
+ MTBF=$(expr $MTBF + $V)
+ done
+ MTBF=$(expr $MTBF \/ ${#FAILURE_LIST[@]})
+ fi
+
+ RSS_DIFFS+=($(delta $AFTER_RSS $BEFORE_RSS))
+ KEY_DIFFS+=($(delta $AFTER_KEY_COUNT $BEFORE_KEY_COUNT))
+ SIZE_DIFFS+=($(delta $AFTER_SIZE $BEFORE_SIZE))
+
+ echo "{" | tee -a $LOG
+ echo " NumberOfRuns: $COMPLETED," | tee -a $LOG
+ echo " Success: $COUNT_OK," | tee -a $LOG
+ echo " Failed: $COUNT_FAIL," | tee -a $LOG
+ echo " SinceLastFail: $COUNT_SINCE_FAIL," | tee -a $LOG
+ echo " MTBF: $MTBF," | tee -a $LOG
+ echo " FAILURES: ${FAILURE_LIST[@]}," | tee -a $LOG
+ echo " ETCd: {" | tee -a $LOG
+ echo " KeyCount: {" | tee -a $LOG
+ echo " Before: $BEFORE_KEY_COUNT," | tee -a $LOG
+ echo " After: $AFTER_KEY_COUNT," | tee -a $LOG
+ echo " Average: $(average ${KEY_DIFFS[@]})," | tee -a $LOG
+ echo " }," | tee -a $LOG
+ echo " DbSize: {" | tee -a $LOG
+ echo " Before: $BEFORE_SIZE," | tee -a $LOG
+ echo " After: $AFTER_SIZE," | tee -a $LOG
+ echo " Average: $(numfmt --to=iec $(average ${SIZE_DIFFS[@]}))," | tee -a $LOG
+ echo " }" | tee -a $LOG
+ echo " RSS: {" | tee -a $LOG
+ echo " Before: $BEFORE_RSS," | tee -a $LOG
+ echo " After: $AFTER_RSS," | tee -a $LOG
+ echo " Average: $(numfmt --to=iec $(average ${RSS_DIFFS[@]}))," | tee -a $LOG
+ echo " }" | tee -a $LOG
+ echo " }" | tee -a $LOG
+ echo "}" | tee -a $LOG
+ if [ $FAIL -ne 0 ]; then
+ mkdir -p failures
+ cp $LOG failures/$(date -u +"%Y%m%dT%H%M%SZ")-fail-output.log
+ fi
+done
diff --git a/scripts/etcd-compact.sh b/scripts/etcd-compact.sh
new file mode 100755
index 0000000..0fe4bf2
--- /dev/null
+++ b/scripts/etcd-compact.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Copyright 2019 Ciena Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script compacts the VOLTHA etcd keyspace up to its current revision
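+#
+# Note: compaction only drops superseded key revisions; it does not shrink the
+# backing database file. Run scripts/etcd-defrag.sh afterwards to return the
+# freed space to the filesystem.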
+
+VER=$(kubectl -n voltha exec -ti \
+ $(kubectl -n voltha get pods | grep etcd-cluster | awk '{print $1}') -- \
+ sh -c 'ETCDCTL_API=3 etcdctl endpoint status -w json' | tr -d '\r\n' | jq .[].Status.header.revision)
+kubectl -n voltha exec -ti $(kubectl -n voltha get pods | grep etcd-cluster | awk '{print $1}') -- \
+ sh -c "ETCDCTL_API=3 etcdctl compact $VER"
diff --git a/scripts/etcd-db-size.sh b/scripts/etcd-db-size.sh
new file mode 100755
index 0000000..2c16966
--- /dev/null
+++ b/scripts/etcd-db-size.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2019 Ciena Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script reports the size of the VOLTHA etcd database in human-readable form
+
+set -o pipefail
+
+ETCD=$(kubectl -n voltha get pods 2>&1 | grep etcd-cluster | awk '{print $1}')
+if [ -z "$ETCD" ]; then
+ echo "N/A"
+else
+ VALUE=$(kubectl -n voltha exec -ti $ETCD -- sh -c 'ETCDCTL_API=3 etcdctl --command-timeout=10s endpoint status -w json' 2>/dev/null | tr -d '\r\n' | jq .[].Status.dbSize 2>/dev/null)
+ if [ -z "$VALUE" ]; then
+ echo "N/A"
+ else
+ numfmt --to=iec $VALUE
+ fi
+fi
diff --git a/scripts/etcd-defrag.sh b/scripts/etcd-defrag.sh
new file mode 100755
index 0000000..742c302
--- /dev/null
+++ b/scripts/etcd-defrag.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Copyright 2019 Ciena Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script defragments the etcd database to reclaim unused space
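+#
+# Note: defragmentation is a blocking operation on the etcd member, so expect
+# a brief pause in VOLTHA's access to etcd while it runs.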
+
+kubectl -n voltha exec -ti $(kubectl -n voltha get pods | grep etcd-cluster | awk '{print $1}') -- sh -c 'ETCDCTL_API=3 etcdctl defrag'
diff --git a/scripts/log-collector.sh b/scripts/log-collector.sh
new file mode 100755
index 0000000..b99d7d9
--- /dev/null
+++ b/scripts/log-collector.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+# Copyright 2019 Ciena Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script periodically collects the POD logs and puts them in a "RAW"
+# location to be processed into rolling log files by scripts/log-combine.sh
+
+RAW_LOG_DIR=${RAW_LOG_DIR:-./logger/raw}
+PERIOD=${PERIOD:-15}
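+
+# Both settings can be overridden from the environment, e.g. (the values here
+# are only illustrative):
+#   RAW_LOG_DIR=/tmp/voltha-logs/raw PERIOD=30 ./scripts/log-collector.sh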
+
+# === END OF CONFIGURATION ===
+set -o pipefail
+
+# Ensure raw log area exists
+mkdir -p $RAW_LOG_DIR
+SINCE=
+
+# forever
+while true; do
+ TS=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
+ SINCE_FLAG=
+ SINCE_MSG=
+
+    # On iteration 2+ we use the --since-time option to minimize the size of
+    # the logs collected as well as the overlap with the previous collection
+    # iteration
+ if [ ! -z "$SINCE" ]; then
+ SINCE_FLAG="--since-time=$SINCE"
+ SINCE_MSG="since $SINCE "
+ fi
+
+ # Build up the logs in a temp directory and then move that to the
+ # RAW area when complete
+ WORK=$(mktemp -d)
+
+ # All VOLTHA PODS + ONOS
+ PODS="$(kubectl -n default get pod -o name | grep onos | sed -e 's/^/default:/g') $(kubectl get -n voltha pod -o name | sed -e 's/^/voltha:/g')"
+ if [ $? -ne 0 ]; then
+ echo "Failed to get PODs from Kubernetes, will retry after sleep ..."
+ else
+ echo "Dumping POD logs at $TS $SINCE_MSG..."
+ for POD in $PODS; do
+ NS=$(echo $POD | cut -d: -f1)
+ POD=$(echo $POD | cut -d: -f2 | sed -e 's/^pod\///g')
+ echo " $POD"
+ kubectl logs --timestamps=true $SINCE_FLAG -n $NS --all-containers $LOG_ARGS $POD 2>&1 > $WORK/$POD.log
+ if [ $? -ne 0 ]; then
+ echo " ERROR: Encountered while getting POD log, removing failed entry"
+ rm -f $WORK/$POD.log
+ fi
+ done
+ if [ $(ls -1 $WORK/ | wc -l) -eq 0 ]; then
+ # Work directory is empty, no need to move it to raw area, just
+ # remove it
+ rm -rf $WORK
+ else
+ mv $WORK $RAW_LOG_DIR/$TS
+ fi
+ fi
+
+ # End iteration and sleep until next iteration
+ echo "====="
+ SINCE=$TS
+ echo "Sleep for $PERIOD seconds ..."
+ sleep $PERIOD
+done
diff --git a/scripts/log-combine.sh b/scripts/log-combine.sh
new file mode 100755
index 0000000..9f6f54a
--- /dev/null
+++ b/scripts/log-combine.sh
@@ -0,0 +1,128 @@
+#!/bin/sh
+# Copyright 2019 Ciena Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script reads the RAW log data collected from the PODs (typically by
+# scripts/log-collector.sh) and combines it into rolling, size-capped log files.
+
+RAW_LOG_DIR=${RAW_LOG_DIR:-./logger/raw}
+COMBINED_LOG_DIR=${COMBINED_LOG_DIR:-./logger/combined}
+ROLL_AT=${ROLL_AT:-100M}
+MAX_LOG_COUNT=${MAX_LOG_COUNT:-5}
+PERIOD=${PERIOD:-60}
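+
+# All of the above can be overridden from the environment, e.g. (the values
+# here are only illustrative):
+#   ROLL_AT=250M MAX_LOG_COUNT=10 ./scripts/log-combine.sh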
+
+# === END OF CONFIGURATION ===
+
+# Convert the ROLL_AT value to bytes
+ROLL_AT=$(numfmt --from=iec $ROLL_AT)
+
+# Ensure the combined directory exists
+mkdir -p $COMBINED_LOG_DIR
+
+# Get a working area
+WORK=$(mktemp)
+
+# forever ...
+while true; do
+
+ # Iterate over all existing raw entries
+ for MERGE_DIR in $(ls $RAW_LOG_DIR); do
+ echo "Merging from $RAW_LOG_DIR/$MERGE_DIR ..."
+
+ # Iterate over each file in the RAW data directory
+ for FILE_PATH in $(ls $RAW_LOG_DIR/$MERGE_DIR/*.log); do
+
+ # Get the base of the log file
+ FILE=$(basename $FILE_PATH)
+
+ # Find destination log file with largest index, if none
+ # exists this will end up with IDX == 1
+ IDX=2
+ while [ -f $COMBINED_LOG_DIR/$FILE.$(printf "%04d" $IDX) ]; do
+ IDX=$(expr $IDX + 1)
+ done
+ IDX=$(expr $IDX - 1)
+
+ # Get the NAME of the log file to write
+ NAME=$COMBINED_LOG_DIR/$FILE.$(printf "%04d" $IDX)
+
+ # different behavior if the file exists or not
+ if [ -f $NAME ]; then
+
+ # if the file exists, check the size of the file and see
+ # if we need to move to the next index
+ SIZE=$(stat -c %s $NAME)
+ if [ $SIZE -gt $ROLL_AT ]; then
+
+                # Combine the existing log file with the new data; this will
+                # produce duplicate entries for the overlap, then run it
+                # through "uniq -D | sort -u" to end up with only the
+                # overlapping entries
+ cat $NAME $FILE_PATH | sort | uniq -D | sort -u > $WORK
+
+ # time to move
+ IDX=$(expr $IDX + 1)
+
+                # If the next IDX is greater than the max log file count then
+                # we shift each log file to index - 1, losing the first (.1)
+                # forever ...
+ if [ $IDX -gt $MAX_LOG_COUNT ]; then
+ echo " Shifting log files for $FILE ..."
+ I=1
+ while [ $I -lt $MAX_LOG_COUNT ]; do
+ rm -f $COMBINED_LOG_DIR/$FILE.$(printf "%04d" $I)
+ mv $COMBINED_LOG_DIR/$FILE.$(printf "%04d" $(expr $I + 1)) $COMBINED_LOG_DIR/$FILE.$(printf "%04d" $I)
+ I=$(expr $I + 1)
+ done
+
+ # Reset the IDX to the MAX
+ IDX=$MAX_LOG_COUNT
+ fi
+
+                # Recompute the name of the log file to write using the new index
+ NAME=$COMBINED_LOG_DIR/$FILE.$(printf "%04d" $IDX)
+ echo " Creating new log file $NAME ..."
+
+                # Combine the list of overlapping entries (WORK), i.e. the
+                # ones that need to be removed from the new data set, with
+                # the new data set so that each overlapping entry appears
+                # twice, then pipe that through "uniq -u" to keep only the
+                # unique entries. This has the effect of removing the
+                # overlap from the new data set
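+                # Worked example (hypothetical lines): if the rolled file
+                # contains {A,B,C} and the new collection is {B,C,D,E}, WORK
+                # holds the overlap {B,C}; "cat WORK new | sort" yields
+                # B,B,C,C,D,E and "uniq -u" keeps only D,E, so the new file
+                # starts exactly where the previous one left off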
+ cat $WORK $FILE_PATH | sort | uniq -u > $NAME
+ chmod 644 $NAME
+ else
+                # Not rolling yet, so simply combine the new data with the
+                # existing file and keep only the sorted, unique entries
+ cat $NAME $FILE_PATH | sort -u > $WORK
+ rm -f $NAME
+ mv $WORK $NAME
+ chmod 644 $NAME
+ fi
+ else
+            # The destination log file does not exist, so just sort the
+            # RAW data into the file. This really only happens on the
+            # first iteration
+ sort -u $FILE_PATH > $NAME
+ chmod 644 $NAME
+ fi
+ done
+
+ # Remove the RAW data directory so we don't try to merge it again
+ rm -rf $RAW_LOG_DIR/$MERGE_DIR
+ done
+ echo "====="
+ echo "Sleep for $PERIOD seconds ..."
+ sleep $PERIOD
+done
diff --git a/scripts/monitor.sh b/scripts/monitor.sh
new file mode 100755
index 0000000..84a8cae
--- /dev/null
+++ b/scripts/monitor.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Copyright 2019 Ciena Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script sets up a watch that displays information useful when
+# developing VOLTHA on Kubernetes
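+#
+# Run it from the root of the kind-voltha checkout: the command below expects
+# kubectl under $HOME/kind-voltha/bin and the helper scripts under ./scripts/.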
+
+watch '$HOME/kind-voltha/bin/kubectl get --all-namespaces pods,svc && echo "" \
+ && kubectl describe --all-namespaces pods | grep Image: | grep voltha | sed -e "s/^ *//g" -e "s/: */: /g" && echo "" \
+ && echo "DB SIZE: $(./scripts/etcd-db-size.sh)" && echo "" \
+ && echo "RSS SIZE: $(ps -eo rss,pid,cmd | grep /usr/local/bin/etcd | grep -v grep | cut -d\ -f1 | numfmt --to=iec)"'
diff --git a/voltha b/voltha
index 7a309fe..dd36731 100755
--- a/voltha
+++ b/voltha
@@ -599,6 +599,10 @@
espin - $VERIFIED
bspin - "Dumping VOLTHA POD details"
PODS="$(kubectl -n default get pod -o name | grep onos | sed -e 's/^/default:/g') $(kubectl get -n voltha pod -o name | sed -e 's/^/voltha:/g')"
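+    # When DUMP_FROM is set to an RFC3339 timestamp (as done by
+    # scripts/e2e-repeat.sh), only log entries newer than that time are dumped.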
+ SINCE=
+ if [ ! -z "$DUMP_FROM" ]; then
+ SINCE="--since-time=$DUMP_FROM"
+ fi
for POD in $PODS; do
NS=$(echo $POD | cut -d: -f1)
POD=$(echo $POD | cut -d: -f2)
@@ -606,9 +610,9 @@
mkdir -p $DATA/$POD
(set -x; kubectl describe -n $NS $POD >> $DATA/$POD/describe.txt 2>&1) >>$LOG 2>&1
sspin - "Dumping VOLTHA POD details: $POD"
- (set -x; kubectl logs -n $NS --all-containers --previous $LOG_ARGS $POD >> $DATA/$POD/logs-previous.txt 2>&1) >>$LOG 2>&1
+ (set -x; kubectl logs -n $NS --all-containers $SINCE --previous $LOG_ARGS $POD >> $DATA/$POD/logs-previous.txt 2>&1) >>$LOG 2>&1
sspin - "Dumping VOLTHA POD details: $POD"
- (set -x; kubectl logs -n $NS --all-containers $LOG_ARGS $POD >> $DATA/$POD/logs-current.txt 2>&1) >>$LOG 2>&1
+ (set -x; kubectl logs -n $NS --all-containers $SINCE $LOG_ARGS $POD >> $DATA/$POD/logs-current.txt 2>&1) >>$LOG 2>&1
sspin - "Dumping VOLTHA POD details: $POD"
done
espin - "$VERIFIED Dumping VOLTHA POD details$CEOL"