#!/bin/bash
# Copyright 2019 Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -f
unalias -a
shopt -s extglob
TOTAL_START_TIME="$(date +%s)"
mkdir -p "$(pwd)/bin"
PATH="$(pwd)/bin:$PATH"; export PATH
GOPATH="$(pwd)"; export GOPATH
FANCY=${FANCY:-1}
if [ "$TERM X" == " X" ]; then
FANCY=0
fi
# trap ctrl-c and call ctrl_c()
trap ctrl_c INT
function ctrl_c() {
echo -en "$CNORM"
echo ""
echo "ctrl-c trapped"
echo "Thank you for trying 'voltha up'"
exit
}
HELM_VERSION=${HELM_VERSION:-v3.2.4}
VOLTCTL_VERSION=${VOLTCTL_VERSION:-latest}
KIND_VERSION=${KIND_VERSION:-v0.8.1}
VK_RELEASE=${VK_RELEASE:-master}
EXTRA_TOOLS=
RED=
GREEN=
YELLOW=
BLUE=
BOLD=
NORMAL=
ERROR=
CEOL=
CNORM=
CIVIS=
if [ "$FANCY" -eq 1 ]; then
RED="$(tput setaf 1)"
GREEN="$(tput setaf 2)"
YELLOW="$(tput setaf 3)"
BLUE="$(tput setaf 4)"
BOLD="$(tput bold)"
NORMAL="$(tput sgr0)"
ERROR="\xe2\x9c\x97\x20"
CEOL="$(tput el)"
CNORM="$(tput cnorm)"
CIVIS="$(tput civis)"
fi
NAME=${NAME:-minimal}
ENABLE_ONOS_EXTRANEOUS_RULES=${ENABLE_ONOS_EXTRANEOUS_RULES:-no}
INFRA_NS=${INFRA_NS:-default}
VOLTHA_NS=${VOLTHA_NS:-voltha}
BBSIM_NS=${BBSIM_NS:-voltha}
ADAPTER_NS=${ADAPTER_NS:-voltha}
WITH_TIMINGS=${WITH_TIMINGS:-no}
WITH_BBSIM=${WITH_BBSIM:-no}
WITH_EFK=${WITH_EFK:-no}
WITH_TRACING=${WITH_TRACING:-no}
WITH_RADIUS=${WITH_RADIUS:-no}
WITH_EAPOL=${WITH_EAPOL:-yes}
WITH_DHCP=${WITH_DHCP:-yes}
WITH_IGMP=${WITH_IGMP:-no}
WITH_ONOS=${WITH_ONOS:-yes}
NUM_OF_ONOS=${NUM_OF_ONOS:-1}
NUM_OF_ATOMIX=${NUM_OF_ATOMIX:-0}
WITH_CHAOS=${WITH_CHAOS:-no}
WITH_ADAPTERS=${WITH_ADAPTERS:-yes}
WITH_SIM_ADAPTERS=${WITH_SIM_ADAPTERS:-no}
WITH_OPEN_ADAPTERS=${WITH_OPEN_ADAPTERS:-yes}
WITH_PORT_FORWARDS=${WITH_PORT_FORWARDS:-yes}
WITH_KAFKA=${WITH_KAFKA:-yes}
WITH_ETCD=${WITH_ETCD:-yes}
WITH_PPROF=${WITH_PPROF:-no}
WITH_INCREMENTAL_EVTO_UPDATE=${WITH_INCREMENTAL_EVTO_UPDATE:-no}
SCHEDULE_ON_CONTROL_NODES=${SCHEDULE_ON_CONTROL_NODES:-no}
CONFIG_SADIS=${CONFIG_SADIS:-no} # yes | no | file | bbsim | external | URL
SADIS_CFG=${SADIS_CFG:-onos-files/onos-sadis-sample.json}
BBSIM_CFG=${BBSIM_CFG:-configs/bbsim-sadis-att.yaml}
INSTALL_ONOS_APPS=${INSTALL_ONOS_APPS:-no}
JUST_K8S=${JUST_K8S:-no}
JUST_INFRA=${JUST_INFRA:-no}
DEPLOY_K8S=${DEPLOY_K8S:-yes}
INSTALL_KUBECTL=${INSTALL_KUBECTL:-yes}
INSTALL_HELM=${INSTALL_HELM:-yes}
HELM_USE_UPGRADE=${HELM_USE_UPGRADE:-no}
UPDATE_HELM_REPOS=${UPDATE_HELM_REPOS:-yes}
WAIT_ON_DOWN=${WAIT_ON_DOWN:-yes}
WAIT_TIMEOUT=${WAIT_TIMEOUT:-30m}
VOLTHA_DOWN_ON_TIMEOUT=${VOLTHA_DOWN_ON_TIMEOUT:-no}
VOLTHA_LOG_LEVEL=${VOLTHA_LOG_LEVEL:-WARN}
VOLTHA_CHART=${VOLTHA_CHART:-onf/voltha}
VOLTHA_CHART_VERSION=${VOLTHA_CHART_VERSION:-latest}
VOLTHA_BBSIM_CHART=${VOLTHA_BBSIM_CHART:-onf/bbsim}
VOLTHA_BBSIM_CHART_VERSION=${VOLTHA_BBSIM_CHART_VERSION:-latest}
ELASTICSEARCH_CHART=${ELASTICSEARCH_CHART:-elastic/elasticsearch}
ELASTICSEARCH_CHART_VERSION=${ELASTICSEARCH_CHART_VERSION:-latest}
KIBANA_CHART=${KIBANA_CHART:-elastic/kibana}
KIBANA_CHART_VERSION=${KIBANA_CHART_VERSION:-latest}
FLUENTD_ELASTICSEARCH_CHART=${FLUENTD_ELASTICSEARCH_CHART:-kiwigrid/fluentd-elasticsearch}
FLUENTD_ELASTICSEARCH_CHART_VERSION=${FLUENTD_ELASTICSEARCH_CHART_VERSION:-latest}
VOLTHA_TRACING_CHART=${VOLTHA_TRACING_CHART:-onf/voltha-tracing}
VOLTHA_TRACING_CHART_VERSION=${VOLTHA_TRACING_CHART_VERSION:-latest}
VOLTHA_ADAPTER_SIM_CHART=${VOLTHA_ADAPTER_SIM_CHART:-onf/voltha-adapter-simulated}
VOLTHA_ADAPTER_SIM_CHART_VERSION=${VOLTHA_ADAPTER_SIM_CHART_VERSION:-latest}
VOLTHA_ADAPTER_OPEN_OLT_CHART=${VOLTHA_ADAPTER_OPEN_OLT_CHART:-onf/voltha-adapter-openolt}
VOLTHA_ADAPTER_OPEN_OLT_CHART_VERSION=${VOLTHA_ADAPTER_OPEN_OLT_CHART_VERSION:-latest}
VOLTHA_ADAPTER_OPEN_ONU_CHART=${VOLTHA_ADAPTER_OPEN_ONU_CHART:-onf/voltha-adapter-openonu}
VOLTHA_ADAPTER_OPEN_ONU_CHART_VERSION=${VOLTHA_ADAPTER_OPEN_ONU_CHART_VERSION:-latest}
ONOS_CHART=${ONOS_CHART:-onf/onos}
ONOS_CHART_VERSION=${ONOS_CHART_VERSION:-latest}
ONOS_CLASSIC_CHART=${ONOS_CLASSIC_CHART:-onos/onos-classic}
ONOS_CLASSIC_CHART_VERSION=${ONOS_CLASSIC_CHART_VERSION:-latest}
KAFKA_CHART=${KAFKA_CHART:-bitnami/kafka}
KAFKA_CHART_VERSION=${KAFKA_CHART_VERSION:-latest}
BBSIM_SADIS_SERVER_CHART=${BBSIM_SADIS_SERVER_CHART:-onf/bbsim-sadis-server}
BBSIM_SADIS_SERVER_CHART_VERSION=${BBSIM_SADIS_SERVER_CHART_VERSION:-latest}
ETCD_CHART=${ETCD_CHART:-bitnami/etcd}
ETCD_CHART_VERSION=${ETCD_CHART_VERSION:-latest}
RADIUS_CHART=${RADIUS_CHART:-onf/freeradius}
RADIUS_CHART_VERSION=${RADIUS_CHART_VERSION:-latest}
EXTRA_HELM_INSTALL_ARGS=${EXTRA_HELM_INSTALL_ARGS:-}
INTERNAL_EXTRA_HELM_INSTALL_ARGS=
NUM_OF_BBSIM=${NUM_OF_BBSIM:-1}
BBSIM_BASE_INDEX=${BBSIM_BASE_INDEX:-1}
NUM_OF_WORKER_NODES=${NUM_OF_WORKER_NODES:-2}
NUM_OF_CONTROLLER_NODES=${NUM_OF_CONTROLLER_NODES:-1}
NUM_OF_OPENONU=${NUM_OF_OPENONU:-1}
NUM_OF_KAFKA=${NUM_OF_KAFKA:-1}
NUM_OF_ETCD=${NUM_OF_ETCD:-1}
MAX_NUM_OF_BBSIM=10
MAX_NUM_OF_OPENONU=10
LEGACY_BBSIM_INDEX=${LEGACY_BBSIM_INDEX:-no}
PF_ADDRESS=${PF_ADDRESS:-0.0.0.0}
KIND_CFG_FILE=${KIND_CFG_FILE:-}
HOSTOS="$(uname -s | tr "[:upper:]" "[:lower:]")"
HOSTARCH="$(uname -m | tr "[:upper:]" "[:lower:]")"
if [ "$HOSTARCH" == "x86_64" ]; then
HOSTARCH="amd64"
fi
BBSIM_LABEL="-l app=bbsim"
NO_LABEL=
# checks to see if a given WORD is in the given LIST of words
function is_in() {
local WORD LIST
WORD="$1"; shift
LIST="$*"
LIST=${LIST//+([[:space:],|])/:}
[ "$(echo ":$LIST:" | grep -ic ":$WORD:")" -ne 0 ]
}
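# Illustrative usage (comments only, not executed): is_in does a case-insensitive
# membership test against a space-, comma-, or pipe-delimited list, e.g.:
#   is_in "YES" "y,yes,t,true,1"    # returns 0 (match found)
#   is_in "maybe" "y,yes,t,true,1"  # returns 1 (no match)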
function parseDuration() {
local DUR RESULT TERM TERMS VALUE UNIT
DUR=$1
RESULT=0
TERMS="$(echo "$DUR" | sed -Ee 's/([sSmMhHdD])/\1 /g' -e 's/,$//g')"
for TERM in $TERMS; do
VALUE="$(echo "$TERM" | sed -Ee 's/([0-9]+)[sSmMhHdD]/\1/')"
UNIT="$(echo "$TERM" | sed -Ee 's/[0-9]+([sSmMhHdD])/\1/')"
case $UNIT in
s|S)
RESULT=$((RESULT + VALUE)) ;;
m|M)
RESULT=$((RESULT + (VALUE * 60))) ;;
h|H)
RESULT=$((RESULT + (VALUE * 3600))) ;;
d|D)
RESULT=$((RESULT + (VALUE * 86400))) ;;
*) ;;
esac
done
echo $RESULT
}
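# Illustrative usage (comments only, not executed): parseDuration converts a compound
# duration string into a number of seconds, e.g.:
#   parseDuration "30m"    # -> 1800 (the default WAIT_TIMEOUT)
#   parseDuration "1h30m"  # -> 5400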
function doTimeout() {
local MSG=$1
2>&1 echo -e "\n${RED}${BOLD}${ERROR}TIMEOUT:${NORMAL}${RED} Operation timed out after '$WAIT_TIMEOUT': $MSG"
exit 125
}
function get_service_ep() {
local NS=$1
local NAME=$2
kubectl -n "$NS" get service "$NAME" -o json | jq -r '.spec.clusterIP + ":" + (.spec.ports[0].port|tostring)'
}
# compares two semver-like versions; echoes 0 if they are equal, 1 if the first is greater, 2 if the second is greater
# inspired by https://stackoverflow.com/questions/4023830/how-to-compare-two-strings-in-dot-separated-version-format-in-bash
function semvercompare() {
if [[ "$1" == "$2" ]]
then
echo 0
return
fi
local tmp1 tmp2
tmp1=$(echo "${1//v/}" | awk -F'-' '{print $1}')
tmp2=$(echo "${2//v/}" | awk -F'-' '{print $1}')
local IFS=.
# shellcheck disable=SC2206
local i ver1=($tmp1) ver2=($tmp2)
# fill empty fields in ver1 with zeros
for ((i=${#ver1[@]}; i<${#ver2[@]}; i++))
do
ver1[i]=0
done
for ((i=0; i<${#ver1[@]}; i++))
do
if [[ -z ${ver2[i]} ]]
then
# fill empty fields in ver2 with zeros
ver2[i]=0
fi
if ((10#${ver1[i]} > 10#${ver2[i]}))
then
echo 1
return
fi
if ((10#${ver1[i]} < 10#${ver2[i]}))
then
echo 2
return
fi
done
echo 0
return
}
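# Illustrative usage (comments only, not executed):
#   semvercompare "2.4.0" "2.4.1"   # echoes 2 (second is greater)
#   semvercompare "v2.5.0" "2.5.0"  # echoes 0 (leading 'v' and pre-release suffixes are stripped)
#   semvercompare "3.0.0" "2.9.9"   # echoes 1 (first is greater)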
# returns true if v1 is greater than v2 (false if they are the same)
function semver_greater() {
local v1
local v2
v1="$1" v2="$2"
# shellcheck disable=SC2128
if [[ ! "$v1" =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
# if the version is custom, we assume it's newer than master
echo "true"
return
fi
# shellcheck disable=SC2128
res=$(semvercompare "$v1" "$v2")
if [[ $res == 1 ]]; then
echo "true"
else
echo "false"
fi
}
# returns true if v1 is lesser than v2 (false if they are the same)
function semver_lesser() {
local v1
local v2
v1="$1" v2="$2"
# shellcheck disable=SC2128
if [[ ! "$v1" =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
# if the version is custom, we assume it's newer than master
echo "false"
return
fi
# shellcheck disable=SC2128
res=$(semvercompare "$v1" "$v2")
if [[ $res == 2 ]]; then
echo "true"
else
echo "false"
fi
}
# Used to verify that configuration values are set to a "yes" or "no" value,
# or to convert equivalents to "yes" or "no"
YES="y,yes,t,true,1"
NO="n,no,f,false,0"
YES_OR_NO="$YES,$NO"
# If the given VAR matches a truth value then normalize that
# value, else return original value
function normalize_yes_no() {
local VAR VAL
VAR=$1
VAL="$(eval echo "\$$VAR")"
if ! is_in "$VAL" "$YES_OR_NO"; then
echo "$VAL"
return 1
fi
if is_in "$VAL" "$YES"; then
echo "yes"
else
echo "no"
fi
return 0
}
# If the given VAR matches a truth value then normalize that
# value, else display an error
function verify_yes_no() {
local VAR VAL
VAR=$1
VAL="$(eval echo "\$$VAR")"
if ! is_in "$VAL" "$YES_OR_NO"; then
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR:${NORMAL}${RED} Invalid '$VAR' value of '$VAL'. Should be 'yes' or 'no'${NORMAL}"
echo "INVALID"
return 1
fi
if is_in "$VAL" "$YES"; then
echo "yes"
else
echo "no"
fi
return 0
}
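# Illustrative usage (comments only, not executed):
#   WITH_BBSIM=true;  verify_yes_no WITH_BBSIM        # echoes "yes"
#   WITH_BBSIM=maybe; verify_yes_no WITH_BBSIM        # echoes "INVALID" and prints an error
#   WITH_KAFKA=external; normalize_yes_no WITH_KAFKA  # echoes "external" unchanged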
ALL_YES_NO="\
ENABLE_ONOS_EXTRANEOUS_RULES \
WITH_TIMINGS \
WITH_BBSIM \
WITH_EFK \
WITH_TRACING \
WITH_EAPOL \
WITH_DHCP \
WITH_IGMP \
WITH_CHAOS \
WITH_ADAPTERS \
WITH_SIM_ADAPTERS \
WITH_OPEN_ADAPTERS \
WITH_PORT_FORWARDS \
WITH_PPROF \
WITH_INCREMENTAL_EVTO_UPDATE \
JUST_K8S \
JUST_INFRA \
DEPLOY_K8S \
INSTALL_ONOS_APPS \
INSTALL_KUBECTL \
INSTALL_HELM \
HELM_USE_UPGRADE \
UPDATE_HELM_REPOS \
WAIT_ON_DOWN \
VOLTHA_DOWN_ON_TIMEOUT \
LEGACY_BBSIM_INDEX \
SCHEDULE_ON_CONTROL_NODES \
"
OPT_YES_NO="\
CONFIG_SADIS \
WITH_KAFKA \
WITH_RADIUS \
WITH_ETCD \
WITH_ONOS \
"
ALL_OPTIONS="\
NAME \
$ALL_YES_NO \
$OPT_YES_NO \
WAIT_TIMEOUT \
VOLTHA_LOG_LEVEL \
VOLTHA_CHART \
VOLTHA_CHART_VERSION \
VOLTHA_BBSIM_CHART \
VOLTHA_BBSIM_CHART_VERSION \
BBSIM_SADIS_SERVER_CHART \
BBSIM_SADIS_SERVER_CHART_VERSION \
VOLTHA_TRACING_CHART \
VOLTHA_TRACING_CHART_VERSION \
NUM_OF_BBSIM \
NUM_OF_WORKER_NODES \
NUM_OF_CONTROLLER_NODES \
NUM_OF_KAFKA \
NUM_OF_ETCD \
ELASTICSEARCH_CHART \
ELASTICSEARCH_CHART_VERSION \
KIBANA_CHART \
KIBANA_CHART_VERSION \
FLUENTD_ELASTICSEARCH_CHART \
FLUENTD_ELASTICSEARCH_CHART_VERSION \
NUM_OF_OPENONU \
VOLTHA_ADAPTER_SIM_CHART \
VOLTHA_ADAPTER_SIM_CHART_VERSION \
VOLTHA_ADAPTER_OPEN_OLT_CHART \
VOLTHA_ADAPTER_OPEN_OLT_CHART_VERSION \
VOLTHA_ADAPTER_OPEN_ONU_CHART \
VOLTHA_ADAPTER_OPEN_ONU_CHART_VERSION \
ONOS_CHART \
ONOS_CHART_VERSION \
ONOS_CLASSIC_CHART \
ONOS_CLASSIC_CHART_VERSION \
KAFKA_CHART \
KAFKA_CHART_VERSION \
ETCD_CHART \
ETCD_CHART_VERSION \
RADIUS_CHART \
RADIUS_CHART_VERSION \
ONOS_API_PORT \
ONOS_SSH_PORT \
SADIS_CFG \
BBSIM_CFG \
VOLTHA_API_PORT \
VOLTHA_SSH_PORT \
VOLTHA_ETCD_PORT \
ELASTICSEARCH_PORT \
KIBANA_PORT \
VOLTHA_KAFKA_PORT \
VOLTHA_PPROF_PORT \
OPENOLT_PPROF_PORT \
OFAGENT_PPROF_PORT \
TRACING_GUI_PORT \
VK_RELEASE \
KIND_VERSION \
VOLTCTL_VERSION \
HELM_VERSION \
NUM_OF_ONOS \
NUM_OF_ATOMIX \
VOLTHA_NS \
ADAPTER_NS \
INFRA_NS \
BBSIM_NS \
SADIS_BANDWIDTH_PROFILES \
SADIS_SUBSCRIBERS \
PF_ADDRESS \
KIND_CFG_FILE \
"
# Iterate over yes/no configuration options and validate
for VAR in $ALL_YES_NO; do
eval "$VAR"="$(verify_yes_no "$VAR")"
if [ "$(eval echo "\$$VAR")" == "INVALID" ]; then
exit 1;
fi
done
# Iterate over optional yes/no configuration options and
# normalize values if they are truth values
for VAR in $OPT_YES_NO; do
eval "$VAR"="$(normalize_yes_no "$VAR")"
done
# check the requested number (range) of bbsim instances; at most 10 are supported
# note: instances will be numbered from 0 to 9
if [ "$1" == "up" ]; then
if [ "$NUM_OF_BBSIM" -lt 1 ]; then
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR:${NORMAL}${RED} Invalid setting of BBSIM instances. NUM_OF_BBSIM (${NUM_OF_BBSIM}) is less than 1${NORMAL}"
exit 1
fi
if [ "$NUM_OF_BBSIM" -gt "$MAX_NUM_OF_BBSIM" ]; then
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR:${NORMAL}${RED} Invalid setting of BBSIM instances. NUM_OF_BBSIM (${NUM_OF_BBSIM}) is greater than $MAX_NUM_OF_BBSIM${NORMAL}"
exit 1
fi
if [ "$NUM_OF_OPENONU" -lt 1 ]; then
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR:${NORMAL}${RED} Invalid setting of OPENONU instances. NUM_OF_OPENONU (${NUM_OF_OPENONU}) is less than 1${NORMAL}"
exit 1
fi
if [ "$NUM_OF_OPENONU" -gt "$MAX_NUM_OF_OPENONU" ]; then
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR:${NORMAL}${RED} Invalid setting of OPENONU instances. NUM_OF_OPENONU (${NUM_OF_OPENONU}) is greater than $MAX_NUM_OF_OPENONU${NORMAL}"
exit 1
fi
HAVE_CLUSTER="$(kind get clusters 2>/dev/null | grep -c "voltha-$NAME")"
if [ "$HAVE_CLUSTER" -eq 0 ]; then
# If the user has specified a kind cluster configuration file,
# 'KIND_CFG_FILE', then we need to glean from that file the number
# of worker and controller nodes, as this overrides any setting of
# these values
if [ -n "$KIND_CFG_FILE" ]; then
NUM_OF_WORKER_NODES="$(sed 's/ //g' "$KIND_CFG_FILE" | grep -c "^-role:worker$")"
NUM_OF_CONTROLLER_NODES="$(sed 's/ //g' "$KIND_CFG_FILE" | grep -c "^-role:control-plane$")"
fi
# check that NUM_OF_KAFKA, NUM_OF_ONOS, NUM_OF_ATOMIX, and NUM_OF_ETCD are each:
# <= NUM_OF_WORKER_NODES + NUM_OF_CONTROLLER_NODES if SCHEDULE_ON_CONTROL_NODES == yes
# <= NUM_OF_WORKER_NODES if SCHEDULE_ON_CONTROL_NODES == no
SCHEDULABLE_NODES=$NUM_OF_WORKER_NODES
if [ "$SCHEDULE_ON_CONTROL_NODES" == "yes" ]; then
SCHEDULABLE_NODES=$((NUM_OF_CONTROLLER_NODES+NUM_OF_WORKER_NODES))
fi
else
TOTAL_NODES=$(kubectl get --all-namespaces nodes -o name | wc -l)
NUM_OF_CONTROLLER_NODES=$(kubectl get --all-namespaces nodes -l node-role.kubernetes.io/master -o name | wc -l)
NUM_OF_WORKER_NODES=$((TOTAL_NODES - NUM_OF_CONTROLLER_NODES))
# shellcheck disable=SC2016
SCHEDULABLE_NODES=$(kubectl get no -o 'go-template={{range .items}}{{$taints:=""}}{{range .spec.taints}}{{if eq .effect "NoSchedule"}}{{$taints = print $taints .key ","}}{{end}}{{end}}{{if not $taints}}{{.metadata.name}}{{ "\n"}}{{end}}{{end}}' | wc -l | sed -e 's/ //g')
rm -f "$TMP_KUBECFG"
fi
NODES="SCHEDULE_ON_CONTROL_NODES: $SCHEDULE_ON_CONTROL_NODES, SCHEDULABLE_NODES: $SCHEDULABLE_NODES, NUM_OF_CONTROLLER_NODES: $NUM_OF_CONTROLLER_NODES, NUM_OF_WORKER_NODES: $NUM_OF_WORKER_NODES"
if is_in "$WITH_KAFKA" "yes,external" && [ ! "$NUM_OF_KAFKA" -le "$SCHEDULABLE_NODES" ]; then
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR:${NORMAL}${RED} Invalid setting of KAFKA replicas. NUM_OF_KAFKA (${NUM_OF_KAFKA}) is greater than the available nodes ($NODES)${NORMAL}"
exit 1
fi
if is_in "$WITH_ETCD" "yes,external" && [ ! "$NUM_OF_ETCD" -le "$SCHEDULABLE_NODES" ]; then
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR:${NORMAL}${RED} Invalid setting of ETCD replicas. NUM_OF_ETCD (${NUM_OF_ETCD}) is greater than the available nodes ($NODES)${NORMAL}"
exit 1
fi
if is_in "$WITH_ONOS" "yes,legacy,classic,micro"; then
if [ ! "$NUM_OF_ATOMIX" -le "$SCHEDULABLE_NODES" ]; then
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR:${NORMAL}${RED} Invalid setting of ATOMIX replicas. NUM_OF_ATOMIX (${NUM_OF_ATOMIX}) is greater than the available nodes ($NODES)${NORMAL}"
exit 1
fi
if [ ! "$NUM_OF_ONOS" -le "$SCHEDULABLE_NODES" ]; then
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR:${NORMAL}${RED} Invalid setting of ONOS replicas. NUM_OF_ONOS (${NUM_OF_ONOS}) is greater than the available nodes ($NODES)${NORMAL}"
exit 1
fi
fi
fi
# normalize non-truth value options
if is_in "$WITH_KAFKA" "e,external"; then
WITH_KAFKA="external"
fi
if is_in "$WITH_ETCD" "e,external"; then
WITH_ETCD="external"
fi
if is_in "$WITH_ONOS" "l,legacy"; then
WITH_ONOS="legacy"
elif is_in "$WITH_ONOS" "c,classic"; then
WITH_ONOS="classic"
elif is_in "$WITH_ONOS" "u,m,micro"; then
WITH_ONOS="micro"
fi
if [ "$WITH_ONOS" == "micro" ]; then
>&2 echo -e "${YELLOW}${BOLD}${ERROR}WARNING:${NORMAL}${YELLOW} The value specified as WITH_ONOS, '$WITH_ONOS', is not valid.${NORMAL}"
exit 1
fi
if is_in "$WITH_ONOS" "yes,classic"; then
# there is an extra utility required if we are using ONOS CLASSIC as
# the chart for classic specifies images differently
EXTRA_TOOLS+=" yq"
fi
if [ "$LEGACY_BBSIM_INDEX" == "no" ]; then
BBSIM_SADIS_SVC="bbsim0"
else
BBSIM_SADIS_SVC="bbsim"
fi
SADIS_SUBSCRIBERS=${SADIS_SUBSCRIBERS:-http://$BBSIM_SADIS_SVC.$VOLTHA_NS.svc:50074/v2/subscribers/%s}
SADIS_BANDWIDTH_PROFILES=${SADIS_BANDWIDTH_PROFILES:-http://$BBSIM_SADIS_SVC.$VOLTHA_NS.svc:50074/v2/bandwidthprofiles/%s}
if [ "$HELM_USE_UPGRADE" == "yes" ]; then
_HELM_DESC="Upgrade/Install"
else
_HELM_DESC="Install"
fi
# Check for prerequisite tools
TOOLS="curl sed jq $EXTRA_TOOLS"
if [ "$DEPLOY_K8S" == "yes" ]; then
TOOLS+=" docker"
fi
NOT_FOUND=""
for T in $TOOLS; do
if [ -z "$(command -v "$T")" ]; then
NOT_FOUND+=" $T"
fi
done
if [ -n "$NOT_FOUND" ]; then
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR:${NORMAL}${RED} The following required tools were not found, please install them:$NOT_FOUND${NORMAL}"
exit 1
fi
TIMEOUT_SECONDS="$(parseDuration "$WAIT_TIMEOUT")"
mkdir -p .voltha
touch .voltha/ports
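# Each stack previously deployed by this script gets one line in .voltha/ports recording
# its allocated host ports, in the same comma-separated order written further below,
# e.g. (illustrative):
#   minimal=8181,8101,55555,5022,2379,9092,6060,6061,6062,9200,5601,16686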
# check if the stack deployed with $NAME already exists
STACK_EXISTS=false
if [ "$(grep -c "$NAME" .voltha/ports)" -eq 1 ]; then
STACK_EXISTS=true
fi
if [ "$STACK_EXISTS" == true ]; then
echo "stack exists"
# if the stack was previously deployed reuse the ports
HAVE="$(grep "$NAME" .voltha/ports)"
VALUES="$(echo "$HAVE" | sed -e 's/\s//g' | cut -d= -f2)"
ONOS_API_PORT=$(echo "$VALUES" | cut -d, -f1)
ONOS_SSH_PORT=$(echo "$VALUES" | cut -d, -f2)
VOLTHA_API_PORT=$(echo "$VALUES" | cut -d, -f3)
VOLTHA_SSH_PORT=$(echo "$VALUES" | cut -d, -f4)
VOLTHA_ETCD_PORT=$(echo "$VALUES" | cut -d, -f5)
VOLTHA_KAFKA_PORT=$(echo "$VALUES" | cut -d, -f6)
VOLTHA_PPROF_PORT=$(echo "$VALUES" | cut -d, -f7)
OPENOLT_PPROF_PORT=$(echo "$VALUES" | cut -d, -f8)
OFAGENT_PPROF_PORT=$(echo "$VALUES" | cut -d, -f9)
ELASTICSEARCH_PORT=$(echo "$VALUES" | cut -d, -f10)
KIBANA_PORT=$(echo "$VALUES" | cut -d, -f11)
TRACING_GUI_PORT=$(echo "$VALUES" | cut -d, -f12)
# Some ports were added after the .voltha/ports file was created.
# Calculate the original DELTA from the VOLTHA_SSH_PORT so that it can
# be used to set up newer port-forwards.
DELTA="$((VOLTHA_SSH_PORT/100-50))"
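# e.g. (illustrative): the first stack gets VOLTHA_SSH_PORT=5022, so DELTA=5022/100-50=0;
# a second stack gets VOLTHA_SSH_PORT=5122, so DELTA=1 and its newer ports are offset accordingly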
if [ -z "$VOLTHA_KAFKA_PORT" ]; then
VOLTHA_KAFKA_PORT=${VOLTHA_KAFKA_PORT:-$((90 + DELTA))92}
fi
if [ -z "$VOLTHA_PPROF_PORT" ]; then
VOLTHA_PPROF_PORT=${VOLTHA_PPROF_PORT:-$((60 + DELTA))60}
fi
if [ -z "$OPENOLT_PPROF_PORT" ]; then
OPENOLT_PPROF_PORT=${OPENOLT_PPROF_PORT:-$((60 + DELTA))61}
fi
if [ -z "$OFAGENT_PPROF_PORT" ]; then
OFAGENT_PPROF_PORT=${OFAGENT_PPROF_PORT:-$((60 + DELTA))62}
fi
if [ -z "$ELASTICSEARCH_PORT" ]; then
ELASTICSEARCH_PORT=${ELASTICSEARCH_PORT:-$((92 + DELTA))00}
fi
if [ -z "$KIBANA_PORT" ]; then
KIBANA_PORT=${KIBANA_PORT:-$((56 + DELTA))01}
fi
if [ -z "$TRACING_GUI_PORT" ]; then
TRACING_GUI_PORT=${TRACING_GUI_PORT:-1$((66 + DELTA))86}
fi
else
# Find free port prefix
START=81
while true; do
if [ "$(grep -c $START .voltha/ports)" -eq 0 ]; then
break
fi
START="$((START + 1))"
done
# create new ports starting from the first available
DELTA="$((START - 81))"
ONOS_API_PORT=${ONOS_API_PORT:-${START}81}
ONOS_SSH_PORT=${ONOS_SSH_PORT:-${START}01}
VOLTHA_API_PORT=${VOLTHA_API_PORT:-5$((55 + DELTA))55}
VOLTHA_SSH_PORT=${VOLTHA_SSH_PORT:-$((50 + DELTA))22}
VOLTHA_ETCD_PORT=${VOLTHA_ETCD_PORT:-$((23 + DELTA))79}
VOLTHA_KAFKA_PORT=${VOLTHA_KAFKA_PORT:-$((90 + DELTA))92}
VOLTHA_PPROF_PORT=${VOLTHA_PPROF_PORT:-$((60 + DELTA))60}
OPENOLT_PPROF_PORT=${OPENOLT_PPROF_PORT:-$((60 + DELTA))61}
OFAGENT_PPROF_PORT=${OFAGENT_PPROF_PORT:-$((60 + DELTA))62}
ELASTICSEARCH_PORT=${ELASTICSEARCH_PORT:-$((92 + DELTA))00}
KIBANA_PORT=${KIBANA_PORT:-$((56 + DELTA))01}
TRACING_GUI_PORT=${TRACING_GUI_PORT:-1$((66 + DELTA))86}
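# Worked example (illustrative): for the first stack START=81 and DELTA=0, which yields
# ONOS 8181/8101, VOLTHA API/SSH 55555/5022, etcd 2379, Kafka 9092, pprof 6060-6062,
# Elasticsearch 9200, Kibana 5601 and tracing GUI 16686; a second stack (START=82, DELTA=1)
# shifts to 8281/8201, 55655/5122, 2479, 9192, 6160-6162, 9300, 5701 and 16786.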
fi
PORTTMP="$(mktemp -u)"
grep -v "$NAME" .voltha/ports > "$PORTTMP"
echo "$NAME=$ONOS_API_PORT,$ONOS_SSH_PORT,$VOLTHA_API_PORT,$VOLTHA_SSH_PORT,$VOLTHA_ETCD_PORT,$VOLTHA_KAFKA_PORT,$VOLTHA_PPROF_PORT,$OPENOLT_PPROF_PORT,$OFAGENT_PPROF_PORT,$ELASTICSEARCH_PORT,$KIBANA_PORT,$TRACING_GUI_PORT" >> "$PORTTMP"
cp "$PORTTMP" .voltha/ports
rm -f "$PORTTMP"
export ONOS_API_PORT ONOS_SSH_PORT
IDX=1
CLOCK="TIME:"
SPIN_PARTS=
NOT_VERIFIED=
THEX=
BUILD=
VERIFIED=
HELM=
OLD_KEY=
BIRD=
HIGH_VOLTAGE=
PLUG=
FORWARD=
GO=
DOWNLOAD=
GEAR=
NO_ENTRY=
LOCK=
if [ "$FANCY" -eq 1 ]; then
SPIN_PARTS="\xe2\xa2\x8e\xe2\xa1\xb0 \xe2\xa2\x8e\xe2\xa1\xa1 \xe2\xa2\x8e\xe2\xa1\x91 \xe2\xa2\x8e\xe2\xa0\xb1 \xe2\xa0\x8e\xe2\xa1\xb1 \xe2\xa2\x8a\xe2\xa1\xb1 \xe2\xa2\x8c\xe2\xa1\xb1 \xe2\xa2\x86\xe2\xa1\xb1"
CLOCK="\xe2\x8f\xb1"
THEX="${RED}${BOLD}\xe2\x9c\x97\x20${NORMAL}"
BUILD="${YELLOW}${BOLD}\xf0\x9f\x8f\x97${NORMAL}"
NOT_VERIFIED="$BUILD"
VERIFIED="${GREEN}${BOLD}\xe2\x9c\x93\x20${NORMAL}"
HELM="${BLUE}${BOLD}\xE2\x8E\x88${NORMAL}"
OLD_KEY="\xF0\x9F\x97\x9D"
BIRD="\xF0\x9F\x90\xA6"
HIGH_VOLTAGE="\xE2\x9A\xA1"
PLUG="\xF0\x9F\xa7\xa9"
FORWARD="\xE2\x87\xA8"
GO="\xf0\x9f\x9a\x80"
DOWNLOAD="\xf0\x9f\x93\xa5"
GEAR="\xe2\x9a\x99"
NO_ENTRY="\xe2\x9b\x94"
LOCK="\xf0\x9f\x94\x92"
fi
duration() {
local h m s t
h="$(($1 / 3600))"
m="$(($1 % 3600 / 60))"
s="$(($1 % 60))"
t=""
if [ "$h" -gt 0 ]; then
t="$t${h}h"
fi
if [ "$m" -gt 0 ]; then
t="$t${m}m"
fi
echo "$t${s}s"
}
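# Illustrative usage (comments only, not executed):
#   duration 45    # -> "45s"
#   duration 3723  # -> "1h2m3s"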
printtime() {
local INDENT
if [ "$1" == "-" ]; then
INDENT=" "
shift
fi
echo -e "$INDENT $CLOCK $(duration "$1")"
}
bspin() {
IDX=1
local INDENT
if [ "$1" == "-" ]; then
INDENT=" "
shift
fi
if [ "$FANCY" -eq 0 ]; then
LINE="${*//[[:space:]+-]}"
if [ "$LINE X" == " X" ]; then
return
fi
echo -e "$CIVIS$INDENT$*"
else
echo -en "$CIVIS$INDENT $*"
fi
}
sspin() {
local INDENT
if [ "$1" == "-" ]; then
INDENT=" "
shift
fi
if [ "$FANCY" -eq 0 ]; then
LINE="${*//[[:space:]+-]}"
if [ "$LINE X" == " X" ]; then
return
fi
echo -e "$INDENT$*"
else
C="$(echo "$SPIN_PARTS" | cut '-d ' -f "$IDX")"
echo -en "\r$INDENT$C $*"
IDX="$((IDX + 1))"
if [ "$IDX" -gt 8 ]; then
IDX=1
fi
fi
}
espin() {
local INDENT
if [ "$1" == "-" ]; then
INDENT=" "
shift
fi
if [ "$FANCY" -eq 0 ]; then
LINE="${*//[[:space:]+-]}"
if [ "$LINE X" == " X" ]; then
return
fi
echo -e "$INDENT$*"
else
echo -e "\r$INDENT$*$CNORM"
fi
}
if [ "$1" == "get" ] && [ "$2" == "voltconfig" ]; then
echo "$HOME/.volt/config-$NAME"
exit
fi
if [ $# -ne 1 ] || ! is_in "$1" "up,down,dump,clean"; then
>&2 echo "What wouild you like to do today:"
>&2 echo " up - bring up voltha"
>&2 echo " down - tear down voltha"
>&2 echo " clean - remove everything that kind-voltha created"
>&2 echo " dump - create a debug dump of running system"
exit 1
fi
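# do_curl wraps curl with the spinner, logging, timeout handling, and HTTP status-code
# checking used throughout this script. Positional parameters:
#   INDENT OP USER PASS URL DATA_TYPE DATA MSG VALID_RESULTS EXTRA ICON
# where DATA_TYPE is "file" or "json", VALID_RESULTS lists the acceptable HTTP status
# codes (e.g. "200"), and EXTRA holds additional curl arguments.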
do_curl() {
local DATA_ARGS CREDS CMD_ECHO CMD_OUTPUT SC_OUTPUT WAIT_START INDENTA EXTRAA NOW
local INDENT=$1
local OP=$2
local USER=$3
local PASS=$4
local URL=$5
local DATA_TYPE=$6
local DATA=$7
local MSG=$8
local VALID_RESULTS=$9
local EXTRA=${10}
local ICON=${11}
# Turn into an array for passing
INDENTA=()
if [ -n "$INDENT" ]; then
INDENTA=("$INDENT")
fi
IFS=' ' read -r -a EXTRAA <<< "$EXTRA"
# Thanks to the latest version of ONOS using the return code 207 this gets a
# whole lot nastier. Can't thank them enough for doing this. So in order to
# capture the command and the output in the log file, as well as capture the
# status code to verify it is 200 and not 207, multiple files and a bit of
# hackery must be used. Thanks again ONOS.
CMD_ECHO="$(mktemp -u)"
CMD_OUTPUT="$(mktemp -u)"
SC_OUTPUT="$(mktemp -u)"
WAIT_START="$(date +%s)"
CREDS=()
if [ -n "$USER" ]; then
CREDS=("--user" "$USER:$PASS")
fi
DATA_ARGS=()
if [ -n "$DATA" ]; then
if [ "$DATA_TYPE" == "file" ]; then
DATA_ARGS=("--data" "@$DATA")
elif [ "$DATA_TYPE" == "json" ]; then
DATA_ARGS=("--data" "$DATA")
fi
fi
bspin "${INDENTA[@]}" "$MSG" "$ICON"
while true; do
NOW="$(date +%s)"
if [ "$((NOW - WAIT_START))" -gt "$TIMEOUT_SECONDS" ]; then
espin "${INDENTA[@]}" "$THEX"
rm -f "$CMD_ECHO" "$CMD_OUTPUT" "$SC_OUTPUT"
doTimeout "$MSG"
fi
(set -x; curl -sSL "${CREDS[@]}" -w "%{http_code}" -o "$CMD_OUTPUT" -X "$OP" "${EXTRAA[@]}" "$URL" "${DATA_ARGS[@]}" >"$SC_OUTPUT" 2>/dev/null) >>"$CMD_ECHO" 2>&1
RESULT=$?
# Dump everything to the log
cat "$CMD_ECHO" >> "$LOG"
test -r "$CMD_OUTPUT" && cat "$CMD_OUTPUT" >> "$LOG"
SC="$(cat "$SC_OUTPUT")"
echo "RESPONSE CODE: $SC" >> "$LOG"
echo "ERROR CODE: $RESULT" >> "$LOG"
# clean up temp files
rm -f "$CMD_ECHO" "$CMD_OUTPUT" "$SC_OUTPUT"
if [ "$RESULT" -eq 0 ] && is_in "$SC" "$VALID_RESULTS"; then
break
fi
sleep 1
sspin "${INDENTA[@]}"
done
espin "${INDENTA[@]}" "$VERIFIED"
}
push_onos_config() {
local DATA_TYPE=$1
local MSG=$2
local RESOURCE=$3
local DATA=$4
do_curl "-" "POST" "karaf" "karaf" "http://$_ONOS_API_EP/onos/v1/$RESOURCE" "$DATA_TYPE" "$DATA" "$MSG" "200" "--fail -H Content-Type:application/json" "$GEAR"
}
check_onos_app_active() {
local APP_ID WAIT_START RESULT NOW
APP_ID=$1
WAIT_START="$(date +%s)"
bspin - "Checking that $APP_ID is active $CLOCK"
while true; do
NOW="$(date +%s)"
if [ "$((NOW - WAIT_START))" -gt "$TIMEOUT_SECONDS" ]; then
espin - "$THEX"
doTimeout "waiting for ONOS application activation '$APP_ID'"
fi
if (set -x; curl --fail -sSL --user karaf:karaf -X GET "http://$_ONOS_API_EP/onos/v1/applications/$APP_ID" | grep ACTIVE >>"$LOG" 2>&1) >>"$LOG" 2>&1; then
break
fi
sleep 1
sspin -
done
sleep 5 # OSGi components take a little longer than the app to activate
espin - "$VERIFIED"
}
override_onos_app() {
local APP NAME WAIT_START NOW
APP=$1
NAME="$(basename "$APP" | sed -e 's/^[0-9][0-9]*-//g' -e 's/-.*$//g')"
WAIT_START="$(date +%s)"
while true; do
NOW="$(date +%s)"
if [ "$((NOW - WAIT_START))" -gt "$TIMEOUT_SECONDS" ]; then
espin - "$THEX"
doTimeout "waiting to overwrite ONOS application '$APP'"
fi
sspin -
# Attempt to delete old version (if it exists)
(set -x; curl --fail -sSL --user karaf:karaf -X DELETE "http://$_ONOS_API_EP/onos/v1/applications/$NAME" >>"$LOG" 2>&1) >>"$LOG" 2>&1
RESULT=$?
sspin -
if [ $RESULT -ne 0 ]; then
continue
fi
if (set -x; curl --fail -sSL --user karaf:karaf -X POST -H Content-Type:application/octet-stream "http://$_ONOS_API_EP/onos/v1/applications?activate=true" --data-binary "@$APP" >>"$LOG" 2>&1) >>"$LOG" 2>&1; then
break
fi
sleep .2
done
}
activate_onos_app() {
local MSG APP WAIT_START NOW
MSG="$1"
APP=$2
WAIT_START="$(date +%s)"
bspin - "$MSG $GO"
while true; do
NOW="$(date +%s)"
if [ "$((NOW - WAIT_START))" -gt "$TIMEOUT_SECONDS" ]; then
espin - "$THEX"
doTimeout "waiting to activate ONOS application '$APP'"
fi
sspin -
if (set -x; curl --fail -sSL --user karaf:karaf -X POST "http://$_ONOS_API_EP/onos/v1/applications/$APP/active" >>"$LOG" 2>&1) >>"$LOG" 2>&1; then
break
fi
sleep .2
done
espin - "$VERIFIED"
}
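# count_pods NAMESPACE STATES LABELS PATTERN...
# Counts PODs in NAMESPACE (or "all-namespaces") whose name matches one of the given
# regex PATTERNs, whose phase is listed in the colon-delimited STATES (or "*" for any),
# and whose containers are all ready. LABELS is an optional kubectl selector such as
# "-l app=bbsim".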
count_pods() {
local NAMESPACE STATES LABELS CMD PODS
NAMESPACE=$1; shift
if [ "$NAMESPACE" == "all-namespaces" ]; then
NAMESPACE="--all-namespaces"
else
NAMESPACE="-n $NAMESPACE"
fi
STATES=$1; shift
LABELS=$1; shift
CMD=("kubectl get $NAMESPACE $LABELS pod")
PODS=$(${CMD[*]} -o go-template="{{range .items}}{{.metadata.name}}/{{.status.phase}}/_{{range .status.containerStatuses}}{{.ready}}_{{end}} {{end}}")
local COUNT=0
local PATTERNS="$*"
for POD in $PODS; do
local NAME STATE CONTAINERS TOTAL FOUND
NAME="$(echo "$POD" | cut -d/ -f 1)"
STATE="$(echo "$POD" | cut -d/ -f 2)"
CONTAINERS="$(echo "$POD" | cut -d/ -f 3 | sed -e 's/_/ /g')"
if [ "$STATES" == "*" ] || [ "$(echo "$STATES" | grep -c ":$STATE:")" -ne 0 ]; then
TOTAL="$(echo "$CONTAINERS" | wc -w)"
FOUND="$(echo "$CONTAINERS" | grep -o true | wc -l)"
if [ "$TOTAL" -eq "$FOUND" ]; then
for PATTERN in $PATTERNS; do
if [[ "$NAME" =~ $PATTERN ]]; then
COUNT="$((COUNT + 1))"
break
fi
done
fi
fi
done
echo $COUNT
}
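# wait_for_pods [-] NAMESPACE EXPECT TYPE MESSAGE LABELS PATTERN...
# Spins until the number of matching PODs (see count_pods) equals EXPECT, honoring
# WAIT_TIMEOUT. TYPE "not" matches PODs in any state (used with EXPECT=0 to wait for
# termination) and TYPE "only" additionally requires that no other PODs exist in any
# namespace.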
wait_for_pods() {
local INDENT NAMESPACE EXPECT TYPE MESSAGE LABELS PATTERNS STATES HAVE ALL WAIT_START NOW
if [ "$1" == "-" ]; then
INDENT=$1; shift
fi
NAMESPACE=$1; shift
EXPECT=$1; shift
TYPE=$1; shift
MESSAGE=$1; shift
LABELS=$1; shift
PATTERNS=("$*")
STATES=":Running:"
if [ "$TYPE" == "not" ]; then
STATES="*"
fi
HAVE="$(count_pods "$NAMESPACE" "$STATES" "$LABELS" "${PATTERNS[@]}")"
ALL=$HAVE
if [ "$TYPE" == "only" ]; then
ALL="$(count_pods "all-namespaces" "*" ".*")"
fi
COUNT="$((300 / 15))"
WAIT_START="$(date +%s)"
bspin "$INDENT" "$MESSAGE"
sspin "$INDENT"
if [ "$HAVE" -ne "$EXPECT" ] || [ "$ALL" -ne "$HAVE" ]; then
while [ "$HAVE" -ne "$EXPECT" ] || [ "$ALL" -ne "$HAVE" ]; do
NOW="$(date +%s)"
if [ "$((NOW - WAIT_START))" -gt "$TIMEOUT_SECONDS" ]; then
espin "$INDENT" "$THEX"
doTimeout "waiting for PODs to start"
fi
sspin "$INDENT"
COUNT="$((COUNT - 1))"
if [ "$COUNT" -eq 0 ]; then
HAVE="$(count_pods "$NAMESPACE" "$STATES" "$LABELS" "${PATTERNS[@]}")"
ALL="$HAVE"
if [ "$TYPE" == "only" ]; then
ALL="$(count_pods "all-namespaces" "*" ".*")"
fi
COUNT="$((300 / 15))"
fi
sleep .15
done
fi
espin "$INDENT" "$VERIFIED"
if [ "$HAVE" -ne "$EXPECT" ]; then
return 1
fi
return 0
}
port_forward() {
local NS=$1; shift
local SVC=$1; shift
local PORTS="$*"
local TAG=$SVC-$NS-$NAME
(set -x; _TAG="$TAG" bash -c "while true; do kubectl port-forward --address $PF_ADDRESS -n $NS service/$SVC $PORTS; done" >>"$PFLOG" 2>&1 &) >>"$PFLOG" 2>&1
}
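# Port-forwards are started (above) with a unique _TAG=<service>-<namespace>-<stack>
# environment variable so that kill_port_forward can later locate both the wrapper
# shell and its kubectl children via 'ps e' and terminate them.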
kill_port_forward() {
local TAG P_IDS PARENTS KIDS UNKNOWN PP_ID NS
NS=$1; shift
SVC=$1; shift
TAG=
if [ "$SVC" != "__ALL__" ]; then
TAG=$SVC-$NS-$NAME
fi
PARENTS=
KIDS=
UNKNOWN=
# shellcheck disable=SC2009
P_IDS="$(ps e -ww -A | grep "_TAG=$TAG" | grep -v grep | awk '{print $1}')"
if [ -n "$P_IDS" ]; then
for P_ID in $P_IDS; do
PP_ID="$(ps -o ppid "$P_ID" | tail -n +2)"
if [ -n "$PP_ID" ]; then
if [ "$PP_ID" -eq 1 ]; then
PARENTS="$PARENTS $P_ID"
else
KIDS="$KIDS $P_ID"
fi
else
UNKNOWN="$UNKNOWN $P_ID"
fi
done
if [ -n "$PARENTS" ]; then
# shellcheck disable=SC2086
while ps -h $PARENTS >/dev/null 2>&1; do
(set -x; eval "kill -9 $PARENTS" >>"$LOG" 2>&1) >>"$LOG" 2>&1
done
fi
if [ -n "$KIDS" ]; then
# shellcheck disable=SC2086
while ps -h $KIDS >/dev/null 2>&1; do
(set -x; eval "kill -9 $KIDS" >>"$LOG" 2>&1) >>"$LOG" 2>&1
done
fi
if [ -n "$UNKNOWN" ]; then
# shellcheck disable=SC2086
while ps -h $UNKNOWN >/dev/null 2>&1; do
(set -x; eval "kill -9 $UNKNOWN" >>"$LOG" 2>&1) >>"$LOG" 2>&1
done
fi
fi
}
resolve_chart_name() {
helm inspect chart "$1" | grep "^name:" | awk '{print $2}'
}
resolve_chart_version() {
[ "$2" != "latest" ] && echo "$2" && return
helm inspect chart "$1" | grep "^version:" | awk '{print $2}'
}
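# Illustrative usage (comments only, not executed):
#   resolve_chart_version onf/voltha 2.8.0   # -> "2.8.0" (explicit versions pass through)
#   resolve_chart_version onf/voltha latest  # -> the version reported by 'helm inspect chart'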
# Compares two semver-formatted version strings (e.g. 2.4.3)
# Returns 0 if $1 == $2, returns 1 if $1 > $2 and returns 2 if $1 < $2
compare_versions() {
[ "$1" == "$2" ] && return 0
local lowest
lowest=$(echo -e "$1\n$2" | sort -V | head -1)
[ "$lowest" == "$1" ] && return 2
return 1
}
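# Illustrative usage (comments only, not executed):
#   compare_versions 2.4.3 2.4.3   # returns 0
#   compare_versions 2.10.0 2.9.9  # returns 1 (sort -V orders each numeric field)
#   compare_versions 2.4.3 2.10.0  # returns 2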
if [ "$1" == "down" ]; then
# NOTE: './voltha down' currently tears down all the stacks; it should only remove components in the specified namespaces
echo "Tearing down voltha cluster $NAME"
LOG="down-$NAME.log"
date -u +"%Y%m%dT%H%M%SZ" >"$LOG"
HELM_MAJOR=$(helm version --client --short | sed -E -e 's/^.*v([0-9]+)\.[0-9]+\.[0-9]+.*$/\1/')
if is_in "$WITH_ONOS" "yes,legacy,classic"; then
bspin "Remove port-forwards: onos-ui-$NAME"
kill_port_forward "$INFRA_NS" onos-ui
sspin "Remove port-forwards: onos-ssh-$NAME$CEOL"
kill_port_forward "$INFRA_NS" onos-ssh
sspin "Remove port-forwards: onos-onos-classic-hs-$NAME$CEOL"
kill_port_forward "$INFRA_NS" onos-onos-classic-hs
fi
sspin "Remove port-forwards: voltha-api-$NAME$CEOL"
kill_port_forward "$VOLTHA_NS" voltha-api
kill_port_forward "$VOLTHA_NS" voltha-voltha-api
if is_in "$WITH_ETCD" "yes,external"; then
sspin "Remove port-forwards: etcd-$NAME$CEOL"
kill_port_forward "$INFRA_NS" "etcd"
fi
if is_in "$WITH_KAFKA" "yes,external"; then
sspin "Remove port-forwards: kafka-$NAME$CEOL"
kill_port_forward "$INFRA_NS" "kafka"
fi
if is_in "$WITH_BBSIM" "yes"; then
sspin "Remove port-forwards: bbsim$CEOL"
kill_port_forward "$VOLTHA_NS" bbsim
fi
if [ "$WITH_PPROF" == "yes" ]; then
sspin "Remove port-forwards: *-profiler$CEOL"
kill_port_forward "$VOLTHA_NS" voltha-rw-core-profiler
kill_port_forward "$VOLTHA_NS" voltha-voltha-rw-core-profiler
kill_port_forward "$VOLTHA_NS" voltha-of-agent-profiler
kill_port_forward "$VOLTHA_NS" voltha-voltha-of-agent-profiler
if [ "$WITH_OPEN_ADAPTERS" == "yes" ]; then
kill_port_forward "$VOLTHA_NS" adapter-open-olt-profiler
kill_port_forward "$VOLTHA_NS" open-olt-adapter-open-olt-profiler
fi
fi
if [ "$WITH_TRACING" == "yes" ]; then
sspin "Remove port-forwards: tracing-$NAME$CEOL"
VOLTHA_TRACING_CHART_NAME=$(resolve_chart_name "$VOLTHA_TRACING_CHART")
kill_port_forward "$INFRA_NS" "tracing-${VOLTHA_TRACING_CHART_NAME}-jaeger-gui"
fi
espin "$VERIFIED Remove port-forwards$CEOL"
if [ "$DEPLOY_K8S" == "yes" ]; then
if [ -x ./bin/kind ]; then
bspin "Delete Kubernetes Kind Cluster"
(set -x; ./bin/kind delete cluster --name "voltha-$NAME" >>"$LOG" 2>&1) >>"$LOG" 2>&1
espin "$VERIFIED"
else
espin "$NO_ENTRY Delete Kubernetes Kind Cluster: kind command not found"
fi
else
if [ "$CONFIG_SADIS" == "external" ]; then
bspin "Remove configmap for BBSIM SADIS server"
(set -x; kubectl -n "$BBSIM_NS" delete --ignore-not-found configmap kube-config >>"$LOG" 2>&1) >>"$LOG" 2>&1
espin "$VERIFIED"
fi
if [ "$HELM_MAJOR" -le 2 ]; then
EXISTS=$(helm list --output json 2>/dev/null | jq -r '.Releases | .[] | .Name + "," + .Namespace')
else
EXISTS=$(helm list --all-namespaces --output json 2>/dev/null | jq -r '.[] | .name + "," + .namespace')
fi
EXPECT="voltha"
if is_in "$WITH_ONOS" "yes,legacy,classic"; then
EXPECT+=" onos"
fi
if [ "$WITH_RADIUS" == "yes" ]; then
EXPECT+=" radius"
fi
if [ "$CONFIG_SADIS" == "external" ]; then
EXPECT+=" bbsim-sadis-server"
fi
if [ "$WITH_BBSIM" == "yes" ]; then
EXPECT+=" bbsim"
fi
if [ "$WITH_EFK" == "yes" ]; then
EXPECT+=" elasticsearch kibana fluentd"
fi
if [ "$WITH_TRACING" == "yes" ]; then
EXPECT+=" tracing"
fi
if [ "$WITH_OPEN_ADAPTERS" == "yes" ]; then
EXPECT+=" open-olt open-onu"
fi
if [ "$WITH_SIM_ADAPTERS" == "yes" ]; then
EXPECT+=" sim"
fi
if is_in "$WITH_ETCD" "yes,external"; then
EXPECT+=" etcd"
fi
if is_in "$WITH_KAFKA" "yes,external"; then
EXPECT+=" kafka"
fi
bspin "Remove Helm Deployments"
for i in $EXISTS; do
for j in $EXPECT; do
if [[ "$i" =~ $j ]]; then
__NAME=$(echo "$i" | cut -d, -f1)
__NS=$(echo "$i" | cut -d, -f2)
sspin "Remove Helm Deployments: $__NS:$__NAME$CEOL"
if [ "$HELM_MAJOR" -le 2 ]; then
(set -x; helm delete --no-hooks --purge "$__NAME" >>"$LOG" 2>&1) >>"$LOG" 2>&1
else
(set -x; helm uninstall --no-hooks --namespace "$__NS" "$__NAME" >>"$LOG" 2>&1) >>"$LOG" 2>&1
fi
fi
done
done
espin "$VERIFIED Remove Helm Deployments$CEOL"
if [ "$WAIT_ON_DOWN" == "yes" ]; then
PODS="voltha-ofagent.* voltha-rw-core.*"
INFRA_PODS=
ADAPT_PODS=
SIM_PODS=
EFK_PODS=
if [ "$WITH_RADIUS" == "yes" ]; then
PODS+=" radius.*"
fi
if [ "$WITH_BBSIM" == "yes" ]; then
SIM_PODS+=" bbsim.*"
fi
if [ "$WITH_EFK" == "yes" ]; then
EFK_PODS+=" kibana-* elasticsearch-* fluentd-*"
fi
if [ "$WITH_OPEN_ADAPTERS" ] || [ "$WITH_SIM_ADAPTERS" ]; then
ADAPT_PODS+=" adapter-*"
fi
if [ "$WITH_TRACING" == "yes" ]; then
INFRA_PODS+=" jaeger.*"
fi
if is_in "$WITH_ONOS" "yes,legacy,classic"; then
INFRA_PODS+=" onos-.*"
fi
if is_in "$WITH_ETCD" "yes,external"; then
INFRA_PODS+=" etcd.*"
fi
if is_in "$WITH_KAFKA" "yes,external"; then
INFRA_PODS+=" kafka.*"
fi
if [ -n "$SIM_PODS" ]; then
wait_for_pods "$BBSIM_NS" 0 "not" "Waiting for BBSIM PODs to terminate" "$BBSIM_LABEL" "$SIM_PODS"
fi
if [ -n "$INFRA_PODS" ]; then
wait_for_pods "$INFRA_NS" 0 "not" "Waiting for infrastructure PODs to terminate" "$NO_LABEL" "$INFRA_PODS"
fi
if [ -n "$ADAPT_PODS" ]; then
wait_for_pods "$ADAPTER_NS" 0 "not" "Waiting for adapter PODs to terminate" "$NO_LABEL" "$ADAPT_PODS"
fi
if [ -n "$EFK_PODS" ]; then
wait_for_pods "$INFRA_NS" 0 "not" "Waiting for EFK PODs to terminate" "$NO_LABEL" "$EFK_PODS"
fi
wait_for_pods "$VOLTHA_NS" 0 "not" "Waiting for VOLTHA PODs to terminate" "$NO_LABEL" "$PODS"
fi
fi
exit
fi
if [ "$1" == "dump" ]; then
LOG="dump-$NAME.log"
TS="$(date -u +"%Y%m%dT%H%M%SZ")"
if [ -n "$DUMP_FROM" ]; then
TS=${DUMP_FROM//[:-]}
fi
WORK="$(mktemp -u -d)"
DATA=$WORK/voltha-debug-dump-$NAME-$TS
mkdir -p "$DATA"
echo "$TS" > "$LOG"
echo -e "Capturing debug dump to voltha-debug-dump-$NAME-$TS.tgz"
bspin - "Copy install log"
if [ -f "install-$NAME.log" ]; then
(set -x; cp "install-$NAME.log" "$DATA/install-$NAME.log") >>"$LOG" 2>&1
espin - "$VERIFIED"
else
espin - "$NO_ENTRY Copy install log: install-$NAME.log not found"
fi
bspin - "Dumping Kubernetes PODs"
(set -x; kubectl get --all-namespaces pods >> "$DATA/all-pods.txt" 2>&1) >>"$LOG" 2>&1
espin - "$VERIFIED"
bspin - "Dumping Kubernetes SERVICEs"
(set -x; kubectl get --all-namespaces svc >> "$DATA/all-services.txt" 2>&1) >>"$LOG" 2>&1
espin - "$VERIFIED"
bspin - "Dumping Kubernetes EVENTs"
(set -x; kubectl get --all-namespaces events >> "$DATA/all-events.txt" 2>&1) >>"$LOG" 2>&1
espin - "$VERIFIED"
bspin - "Dumping VOLTHA POD details"
PODS="$(kubectl -n "$INFRA_NS" get pod -o name | grep onos | sed -e "s/^/$INFRA_NS:/g") $(kubectl get -n "$VOLTHA_NS" pod -o name | sed -e "s/^/$VOLTHA_NS:/g")"
SINCE=
if [ -n "$DUMP_FROM" ]; then
SINCE="--since-time=$DUMP_FROM"
fi
for POD in $PODS; do
NS="$(echo "$POD" | cut -d: -f1)"
POD="$(echo "$POD" | cut -d: -f2)"
sspin - "Dumping VOLTHA POD details: $POD$CEOL"
mkdir -p "$DATA/$POD"
(set -x; kubectl describe -n "$NS" "$POD" >> "$DATA/$POD/describe.txt" 2>&1) >>"$LOG" 2>&1
sspin - "Dumping VOLTHA POD details: $POD"
# shellcheck disable=SC2086
(set -x; kubectl logs -n "$NS" --all-containers $SINCE --previous $LOG_ARGS "$POD" >> "$DATA/$POD/logs-previous.txt" 2>&1) >>"$LOG" 2>&1
sspin - "Dumping VOLTHA POD details: $POD"
# shellcheck disable=SC2086
(set -x; kubectl logs -n "$NS" --all-containers $SINCE $LOG_ARGS "$POD" >> "$DATA/$POD/logs-current.txt" 2>&1) >>"$LOG" 2>&1
sspin - "Dumping VOLTHA POD details: $POD"
done
espin - "$VERIFIED Dumping VOLTHA POD details$CEOL"
bspin - "Dumping ETCD"
if [ -n "$(command -v etcdctl)" ]; then
(set -x; ETCDCTL_API=3 etcdctl --endpoints "localhost:$VOLTHA_ETCD_PORT" get --prefix service/voltha | hexdump -C >> "$DATA/etcd.hex" 2>&1) >>"$LOG" 2>&1
espin - "$VERIFIED"
else
espin - "$NO_ENTRY Dumping ETCD: etcdctl command not available"
fi
bspin - "Creating compressed TAR: voltha-debug-dump-$NAME-$TS.tgz"
(set -x; tar -C "$WORK" -zcf "voltha-debug-dump-$NAME-$TS.tgz" "./voltha-debug-dump-$NAME-$TS") >>"$LOG" 2>&1
espin - "$VERIFIED"
bspin - "Cleanup"
(set -x; rm -rf "$WORK") >>"$LOG" 2>&1
espin - "$VERIFIED"
bspin - "$(ls -l "voltha-debug-dump-$NAME-$TS.tgz")"
espin - "$VERIFIED"
exit
fi
LOG="install-$NAME.log"
PFLOG="port-forward-$NAME.log"
date > "$LOG"
echo "PORTS=$ONOS_API_PORT,$ONOS_SSH_PORT,$VOLTHA_API_PORT,$VOLTHA_SSH_PORT,$VOLTHA_ETCD_PORT,$VOLTHA_KAFKA_PORT,$VOLTHA_PPROF_PORT,$OPENOLT_PPROF_PORT,$OFAGENT_PPROF_PORT,$ELASTICSEARCH_PORT,$KIBANA_PORT,$TRACING_GUI_PORT" >> "$LOG"
# Output install options to log
echo "OPTIONS" >> "$LOG"
for O in $ALL_OPTIONS; do
VAL="$(eval echo "\$$O")"
if [ -n "$VAL" ]; then
printf " %-30s = %s\n" "$O" "$VAL" >> "$LOG"
fi
done
# Remove everything we created and set
if [ "$1" == "clean" ]; then
echo "Cleaning up the system"
kill_port_forward all-namespaces __ALL__
exit
fi
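# helm_values NAMESPACE INAME CHART CHART_VERSION
# Performs a helm dry-run with the same value files and arguments that helm_install
# would use and prints only the USER-SUPPLIED VALUES section, so the effective
# configuration can be inspected or logged without deploying anything.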
helm_values() {
local NAMESPACE=$1; shift
local INAME=$1; shift
local CHART=$1; shift
local CHART_VERSION=$1; shift
if [ "$CHART_VERSION X" != " X" ] && [ "$CHART_VERSION" != "latest" ]; then
CHART_VERSION="--version $CHART_VERSION"
else
CHART_VERSION=
fi
local CHART_ARGS=
if [ -r "${INAME}-values.yaml" ]; then
CHART_ARGS="-f ${INAME}-values.yaml"
fi
if [ "$HELM_USE_UPGRADE" == "yes" ]; then
_HELM_COMMAND="upgrade --install"
_HELM_ARGS="xxxdryrunxxx"
else
_HELM_COMMAND="install"
if [ "$HELM_MAJOR" -le 2 ]; then
_HELM_NAME_ARG="--name xxxdryrunxxx"
_HELM_ARGS=
else
_HELM_NAME_ARG="xxxdryrunxxx"
_HELM_ARGS="--create-namespace"
fi
fi
CMD=("helm $_HELM_COMMAND --debug --dry-run -f values.yaml $CHART_ARGS $INTERNAL_EXTRA_HELM_INSTALL_ARGS $EXTRA_HELM_INSTALL_ARGS --set defaults.log_level=$VOLTHA_LOG_LEVEL --namespace $NAMESPACE $_HELM_ARGS $CHART_VERSION $EXTRA_HELM_FLAGS $_HELM_NAME_ARG $CHART")
${CMD[*]} 2>/dev/null | awk 'PRINT==1 {print}; /^USER-SUPPLIED VALUES:/ {PRINT = 1}; /^$/ {PRINT = 0}'
}
helm_is_deployed() {
local NAMESPACE=$1; shift
local NAME=$1; shift
if [ "$HELM_MAJOR" -le 2 ]; then
helm list --deployed --short --namespace "$NAMESPACE" "$NAME" 2>/dev/null | wc -l
else
helm list --deployed --short --namespace "$NAMESPACE" --filter "$NAME" 2>/dev/null | wc -l
fi
}
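# helm_install [-] NAMESPACE INAME CHART CHART_VERSION FILTER MESSAGE...
# Installs (or upgrades, when HELM_USE_UPGRADE=yes) a chart using values.yaml filtered
# through yq: FILTER "-" uses values.yaml as-is, "+key" appends the sub-tree at 'key',
# and any other value extracts just that sub-tree. Retries until success or WAIT_TIMEOUT.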
helm_install() {
local INDENT NOW VALUES_FILE PPROF_ARG
if [ "$1" == "-" ]; then
INDENT=$1; shift
fi
local NAMESPACE=$1; shift
local INAME=$1; shift
local CHART=$1; shift
local CHART_VERSION=$1; shift
local FILTER=$1; shift
local MESSAGE=$*
if [ "$CHART_VERSION X" != " X" ] && [ "$CHART_VERSION" != "latest" ]; then
CHART_VERSION="--version $CHART_VERSION"
else
CHART_VERSION=
fi
local CHART_ARGS=
if [ -r "${INAME}-values.yaml" ]; then
CHART_ARGS="-f ${INAME}-values.yaml"
fi
local WAIT_START CMD
WAIT_START="$(date +%s)"
COUNT="$((300 / 15))"
bspin "$INDENT" "$MESSAGE"
if [ "$HELM_USE_UPGRADE" == "yes" ]; then
_HELM_COMMAND="upgrade --install"
_HELM_NAME_ARG="$INAME"
else
_HELM_COMMAND="install"
if [ "$HELM_MAJOR" -le 2 ]; then
_HELM_NAME_ARG="--name $INAME"
_HELM_ARGS=
else
_HELM_NAME_ARG="$INAME"
_HELM_ARGS="--create-namespace"
fi
fi
VALUES_FILE="$(mktemp)"
if [ "$FILTER" == "-" ]; then
cp "values.yaml" "$VALUES_FILE"
elif [ "${FILTER:0:1}" == "+" ]; then
cp "values.yaml" "$VALUES_FILE"
yq r - "${FILTER:1}" <"values.yaml" >>"$VALUES_FILE"
cat "$VALUES_FILE" >>"$LOG"
else
yq r - "$FILTER" <"values.yaml" >"$VALUES_FILE"
fi
if [ "$WITH_PPROF" == "yes" ]; then
PPROF_ARG="--set profiler.enabled=true"
fi
CMD=("helm $_HELM_COMMAND -f $VALUES_FILE $_HELM_ARGS $CHART_ARGS $PPROF_ARG $INTERNAL_EXTRA_HELM_INSTALL_ARGS $EXTRA_HELM_INSTALL_ARGS --set defaults.log_level=$VOLTHA_LOG_LEVEL --namespace $NAMESPACE $CHART_VERSION $EXTRA_HELM_FLAGS $_HELM_NAME_ARG $CHART")
(set -x; ${CMD[*]} >>"$LOG" 2>&1) >>"$LOG" 2>&1
SUCCESS=$?
while [ "$SUCCESS" -ne 0 ]; do
NOW="$(date +%s)"
if [ "$((NOW - WAIT_START))" -gt "$TIMEOUT_SECONDS" ]; then
espin "$THEX"
rm -f "$VALUES_FILE"
doTimeout "waiting for helm install $CHART"
fi
sspin "$INDENT"
COUNT="$((COUNT - 1))"
if [ "$COUNT" -eq 0 ]; then
if [ "$HELM_USE_UPGRADE" == "no" ]; then
if [ "$HELM_MAJOR" -le 2 ]; then
(set -x; helm delete --purge "$INAME" >>"$LOG" 2>&1) >>"$LOG" 2>&1
else
(set -x; helm uninstall "$INAME" >>"$LOG" 2>&1) >>"$LOG" 2>&1
fi
fi
(set -x; ${CMD[*]} >>"$LOG" 2>&1) >>"$LOG" 2>&1
SUCCESS=$?
COUNT="$((300 / 15))"
fi
sleep .15
done
rm -f "$VALUES_FILE"
espin "$INDENT" "$VERIFIED"
}
bspin "Installing VOLTHA: $NAME $GEAR"
espin "$VERIFIED"
echo "INSTALL NAME: $NAME" >> "$LOG"
STIME="$(date +%s)"
if [ "$INSTALL_KUBECTL" == "no" ]; then
bspin "Skip kubectl install"
espin $NO_ENTRY
else
bspin "Verify kubectl $HELM"
if [ -x "$GOPATH/bin/kubectl" ]; then
espin "$VERIFIED"
else
ERR_OUT="$(mktemp)"
espin "$NOT_VERIFIED"
bspin - "Download and install Kubernetes/kubectl $DOWNLOAD"
(set -x; curl --fail -o "$GOPATH/bin/kubectl" -sSL "https://storage.googleapis.com/kubernetes-release/release/$(curl --fail -sSL https://storage.googleapis.com/kubernetes-release/release/stable.txt 2>>"$ERR_OUT")/bin/$HOSTOS/$HOSTARCH/kubectl" >>"$LOG" 2>>"$ERR_OUT") >>"$LOG" 2>&1
RESULT=$?
if [ "$RESULT" -ne 0 ]; then
espin - "$THEX"
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR: unable to download kubectl: $(tail "$ERR_OUT")${NORMAL}"
cat "$ERR_OUT" >> "$LOG"
rm -rf "$ERR_OUT" "$GOPATH/bin/kubectl"
exit 1
fi
(set -x; chmod 755 "$GOPATH/bin/kubectl" >>"$LOG" 2>&1) >>"$LOG" 2>&1
espin - "$VERIFIED"
rm -rf "$ERR_OUT"
fi
fi
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
STIME="$(date +%s)"
if [ "$DEPLOY_K8S" == "no" ]; then
bspin "Skip Kubernetes/Kind Deployment"
espin $NO_ENTRY
else
bspin "Verify Kubernetes/Kind $HELM"
IS_INSTALLED=0
OP_TYPE="install"
if [ -x "$GOPATH/bin/kind" ]; then
OP_TYPE="upgrade"
if [ "$("$GOPATH/bin/kind" version | grep -c "$KIND_VERSION")" -eq 1 ]; then
IS_INSTALLED=1
espin "$VERIFIED"
fi
fi
if [ "$IS_INSTALLED" -eq 0 ]; then
ERR_OUT="$(mktemp)"
espin "$NOT_VERIFIED"
bspin - "Download and $OP_TYPE Kubernetes/kind $DOWNLOAD"
(set -x; curl -o "$GOPATH/bin/kind" --fail -sSL "https://github.com/kubernetes-sigs/kind/releases/download/$KIND_VERSION/kind-$HOSTOS-$HOSTARCH" >>"$LOG" 2>"$ERR_OUT") >>"$LOG" 2>&1
RESULT=$?
if [ $RESULT -ne 0 ]; then
espin - "$THEX"
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR: unable to download kind: $(tail "$ERR_OUT")${NORMAL}"
cat "$ERR_OUT" >> "$LOG"
rm -rf "$ERR_OUT" "$GOPATH/bin/kind"
exit 1
fi
(set -x; chmod 755 "$GOPATH/bin/kind" >>"$LOG" 2>&1) >>"$LOG" 2>&1
rm -rf "$ERR_OUT"
espin - "$VERIFIED"
fi
fi
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
STIME="$(date +%s)"
if [ "$INSTALL_HELM" == "no" ]; then
bspin "Skip Helm Install"
espin $NO_ENTRY
else
bspin "Verify Helm $HELM"
HELM_UPGRADE=0
HELM_UPGRADE_DESC="install"
if [ -x "$GOPATH/bin/helm" ]; then
HAVE_VER=$(helm version --client --short | sed -E -e 's/^.*(v[0-9]+\.[0-9]+\.[0-9]+).*$/\1/')
HELM_UP_DOWN="$(echo -e "$HAVE_VER\n$HELM_VERSION" | sort -V | head -1)"
if [ "$HAVE_VER" != "$HELM_VERSION" ]; then
if [ "$HELM_UP_DOWN" == "$HELM_VERSION" ]; then
HELM_UPGRADE_DESC="downgrade"
else
HELM_UPGRADE_DESC="upgrade"
fi
HELM_UPGRADE=1
else
espin "$VERIFIED"
fi
else
HELM_UPGRADE=1
fi
if [ "$HELM_UPGRADE" -ne 0 ]; then
ERR_OUT="$(mktemp)"
INSTALL_HELM_BIN=$(mktemp)
espin "$NOT_VERIFIED"
bspin - "Download and $HELM_UPGRADE_DESC Helm $DOWNLOAD"
(set -x; curl -o "$INSTALL_HELM_BIN" --fail -sSL https://git.io/get_helm.sh >>"$LOG" 2>"$ERR_OUT") >>"$LOG" 2>&1
RESULT=$?
if [ $RESULT -ne 0 ]; then
espin - "$THEX"
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR: unable to download helm installer: $(tail "$ERR_OUT")${NORMAL}"
cat "$ERR_OUT" >> "$LOG"
rm -rf "$INSTALL_HELM_BIN" "$ERR_OUT" "$GOPATH/bin/helm" "$GOPATH/bin/tiller"
exit 1
fi
rm -rf "$ERR_OUT"
chmod +x "$INSTALL_HELM_BIN"
(set -x; PATH="$GOPATH/bin:$PATH" DESIRED_VERSION=$HELM_VERSION USE_SUDO=false HELM_INSTALL_DIR=$GOPATH/bin "$INSTALL_HELM_BIN" >>"$LOG" 2>&1) >>"$LOG" 2>&1
RESULT=$?
if [ $RESULT -ne 0 ]; then
espin - "$THEX"
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR: unable to install helm, see install log for details${NORMAL}"
rm -rf "$INSTALL_HELM_BIN" "$ERR_OUT" "$GOPATH/bin/helm" "$GOPATH/bin/tiller"
exit 1
fi
rm -rf "$INSTALL_HELM_BIN" "$ERR_OUT"
espin - "$VERIFIED"
fi
fi
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
HELM_MAJOR=$(helm version --client --short 2>/dev/null| sed -E -e 's/^.*v([0-9]+)\.[0-9]+\.[0-9]+.*$/\1/')
STIME="$(date +%s)"
bspin "Verify voltctl $HIGH_VOLTAGE"
VOK=0
VMESSAGE="install"
ERR_OUT=$(mktemp)
export VC_VERSION="$VOLTCTL_VERSION"
if [ "$VC_VERSION" == "latest" ]; then
set -o pipefail # TODO: would be nice to run all in pipefail mode
VC_VERSION="$(curl --fail -sSL https://api.github.com/repos/opencord/voltctl/releases/latest 2>"$ERR_OUT" | jq -r .tag_name | sed -e 's/^v//g')"
RESULT=$?
set +o pipefail
if [ "$RESULT" -ne 0 ]; then
espin - "$THEX"
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR: unable to determine released version of voltctl: $(cat "$ERR_OUT")${NORMAL}"
rm -rf "$ERR_OUT" "$GOPATH/bin/voltctl"
exit 1
fi
export VC_VERSION
fi
if [ -x "$GOPATH/bin/voltctl" ]; then
VHAVE="$("$GOPATH/bin/voltctl" version --clientonly -o json 2>/dev/null | jq -r .version)"
RESULT=$?
if [ $RESULT -eq 0 ] && [ "$VHAVE" == "$VC_VERSION" ]; then
VOK=1
espin "$VERIFIED"
else
VCHECK="$(echo -e "$VHAVE\n$VC_VERSION" | sort -V | head -1)"
if [ "$VCHECK" == "$VHAVE" ]; then
VMESSAGE="upgrade"
else
VMESSAGE="downgrade"
fi
fi
fi
if [ "$VOK" -eq 0 ]; then
espin "$NOT_VERIFIED"
bspin - "Download and $VMESSAGE voltctl $DOWNLOAD"
(set -x; curl --fail -o "$GOPATH/bin/voltctl" -sSL "https://github.com/opencord/voltctl/releases/download/v$VC_VERSION/voltctl-$VC_VERSION-$HOSTOS-$HOSTARCH" >>"$LOG" 2>"$ERR_OUT") >>"$LOG" 2>&1
RESULT=$?
if [ $RESULT -ne 0 ]; then
espin - "$THEX"
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR: unable to download voltctl (version $VC_VERSION): $(cat "$ERR_OUT")${NORMAL}"
rm -rf "$ERR_OUT" "$GOPATH/bin/voltctl"
exit 1
fi
(set -x; chmod 755 "$GOPATH/bin/voltctl" >>"$LOG" 2>&1) >>"$LOG" 2>&1
espin - "$VERIFIED"
fi
rm -rf "$ERR_OUT"
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
bspin "Verify command PATH"
espin "$VERIFIED"
STIME="$(date +%s)"
if [ "$DEPLOY_K8S" == "yes" ]; then
HAVE="$(kind get clusters 2>/dev/null | grep -c "voltha-$NAME")"
bspin "Verify Kubernetes/Kind Cluster"
sspin
if [ "$HAVE" -eq 0 ]; then
espin "$NOT_VERIFIED"
FILE="$NAME-cluster.cfg"
if [ -n "$KIND_CFG_FILE" ]; then
FILE="$KIND_CFG_FILE"
else
bspin - "Generating cluster configuration"
if [ -f "$FILE" ] ; then
rm "$FILE"
fi
touch "$FILE"
yq w -i "$FILE" kind Cluster
yq w -i "$FILE" apiVersion "kind.sigs.k8s.io/v1alpha3"
if [ ! "$NUM_OF_CONTROLLER_NODES" -eq 0 ]; then
for instance in $(seq 1 "$NUM_OF_CONTROLLER_NODES"); do
yq w -i "$FILE" "nodes[+].role" "control-plane"
done
fi
if [ ! "$NUM_OF_WORKER_NODES" -eq 0 ]; then
for instance in $(seq 1 "$NUM_OF_WORKER_NODES"); do
yq w -i "$FILE" "nodes[+].role" worker
done
fi
espin - "$VERIFIED"
fi
cat "$FILE" >> "$LOG" 2>&1
kind create cluster --name "voltha-$NAME" --config "$FILE"
else
espin "$VERIFIED"
fi
KUBECONFIG_TMP=$(mktemp)
kind get kubeconfig --name="voltha-$NAME" >"$KUBECONFIG_TMP"
mkdir -p "$HOME/.kube"
KUBECONFIG="$HOME/.kube/kind-config-voltha-$NAME"; export KUBECONFIG
cp "$KUBECONFIG_TMP" "$KUBECONFIG"
rm -rf "$KUBECONFIG_TMP"
P="coredns-.* \
etcd-voltha-$NAME-control-plane \
kindnet-.* \
kube-apiserver-voltha-$NAME-control-plane \
kube-controller-manager-voltha-$NAME-control-plane \
kube-proxy-.* \
kube-scheduler-voltha-$NAME-control-plane"
EXPECT=2 # Always 2 DNS instances
EXPECT=$((EXPECT + 4 * NUM_OF_CONTROLLER_NODES)) # etcd, apiserver, controller manager, scheduler
EXPECT=$((EXPECT + 2 * (NUM_OF_CONTROLLER_NODES + NUM_OF_WORKER_NODES))) # kindnet, proxy
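# e.g. (illustrative): with the default 1 control-plane and 2 worker nodes this is
# 2 + 4*1 + 2*(1+2) = 12 system PODs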
wait_for_pods - "kube-system" "$EXPECT" "includes" "Waiting for system PODs to start" "$NO_LABEL" "$P"
fi
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
if [ "$SCHEDULE_ON_CONTROL_NODES" == "yes" ]; then
bspin "Untaint control nodes to allow scheduling $LOCK"
for MNODE in $(kubectl get node --selector='node-role.kubernetes.io/master' -o json | jq -r '.items[].metadata.name'); do
(set -x; kubectl taint node "$MNODE" node-role.kubernetes.io/master:NoSchedule- >>"$LOG" 2>&1) >>"$LOG" 2>&1
sspin
done
espin "$VERIFIED"
fi
if [ "$HELM_MAJOR" -le 2 ]; then
STIME="$(date +%s)"
COUNT="$(count_pods "kube-system" ":Running:" "$NO_LABEL" "tiller-deploy-.*")"
bspin "Verify Helm"
if [ "$COUNT" -ne 1 ]; then
espin "$NOT_VERIFIED"
echo -e "Configuring Helm $GEAR"
if [ "$INSTALL_HELM" == "no" ]; then
bspin - "Skip Helm Initialization"
espin - $NO_ENTRY
else
bspin - "Initialize Helm"
(set -x; helm init --upgrade >>"$LOG" 2>&1) >>"$LOG" 2>&1
espin - "$VERIFIED"
wait_for_pods - "kube-system" 1 "includes" "Waiting for Tiller POD to start" "$NO_LABEL" "tiller-deploy-.*"
fi
# HACK (sort-of) - the config for tiller is about to be patched, which will
# cause the tiller pod to be recreated. This can sometimes cause a timing
# issue with the "wait_for_pods" call on tiller as it may incorrectly
# identify the running/ready tiller pod that is soon to be terminated as
# what it is waiting for. To avoid this issue we do a clean scale down and
# scale up of the pod so the script controls when it should be expecting
# things
(set -x; kubectl -n kube-system scale deploy tiller-deploy --replicas=0 >>"$LOG" 2>&1) >>"$LOG" 2>&1
wait_for_pods - "kube-system" 0 "not" "Waiting for Tiller POD to shutdown" "$NO_LABEL" "tiller-deploy-.*"
# Create a k8s service account so that Helm can create pods
bspin - "Create Tiller ServiceAccount"
(set -x; kubectl create serviceaccount --namespace kube-system tiller >>"$LOG" 2>&1) >>"$LOG" 2>&1
espin - "$VERIFIED"
bspin - "Create Tiller ClusterRoleBinding"
(set -x; kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller >>"$LOG" 2>&1) >>"$LOG" 2>&1
espin - "$VERIFIED"
bspin - "Update Tiller Manifest"
(set -x; kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}' >>"$LOG" 2>&1) >>"$LOG" 2>&1
# HACK (sort-of) - part two, spin it back up
(set -x; kubectl -n kube-system scale deploy tiller-deploy --replicas=1 >>"$LOG" 2>&1) >>"$LOG" 2>&1
espin - "$VERIFIED"
else
if [ "$(helm version -s --template '{{.Server.SemVer}}')" != "$HELM_VERSION" ]; then
espin "$NOT_VERIFIED"
bspin - "Sync Tiller server version with helm client"
(set -x; helm init --upgrade --force-upgrade --wait >>"$LOG" 2>&1) >>"$LOG" 2>&1
espin - "$VERIFIED"
else
espin "$VERIFIED"
fi
fi
wait_for_pods - "kube-system" 1 "includes" "Waiting for Tiller POD to start" "$NO_LABEL" "tiller-deploy-.*"
fi
# The `etcd` repo is temporary until `http://github.com/helm/charts/pull/22955` is
# merged into the incubator charts
REPOS="onf|https://charts.opencord.org|ONF_VOLTHA \
stable|https://kubernetes-charts.storage.googleapis.com|Google_Stable \
incubator|https://kubernetes-charts-incubator.storage.googleapis.com|Google_Incubator \
onos|https://charts.onosproject.org|ONF_ONOS \
atomix|https://charts.atomix.io|ONF_Atomix \
bbsim-sadis|https://ciena.github.io/bbsim-sadis-server/charts|Custom_BBSIM_SADIS_Server \
elastic|https://helm.elastic.co|Elastic \
kiwigrid|https://kiwigrid.github.io|Fluentd-ElasticSearch \
bitnami|https://charts.bitnami.com/bitnami|Bitnami"
REPO_UPDATE_REQ="no"
bspin - "Verify Helm Repository"
for REPO in $REPOS; do
REPO_NAME="$(echo "$REPO" | cut -d\| -f1)"
REPO_URL="$(echo "$REPO" | cut -d\| -f2)"
REPO_LABEL="$(echo "$REPO" | cut -d\| -f3 | sed -e 's/_/ /g')"
sspin - "Verify Helm Repository: $REPO_LABEL$CEOL"
if [ "$(helm repo list 2>/dev/null | grep -c "$REPO_NAME" 2>/dev/null)" -eq 0 ]; then
sspin - "Add Helm Repository: $REPO_LABEL$CEOL"
(set -x; helm repo add "$REPO_NAME" "$REPO_URL" >>"$LOG" 2>&1) >>"$LOG" 2>&1
REPO_UPDATE_REQ="yes"
fi
done
sspin - "Verify Helm Repositories$CEOL"
espin - "$VERIFIED"
if [ "$REPO_UPDATE_REQ" == "yes" ] || [ "$UPDATE_HELM_REPOS" == "yes" ]; then
bspin - "Update Helm repository cache"
(set -x; helm repo update >>"$LOG" 2>&1) >>"$LOG" 2>&1
espin - "$VERIFIED"
fi
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
# Resolve chart versions and log them for debug/support
RESOLVED_VOLTHA_CHART_VERSION=$(resolve_chart_version "$VOLTHA_CHART" "$VOLTHA_CHART_VERSION")
RESOLVED_VOLTHA_BBSIM_CHART_VERSION=$(resolve_chart_version "$VOLTHA_BBSIM_CHART" "$VOLTHA_BBSIM_CHART_VERSION")
RESOLVED_VOLTHA_TRACING_CHART_VERSION=$(resolve_chart_version "$VOLTHA_TRACING_CHART" "$VOLTHA_TRACING_CHART_VERSION")
RESOLVED_VOLTHA_ADAPTER_SIM_CHART_VERSION=$(resolve_chart_version "$VOLTHA_ADAPTER_SIM_CHART" "$VOLTHA_ADAPTER_SIM_CHART_VERSION")
RESOLVED_VOLTHA_ADAPTER_OPEN_OLT_CHART_VERSION=$(resolve_chart_version "$VOLTHA_ADAPTER_OPEN_OLT_CHART" "$VOLTHA_ADAPTER_OPEN_OLT_CHART_VERSION")
RESOLVED_VOLTHA_ADAPTER_OPEN_ONU_CHART_VERSION=$(resolve_chart_version "$VOLTHA_ADAPTER_OPEN_ONU_CHART" "$VOLTHA_ADAPTER_OPEN_ONU_CHART_VERSION")
RESOLVED_ONOS_CHART_VERSION=$(resolve_chart_version "$ONOS_CHART" "$ONOS_CHART_VERSION")
RESOLVED_ONOS_CLASSIC_CHART_VERSION=$(resolve_chart_version "$ONOS_CLASSIC_CHART" "$ONOS_CLASSIC_CHART_VERSION")
RESOLVED_KAFKA_CHART_VERSION=$(resolve_chart_version "$KAFKA_CHART" "$KAFKA_CHART_VERSION")
RESOLVED_ELASTICSEARCH_CHART_VERSION=$(resolve_chart_version "$ELASTICSEARCH_CHART" "$ELASTICSEARCH_CHART_VERSION")
RESOLVED_KIBANA_CHART_VERSION=$(resolve_chart_version "$KIBANA_CHART" "$KIBANA_CHART_VERSION")
RESOLVED_FLUENTD_ELASTICSEARCH_CHART_VERSION=$(resolve_chart_version "$FLUENTD_ELASTICSEARCH_CHART" "$FLUENTD_ELASTICSEARCH_CHART_VERSION")
RESOLVED_BBSIM_SADIS_SERVER_CHART_VERSION=$(resolve_chart_version "$BBSIM_SADIS_SERVER_CHART" "$BBSIM_SADIS_SERVER_CHART_VERSION")
RESOLVED_RADIUS_CHART_VERSION=$(resolve_chart_version "$RADIUS_CHART" "$RADIUS_CHART_VERSION")
cat <<EOV >>"$LOG"
Resolved helm charts and versions:
$VOLTHA_CHART:$RESOLVED_VOLTHA_CHART_VERSION
$VOLTHA_BBSIM_CHART:$RESOLVED_VOLTHA_BBSIM_CHART_VERSION
$VOLTHA_TRACING_CHART:$RESOLVED_VOLTHA_TRACING_CHART_VERSION
$VOLTHA_ADAPTER_SIM_CHART:$RESOLVED_VOLTHA_ADAPTER_SIM_CHART_VERSION
$VOLTHA_ADAPTER_OPEN_OLT_CHART:$RESOLVED_VOLTHA_ADAPTER_OPEN_OLT_CHART_VERSION
$VOLTHA_ADAPTER_OPEN_ONU_CHART:$RESOLVED_VOLTHA_ADAPTER_OPEN_ONU_CHART_VERSION
$ONOS_CHART:$RESOLVED_ONOS_CHART_VERSION
$ONOS_CLASSIC_CHART:$RESOLVED_ONOS_CLASSIC_CHART_VERSION
$KAFKA_CHART:$RESOLVED_KAFKA_CHART_VERSION
$ELASTICSEARCH_CHART:$RESOLVED_ELASTICSEARCH_CHART_VERSION
$KIBANA_CHART:$RESOLVED_KIBANA_CHART_VERSION
$FLUENTD_ELASTICSEARCH_CHART:$RESOLVED_FLUENTD_ELASTICSEARCH_CHART_VERSION
$BBSIM_SADIS_SERVER_CHART:$RESOLVED_BBSIM_SADIS_SERVER_CHART_VERSION
$RADIUS_CHART:$RESOLVED_RADIUS_CHART_VERSION
EOV
STIME="$(date +%s)"
bspin "Verify Helm values file: values.yaml"
if [ ! -r "./values.yaml" ]; then
espin "$NOT_VERIFIED"
bspin - "Download Helm values file: values.yaml to values.yaml $DOWNLOAD"
ERR_OUT="$(mktemp)"
if ! (set -x; curl --fail -o "./values.yaml" -sSL "https://raw.githubusercontent.com/opencord/kind-voltha/$VK_RELEASE/values.yaml" >>"$LOG" 2>"$ERR_OUT") >>"$LOG" 2>&1; then
espin - "$THEX"
echo -e "${RED}${BOLD}${ERROR}ERROR: values.yaml${NORMAL}${RED} - $(cat "$ERR_OUT")${NORMAL}"
echo "ERROR: $(cat "$ERR_OUT")" >>"$LOG"
rm -rf "$ERR_OUT" "./values.yaml"
exit 1
fi
rm -rf "$ERR_OUT"
espin - "$VERIFIED"
else
espin "$VERIFIED"
fi
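# Support files are fetched from the kind-voltha repository at the branch or tag
# named by VK_RELEASE, so pinning VK_RELEASE to a specific release keeps values.yaml
# and the other downloaded files consistent with that release.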
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
STIME="$(date +%s)"
bspin "Verify or download chart specific values files $DOWNLOAD"
VALUES_FILES="monkey-values.yaml"
ERR_OUT="$(mktemp)"
for i in $VALUES_FILES; do
if [ ! -r "./$i" ]; then
if ! (set -x; curl --fail -o "./$i" -sSL "https://raw.githubusercontent.com/opencord/kind-voltha/$VK_RELEASE/$i" >>"$LOG" 2>"$ERR_OUT") >>"$LOG" 2>&1; then
espin "$THEX"
echo -e "${RED}${BOLD}${ERROR}ERROR: $i${NORMAL}${RED} - $(cat "$ERR_OUT")${NORMAL}"
echo "ERROR: $i - $(cat "$ERR_OUT")" >>"$LOG"
rm -rf "$ERR_OUT" "./$i"
exit 1
fi
rm -rf "$ERR_OUT"
fi
done
rm -rf "$ERR_OUT"
espin "$VERIFIED"
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
if [ "$WITH_CHAOS" == "yes" ]; then
bspin "Verify or clone kube-monkey helm chart $DOWNLOAD"
if [ -r ./kube-monkey ]; then
espin "$VERIFIED"
else
espin "$NOT_VERIFIED"
bspin - "GIT clone kube-monkey"
(set -x; git clone https://github.com/asobti/kube-monkey kube-monkey >>"$LOG" 2>&1) >>"$LOG" 2>&1
espin - "$VERIFIED"
fi
fi
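# The cloned chart is not installed here; when WITH_CHAOS=yes it is installed
# later from ./kube-monkey/helm/kubemonkey (see the kube-monkey step near the end).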
if [ "$JUST_K8S" == "yes" ]; then
echo "Environment deployed, not deploying VOLTHA artifacts as requested. Good bye." | tee -a "$LOG"
echo ""
echo "Please issue the following commands in your terminal to ensure that you" | tee -a "$LOG"
echo "are accessing the correct Kubernetes/Kind cluster as well as have the " | tee -a "$LOG"
echo "tools required by VOLTHA in your command path. " | tee -a "$LOG"
echo "" | tee -a "$LOG"
echo -en "$BOLD"
if [ "$DEPLOY_K8S" == "yes" ]; then
KUBECONFIG_TMP=$(mktemp)
kind get kubeconfig --name="voltha-$NAME" >"$KUBECONFIG_TMP"
mkdir -p "$HOME/.kube"
KUBECONFIG="$HOME/.kube/kind-config-voltha-$NAME"; export KUBECONFIG
cp "$KUBECONFIG_TMP" "$KUBECONFIG"
rm -rf "$KUBECONFIG_TMP"
echo "export KUBECONFIG=\"$KUBECONFIG\"" | tee -a "$LOG"
fi
echo "export PATH=$GOPATH/bin:\$PATH" | tee -a "$LOG"
echo -en "$NORMAL"
echo "" | tee -a "$LOG"
echo "Thank you for choosing kind-voltha for you quick cluster needs." | tee -a "$LOG"
exit 0
fi
if is_in "$WITH_ETCD" "yes,external"; then
STIME="$(date +%s)"
bspin "Verify external ETCD cluster $OLD_KEY"
EXPECT=$NUM_OF_ETCD
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$INFRA_NS" "^etcd\$")" -ne 1 ]; then
espin "$NOT_VERIFIED"
INTERNAL_EXTRA_HELM_INSTALL_ARGS="--set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=$NUM_OF_ETCD" helm_install - "$INFRA_NS" etcd "$ETCD_CHART" "$ETCD_CHART_VERSION" etcd "$_HELM_DESC external ETCD cluster"
INTERNAL_EXTRA_HELM_INSTALL_ARGS=
else
espin "$VERIFIED"
fi
wait_for_pods - "$INFRA_NS" "$EXPECT" "includes" "Waiting for ETCD cluster to start" "$NO_LABEL" "etcd-.*"
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
fi
if [ "$WITH_ETCD" == "yes" ] && [ "$WITH_PORT_FORWARDS" == "yes" ]; then
bspin - "Forward VOLTHA ETCD port $FORWARD"
kill_port_forward "$INFRA_NS" "etcd"
port_forward "$INFRA_NS" "etcd" "$VOLTHA_ETCD_PORT:2379"
espin - "$VERIFIED"
fi
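# With the forward in place a quick sanity check is possible from the host,
# assuming an etcdctl v3 client is installed locally (not something this script
# provides), e.g.:
#   ETCDCTL_API=3 etcdctl --endpoints "localhost:$VOLTHA_ETCD_PORT" get --prefix --keys-only service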
if is_in "$WITH_KAFKA" "yes,external"; then
_TMP="$(mktemp -u)"
cat << EOC > "$_TMP"
configurationOverrides:
"default.replication.factor": $NUM_OF_KAFKA
"offsets.topic.replication.factor": $NUM_OF_KAFKA
"log.retention.hours": 4
"log.message.timestamp.type": "LogAppendTime"
persistence:
enabled: false
zookeeper:
replicaCount: $NUM_OF_KAFKA
persistence:
enabled: false
replicas: $NUM_OF_KAFKA
EOC
STIME="$(date +%s)"
bspin "Verify external Kafka cluster $OLD_KEY"
EXPECT=$((NUM_OF_KAFKA*2))
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$INFRA_NS" "^kafka\$")" -ne 1 ]; then
espin "$NOT_VERIFIED"
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" -f $_TMP"
helm_install - "$INFRA_NS" kafka "$KAFKA_CHART" "$KAFKA_CHART_VERSION" kafka "$_HELM_DESC external Kafka cluster"
else
espin "$VERIFIED"
fi
wait_for_pods - "$INFRA_NS" "$EXPECT" "includes" "Waiting for Kafka cluster to start" "$NO_LABEL" "kafka-.*"
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
rm -rf "$_TMP"
fi
if [ "$WITH_KAFKA" == "yes" ] && [ "$WITH_PORT_FORWARDS" == "yes" ]; then
bspin - "Forward VOLTHA Kafka port $FORWARD"
kill_port_forward "$INFRA_NS" "kafka"
port_forward "$INFRA_NS" kafka "$VOLTHA_KAFKA_PORT:9092"
espin - "$VERIFIED"
fi
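# The forwarded broker on localhost:$VOLTHA_KAFKA_PORT is the same endpoint the
# voltctl configuration generated near the end of this script points at (its -k flag).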
STIME="$(date +%s)"
if is_in "$WITH_ONOS" "yes,legacy,classic"; then
if is_in "$WITH_ONOS" "legacy"; then
bspin "Verify ONOS LEGACY installed $BIRD"
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$INFRA_NS" "^onos\$")" -ne 1 ]; then
espin "$NOT_VERIFIED"
INTERNAL_EXTRA_HELM_INSTALL_ARGS="$SET_TAG $EXTRA_HELM_FLAGS" helm_install - "$INFRA_NS" onos "$ONOS_CHART" "$ONOS_CHART_VERSION" "+onos" "$_HELM_DESC ONOS LEGACY"
INTERNAL_EXTRA_HELM_INSTALL_ARGS=
else
espin "$VERIFIED"
fi
wait_for_pods - "$INFRA_NS" 1 "includes" "Waiting for ONOS to start" "$NO_LABEL" "onos-.*"
if [ "$WITH_PORT_FORWARDS" == "yes" ]; then
bspin - "Forward ONOS API port $FORWARD"
kill_port_forward "$INFRA_NS" onos-ui
port_forward "$INFRA_NS" onos-ui "$ONOS_API_PORT:8181"
espin - "$VERIFIED"
bspin - "Forward ONOS SSH port $FORWARD"
kill_port_forward "$INFRA_NS" onos-ssh
port_forward "$INFRA_NS" onos-ssh "$ONOS_SSH_PORT:8101"
espin - "$VERIFIED"
_ONOS_API_EP="127.0.0.1:$ONOS_API_PORT"
else
_ONOS_API_EP="$(get_service_ep "$INFRA_NS" onos-ui)"
fi
elif is_in "$WITH_ONOS" "yes,classic"; then
bspin "Verify ONOS CLASSIC installed $BIRD"
# ONOS CLASSIC identifies its image repo/tag/pullPolicy differently, so yq is used to grab the
# values from the rendered Helm values
YAML_VALUES=$(INTERNAL_EXTRA_HELM_INSTALL_ARGS="--set replicas=$NUM_OF_ONOS --set atomix.replicas=$NUM_OF_ATOMIX" \
helm_values "$INFRA_NS" onos "$ONOS_CLASSIC_CHART" "$ONOS_CLASSIC_CHART_VERSION")
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$INFRA_NS" "^onos\$")" -ne 1 ]; then
espin "$NOT_VERIFIED"
_ONOS_REPO="$(echo "$YAML_VALUES" | yq read - images.onos.repository)"
if [ -z "$_ONOS_REPO" ]; then
_ONOS_REPO="voltha/voltha-onos"
fi
_ONOS_TAG="$(echo "$YAML_VALUES" | yq read - images.onos.tag)"
if [ -z "$_ONOS_TAG" ]; then
_ONOS_TAG="$(echo "$YAML_VALUES" | yq read - defaults.image_tag)"
fi
if [ -z "$_ONOS_TAG" ]; then
_ONOS_TAG="master"
fi
_ONOS_PULL_POLICY="$(echo "$YAML_VALUES" | yq read - images.onos.pullPolicy)"
if [ -z "$_ONOS_PULL_POLICY" ]; then
_ONOS_PULL_POLICY="$(echo "$YAML_VALUES" | yq read - defaults.image_pullPolicy)"
fi
if [ -z "$_ONOS_PULL_POLICY" ]; then
_ONOS_PULL_POLICY="Always"
fi
INTERNAL_EXTRA_HELM_INSTALL_ARGS="--set image.pullPolicy=$_ONOS_PULL_POLICY,image.repository=$_ONOS_REPO,image.tag=$_ONOS_TAG,replicas=$NUM_OF_ONOS,atomix.replicas=$NUM_OF_ATOMIX" \
helm_install - "$INFRA_NS" onos "$ONOS_CLASSIC_CHART" "$ONOS_CLASSIC_CHART_VERSION" "+onos" "$_HELM_DESC ONOS CLASSIC"
INTERNAL_EXTRA_HELM_INSTALL_ARGS=
else
_ONOS_TAG="$(echo "$YAML_VALUES" | yq read - images.onos.tag)"
if [ -z "$_ONOS_TAG" ]; then
_ONOS_TAG="$(echo "$YAML_VALUES" | yq read - defaults.image_tag)"
fi
if [ -z "$_ONOS_TAG" ]; then
_ONOS_TAG="master"
fi
espin "$VERIFIED"
fi
wait_for_pods - "$INFRA_NS" $((NUM_OF_ONOS + NUM_OF_ATOMIX)) "includes" "Waiting for ONOS CLASSIC to start" "$NO_LABEL" "onos-.*"
elif [ "$WITH_ONOS" == "micro" ]; then
bspin "Verify micro-ONOS installed $BIRD"
# We should never get here
>&2 echo -e "${RED}${BOLD}${ERROR}ERROR:${NORMAL}${RED} Micro ONOS not currently supported${NORMAL}"
exit 1
fi
if is_in "$WITH_ONOS" "yes,classic" && [ "$WITH_PORT_FORWARDS" == "yes" ]; then
bspin - "Forward ONOS API port $FORWARD"
SVC=
kill_port_forward "$INFRA_NS" onos-onos-classic-hs
port_forward "$INFRA_NS" onos-onos-classic-hs "$ONOS_API_PORT:8181" "$ONOS_SSH_PORT:8101"
espin - "$VERIFIED"
_ONOS_API_EP="127.0.0.1:$ONOS_API_PORT"
else
_ONOS_API_EP="$(get_service_ep "$INFRA_NS" onos-onos-classic-hs)"
fi
bspin - "Verify or download ONOS configuration support files $DOWNLOAD"
ONOS_FILES="olt-onos-enableExtraneousRules.json onos-aaa.json \
onos-dhcpl2relay.json onos-sadis-sample.json"
(set -x; mkdir -p ./onos-files >>"$LOG" 2>&1) >>"$LOG" 2>&1
ERR_OUT="$(mktemp)"
for i in $ONOS_FILES; do
if [ ! -r "./onos-files/$i" ]; then
if ! (set -x; curl --fail -o "./onos-files/$i" -sSL "https://raw.githubusercontent.com/opencord/kind-voltha/$VK_RELEASE/onos-files/$i" >>"$LOG" 2>"$ERR_OUT") >>"$LOG" 2>&1; then
espin - "$THEX"
echo -e "${RED}${BOLD}${ERROR}ERROR: $i${NORMAL}${RED} - $(cat "$ERR_OUT")${NORMAL}"
echo "ERROR: $(cat "$ERR_OUT")" >>"$LOG"
rm -rf "$ERR_OUT" "./onos-files/$i"
exit 1
fi
rm -rf "$ERR_OUT"
fi
done
rm -rf "$ERR_OUT"
espin - "$VERIFIED"
if [ "$INSTALL_ONOS_APPS" == "yes" ]; then
bspin - "Installing custom ONOS applications"
if [ -x onos-files/onos-apps ] && [ "$(find onos-files/onos-apps -name "*.oar" 2>/dev/null | wc -l)" -gt 0 ]; then
for OAR in onos-files/onos-apps/*.oar; do
sspin - "Installing custom ONOS applications - $OAR$CEOL"
override_onos_app "$OAR"
done
espin - "$VERIFIED Installing custom ONOS applications$CEOL"
else
espin - "$NOT_VERIFIED Installing custom ONOS applications - None Found"
fi
fi
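# Custom applications are picked up from onos-files/onos-apps/*.oar when
# INSTALL_ONOS_APPS=yes; with no .oar files present the step is skipped.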
if [ "$WITH_KAFKA" != "no" ]; then
check_onos_app_active org.opencord.kafka
if is_in "$WITH_KAFKA" "yes,external"; then
_HOST=kafka.$INFRA_NS.svc
_PORT=9092
elif [ "$WITH_KAFKA" != "no" ]; then
_HOST="$(echo "$WITH_KAFKA" | cut -d: -f1)"
_PORT="$(echo "$WITH_KAFKA" | cut -s -d: -f2)"
_PORT=${_PORT:-9092}
fi
push_onos_config "json" "Push ONOS Kafka Configuration" "network/configuration/apps/org.opencord.kafka" \
"$(cat <<EOJ | tr -d '[:space:]'
{
"kafka": {
"bootstrapServers": "$_HOST:$_PORT"
}
}
EOJ
)"
fi
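# The pushed configuration can be read back through the ONOS REST API, e.g.
# (same credentials this script uses elsewhere):
#   curl -sSL --user karaf:karaf "http://$_ONOS_API_EP/onos/v1/network/configuration/apps/org.opencord.kafka"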
check_onos_app_active org.opencord.dhcpl2relay
push_onos_config "file" "Push ONOS DHCP L2 Relay Configuration" "network/configuration/apps/org.opencord.dhcpl2relay" "onos-files/onos-dhcpl2relay.json"
check_onos_app_active org.opencord.olt
# Default value for "enableEapol" is true in OLT App
if [ "$WITH_EAPOL" == "yes" ]; then
push_onos_config "json" "Enable VOLTHA ONOS EAPOL provisioning" "configuration/org.opencord.olt.impl.OltFlowService" '{"enableEapol":true}'
elif [ "$WITH_EAPOL" == "no" ]; then
push_onos_config "json" "Disable VOLTHA ONOS EAPOL provisioning" "configuration/org.opencord.olt.impl.OltFlowService" '{"enableEapol":false}'
fi
# Enable DHCP via component configs for DHCP programming in the OLT app. Note that SADIS config per UNI port/service is also required for DHCP to work.
# The two component configs are "enableDhcpV4", which is true by default, and "enableDhcpV6", which is false by default. These don't need to change until we support v6.
if [[ $(semver_greater "$_ONOS_TAG" "4.1.4") == "true" ]]; then
# newer versions use "enableDhcpOnNni" which is false by default
if [ "$WITH_DHCP" == "yes" ]; then
push_onos_config "json" "Enable VOLTHA ONOS DHCP on NNI port(s)" "configuration/org.opencord.olt.impl.OltFlowService" '{"enableDhcpOnNni":true}'
elif [ "$WITH_DHCP" == "no" ]; then
push_onos_config "json" "Disable VOLTHA ONOS DHCP on NNI port(s)" "configuration/org.opencord.olt.impl.OltFlowService" '{"enableDhcpOnNni":false}'
fi
else
# older versions use "enableDhcpOnProvisioning" which is false by default
if [ "$WITH_DHCP" == "yes" ]; then
push_onos_config "json" "Enable VOLTHA ONOS DHCP on provisioning" "configuration/org.opencord.olt.impl.OltFlowService" '{"enableDhcpOnProvisioning":true}'
elif [ "$WITH_DHCP" == "no" ]; then
push_onos_config "json" "Disable VOLTHA ONOS DHCP on provisioning" "configuration/org.opencord.olt.impl.OltFlowService" '{"enableDhcpOnProvisioning":false}'
fi
fi
# Enable IGMP via component configs for IGMP programming in the OLT app. Note that SADIS config on UNI port/service is also required for IGMP to work.
# Default value for "enableIgmpOnNni" is false in OLT App
if [[ $(semver_greater "$_ONOS_TAG" "4.1.4") == "true" ]]; then
if [ "$WITH_IGMP" == "yes" ]; then
push_onos_config "json" "Enable VOLTHA ONOS IGMP on NNI port(s)" "configuration/org.opencord.olt.impl.OltFlowService" '{"enableIgmpOnNni":true}'
elif [ "$WITH_IGMP" == "no" ]; then
push_onos_config "json" "Disable VOLTHA ONOS IGMP on NNI port(s)" "configuration/org.opencord.olt.impl.OltFlowService" '{"enableIgmpOnNni":false}'
fi
else
if [ "$WITH_IGMP" == "yes" ]; then
push_onos_config "json" "Enable VOLTHA ONOS IGMP on provisioning" "configuration/org.opencord.olt.impl.OltFlowService" '{"enableIgmpOnProvisioning":true}'
elif [ "$WITH_IGMP" == "no" ]; then
push_onos_config "json" "Disable VOLTHA ONOS IGMP on provisioning" "configuration/org.opencord.olt.impl.OltFlowService" '{"enableIgmpOnProvisioning":false}'
fi
fi
if [ "$ENABLE_ONOS_EXTRANEOUS_RULES" == "yes" ]; then
push_onos_config "file" "Enabling extraneous rules for ONOS" "configuration/org.onosproject.net.flow.impl.FlowRuleManager" "onos-files/olt-onos-enableExtraneousRules.json"
fi
if is_in "$CONFIG_SADIS" "yes,file"; then
check_onos_app_active org.opencord.sadis
push_onos_config "file" "[optional] Push ONOS SADIS Configuration: $SADIS_CFG" "network/configuration/apps/org.opencord.sadis" "$SADIS_CFG"
elif [ "$CONFIG_SADIS" == "bbsim" ]; then
push_onos_config "json" \
"[optional] Push ONOS configuration for BBSIM SADIS servers" \
"network/configuration/apps/org.opencord.sadis" \
"$(cat <<EOJ | tr -d '[:space:]'
{
"sadis": {
"integration": {
"url": "http://$BBSIM_SADIS_SVC.$BBSIM_NS.svc:50074/v2/subscribers/%s",
"cache": {
"enabled": true,
"maxsize": 50,
"ttl": "PT1m"
}
}
},
"bandwidthprofile": {
"integration": {
"url": "http://$BBSIM_SADIS_SVC.$BBSIM_NS.svc:50074/v2/bandwidthprofiles/%s",
"cache": {
"enabled": true,
"maxsize": 50,
"ttl": "PT1m"
}
}
}
}
EOJ
)"
elif [ "$CONFIG_SADIS" == "url" ]; then
push_onos_config "json" \
"[optional] Push ONOS configuration for custom SADIS and Bandwidth Profile servers" \
"network/configuration/apps/org.opencord.sadis" \
"$(cat <<EOJ | tr -d '[:space:]'
{
"sadis": {
"integration": {
"url": "$SADIS_SUBSCRIBERS",
"cache": {
"enabled": true,
"maxsize": 50,
"ttl": "PT1m"
}
}
},
"bandwidthprofile": {
"integration": {
"url": "$SADIS_BANDWIDTH_PROFILES",
"cache": {
"enabled": true,
"maxsize": 50,
"ttl": "PT1m"
}
}
}
}
EOJ
)"
fi
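# CONFIG_SADIS=url expects SADIS_SUBSCRIBERS and SADIS_BANDWIDTH_PROFILES to be
# set to the subscriber and bandwidth-profile endpoints; as in the BBSIM URLs
# above, the %s placeholder in each URL stands for the entry being looked up.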
fi
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
if [ "$WITH_RADIUS" == "yes" ]; then
STIME="$(date +%s)"
echo -e "Verify RADIUS $LOCK"
bspin - "Verify RADIUS Installed"
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$INFRA_NS" "^radius\$")" -ne 1 ]; then
espin - "$NOT_VERIFIED"
helm_install - "$INFRA_NS" radius "$RADIUS_CHART" "$RADIUS_CHART_VERSION" "+radius" "$_HELM_DESC RADIUS"
else
espin - "$VERIFIED"
fi
wait_for_pods - "$INFRA_NS" 1 "includes" "Waiting for RADIUS to start" "$NO_LABEL" "radius-.*"
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
fi
if is_in "$WITH_ONOS" "yes,legacy,classic" && [ "$WITH_RADIUS" != "no" ]; then
SVC_NAME=radius
if kubectl get -n "$INFRA_NS" "svc/radius-freeradius" >/dev/null 2>&1; then
SVC_NAME="radius-freeradius"
fi
_HOST="$SVC_NAME.$INFRA_NS.svc"
_PORT=1812
if [ "$WITH_RADIUS" != "yes" ]; then
_HOST="$(echo "$WITH_RADIUS" | cut -d: -f1)"
_PORT="$(echo "$WITH_RADIUS" | cut -s -d: -f2)"
_PORT=${_PORT:-1812}
fi
bspin "Configure ONOS RADIUS Connection $GEAR"
(set -x; sed -e "s/:RADIUS_SVC:/$_HOST/g" -e "s/:RADIUS_PORT:/$_PORT/" onos-files/onos-aaa.json | curl --fail -sSL --user karaf:karaf -X POST "http://$_ONOS_API_EP/onos/v1/network/configuration/apps/org.opencord.aaa" -H Content-type:application/json -d@- >>"$LOG" 2>&1) >>"$LOG" 2>&1
espin "$VERIFIED"
fi
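# WITH_RADIUS also accepts an external host[:port] (port defaults to 1812), in
# which case no freeradius chart is installed here and ONOS AAA is simply
# pointed at that server.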
if [ "$CONFIG_SADIS" == "external" ]; then
STIME="$(date +%s)"
echo -e "Verify BBSIM SADIS Service $PLUG"
bspin - "Verify required configmap"
(set -x; kubectl -n "$INFRA_NS" delete --ignore-not-found configmap kube-config >>"$LOG" 2>&1) >>"$LOG" 2>&1
(set -x; kubectl -n "$INFRA_NS" create configmap kube-config "--from-file=kube_config=$KUBECONFIG" >>"$LOG" 2>&1) >>"$LOG" 2>&1
espin - "$VERIFIED"
bspin - "Verify BBSIM SADIS Service Installed"
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$INFRA_NS" "^bbsim-sadis-server\$")" -ne 1 ]; then
espin - "$NOT_VERIFIED"
INTERNAL_EXTRA_HELM_INSTALL_ARGS="$EXTRA_HELM_FLAGS" helm_install - "$INFRA_NS" bbsim-sadis-server "$BBSIM_SADIS_SERVER_CHART" "$BBSIM_SADIS_SERVER_CHART_VERSION" "+sadis" "$_HELM_DESC BBSIM SADIS Server"
INTERNAL_EXTRA_HELM_INSTALL_ARGS=
else
espin - "$VERIFIED"
fi
wait_for_pods - "$INFRA_NS" 1 "includes" "Waiting for BBSIM SADIS Server to start" "$NO_LABEL" "bbsim-sadis-server-.*"
if is_in "$WITH_ONOS" "yes,classic,legacy"; then
if kubectl get -n "$INFRA_NS" svc/bbsim-sadis-server-bbsim-sadis-server >/dev/null 2>&1; then
BBSIM_SADIS_SVC="bbsim-sadis-server-bbsim-sadis-server"
else
BBSIM_SADIS_SVC="bbsim-sadis-server"
fi
push_onos_config "json" \
"[optional] Push ONOS configuration for custom SADIS and Bandwidth Profile servers" \
"network/configuration/apps/org.opencord.sadis" \
"$(cat <<EOJ | tr -d '[:space:]'
{
"sadis": {
"integration": {
"url": "http://$BBSIM_SADIS_SVC.$INFRA_NS.svc:58080/subscribers/%s",
"cache": {
"enabled": true,
"maxsize": 50,
"ttl": "PT1m"
}
}
},
"bandwidthprofile": {
"integration": {
"url": "http://$BBSIM_SADIS_SVC.$INFRA_NS.svc:58080/profiles/%s",
"cache": {
"enabled": true,
"maxsize": 50,
"ttl": "PT1m"
}
}
}
}
EOJ
)"
fi
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
fi
if [ "$WITH_EFK" == "yes" ]; then
STIME="$(date +%s)"
echo -e "Verify EFK $PLUG"
bspin - "Verify EFK Installed"
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$INFRA_NS" "^elasticsearch\$")" -ne 1 ] || [ "$(helm_is_deployed "$INFRA_NS" "^kibana\$")" -ne 1 ] || [ "$(helm_is_deployed "$INFRA_NS" "^fluentd\$")" -ne 1 ]; then
espin - "$NOT_VERIFIED"
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$INFRA_NS" "^elasticsearch\$")" -ne 1 ]; then
helm_install - "$INFRA_NS" elasticsearch "$ELASTICSEARCH_CHART" "$ELASTICSEARCH_CHART_VERSION" elasticsearch "$_HELM_DESC elasticsearch"
fi
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$INFRA_NS" "^kibana\$")" -ne 1 ]; then
helm_install - "$INFRA_NS" kibana "$KIBANA_CHART" "$KIBANA_CHART_VERSION" kibana "$_HELM_DESC kibana"
fi
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$INFRA_NS" "^fluentd\$")" -ne 1 ]; then
helm_install - "$INFRA_NS" fluentd "$FLUENTD_ELASTICSEARCH_CHART" "$FLUENTD_ELASTICSEARCH_CHART_VERSION" fluentd-elasticsearch "$_HELM_DESC fluentd-elasticsearch"
fi
else
espin - "$VERIFIED"
fi
EXPECT=2 # for elasticsearch and kibana
if [ "$TYPE" == "minimal" ]; then
EXPECT=$((EXPECT + 2)) # for fluentd on the 2 worker nodes
else
EXPECT=$((EXPECT + 3)) # for fluentd on the 3 worker nodes
fi
if [ "$SCHEDULE_ON_CONTROL_NODES" == "yes" ]; then
EXPECT=$((EXPECT + 1)) # for fluentd on the control plane node
fi
wait_for_pods - "$INFRA_NS" "$EXPECT" "includes" "Waiting for EFK to start" "$NO_LABEL" "fluentd-* elasticsearch-* kibana-*"
if [ "$WITH_PORT_FORWARDS" == "yes" ]; then
bspin - "Forward EFK port $FORWARD"
kill_port_forward "$INFRA_NS" elasticsearch-master
kill_port_forward "$INFRA_NS" kibana-kibana
port_forward "$INFRA_NS" elasticsearch-master "$ELASTICSEARCH_PORT:9200"
port_forward "$INFRA_NS" kibana-kibana "$KIBANA_PORT:5601"
espin - "$VERIFIED"
fi
do_curl "" "POST" "" "" "http://localhost:$KIBANA_PORT/api/saved_objects/index-pattern/logst*" "json" '{"attributes":{"title":"logst*","timeFieldName":"@timestamp"}}' "Verify logging index in EFK" "409,200" "-H Content-type:application/json -H kbn-xsrf:true" "$GEAR"
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
fi
if [ "$WITH_TRACING" == "yes" ]; then
STIME="$(date +%s)"
echo -e "Verify Jaeger Tracing $PLUG"
bspin - "Verify Jaeger Tracing Installed"
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$INFRA_NS" "^tracing\$")" -ne 1 ]; then
espin - "$NOT_VERIFIED"
INTERNAL_EXTRA_HELM_INSTALL_ARGS=
helm_install - "$INFRA_NS" tracing "$VOLTHA_TRACING_CHART" "$VOLTHA_TRACING_CHART_VERSION" tracing "$_HELM_DESC Jaeger Tracing"
else
espin - "$VERIFIED"
fi
wait_for_pods - "$INFRA_NS" 1 "includes" "Waiting for Jaeger Tracing to start" "$NO_LABEL" "jaeger*"
if [ "$WITH_PORT_FORWARDS" == "yes" ]; then
bspin - "Forward VOLTHA Tracing GUI port $FORWARD"
VOLTHA_TRACING_CHART_NAME=$(resolve_chart_name "$VOLTHA_TRACING_CHART")
kill_port_forward "$INFRA_NS" "tracing-${VOLTHA_TRACING_CHART_NAME}-jaeger-gui"
port_forward "$INFRA_NS" "tracing-${VOLTHA_TRACING_CHART_NAME}-jaeger-gui" "$TRACING_GUI_PORT:16686"
espin - "$VERIFIED"
fi
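# When forwarded, the Jaeger query UI is available at http://localhost:$TRACING_GUI_PORT.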
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
fi
if [ "$JUST_INFRA" == "yes" ]; then
echo "Infrastructure deployed, not deploying VOLTHA artifacts as requested. Good bye." | tee -a "$LOG"
echo ""
echo "Please issue the following commands in your terminal to ensure that you" | tee -a "$LOG"
echo "are accessing the correct Kubernetes/Kind cluster as well as have the " | tee -a "$LOG"
echo "tools required by VOLTHA in your command path. " | tee -a "$LOG"
echo "" | tee -a "$LOG"
echo -en "$BOLD"
if [ "$DEPLOY_K8S" == "yes" ]; then
echo "export KUBECONFIG=\"$HOME/.kube/kind-config-voltha-$NAME\"" | tee -a "$LOG"
fi
echo "export VOLTCONFIG=\"$HOME/.volt/config-$NAME\"" | tee -a "$LOG"
echo "export PATH=$GOPATH/bin:\$PATH" | tee -a "$LOG"
echo " "
echo -en "$NORMAL"
echo "To configure your VOLTHA stack to use this infrastructure please export these variables:" | tee -a "$LOG"
echo -en "$BOLD"
echo " export INFRA_NS=$INFRA_NS"
echo " export WITH_ETCD=etcd.$INFRA_NS.svc:2379"
echo " export WITH_KAFKA=kafka.$INFRA_NS.svc:9092"
echo " export WITH_ONOS=onos-onos-classic-hs.$INFRA_NS.svc:6653"
echo -en "$NORMAL"
echo " "
echo "Or use:"
echo " "
echo -en "$BOLD"
echo " INFRA_NS=$INFRA_NS WITH_ETCD=etcd.$INFRA_NS.svc:2379 WITH_KAFKA=kafka.$INFRA_NS.svc:9092 WITH_ONOS=onos-onos-classic-hs.$INFRA_NS.svc:6653 ./voltha up"
echo -en "$NORMAL"
echo " "
echo "If you are deploying mutltiple VOLTHA stack, also remember to configure a different NAME, NAMESPACE and BBSIM_BASE_INDEX for each stack:"
echo -en "$BOLD"
echo " export NAME=voltha1"
echo " export VOLTHA_NS=voltha1"
echo " export ADAPTER_NS=voltha1"
echo " export BBSIM_NS=voltha1"
echo " export BBSIM_BASE_INDEX=1"
echo -en "$NORMAL"
echo " "
echo "Or use:"
echo " "
echo -en "$BOLD"
echo " INFRA_NS=$INFRA_NS WITH_ETCD=etcd.$INFRA_NS.svc:2379 WITH_KAFKA=kafka.$INFRA_NS.svc:9092 WITH_ONOS=onos-onos-classic-hs.$INFRA_NS.svc:6653" \
"NAME=voltha1 VOLTHA_NS=voltha1 ADAPTER_NS=voltha1 BBSIM_NS=voltha1 BBSIM_BASE_INDEX=1 ./voltha up"
echo -en "$NORMAL"
exit 0
fi
STIME="$(date +%s)"
EXPECT=2
# therecanbeonlyone is hardcoded to true for backward compatibility
INTERNAL_EXTRA_HELM_INSTALL_ARGS="--set therecanbeonlyone=true"
if [[ $(semver_greater "$VOLTHA_CHART_VERSION" "2.6.7") == "true" ]]; then
# this is past voltha-2.5, which supports multiple stacks;
# customize the topics in case multiple stacks share the same Kafka
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set defaults.topics.core_topic=$NAME-rwcore,defaults.kv_store_data_prefix=service/$NAME"
fi
case $WITH_ETCD in
no)
;;
yes|external)
_HOST="etcd.$INFRA_NS.svc"
_PORT=2379
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.etcd.service=$_HOST --set services.etcd.port=$_PORT"
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.etcd.address=$_HOST:$_PORT"
;;
*)
_HOST="$(echo "$WITH_ETCD" | cut -d: -f1)"
_PORT="$(echo "$WITH_ETCD" | cut -s -d: -f2)"
_PORT=${_PORT:-2379}
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.etcd.service=$_HOST --set services.etcd.port=$_PORT"
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.etcd.address=$_HOST:$_PORT"
;;
esac
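# WITH_ETCD may also name an existing cluster as host[:port] (port defaults to
# 2379), e.g. WITH_ETCD=etcd.<infra-namespace>.svc:2379; the same host/port is
# then wired into the adapter charts further below.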
case $WITH_KAFKA in
no)
;;
yes|external)
_HOST=kafka.$INFRA_NS.svc
_PORT=9092
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set kafka_broker=$_HOST:$_PORT --set services.kafka.adapter.service=$_HOST --set services.kafka.adapter.port=$_PORT --set services.kafka.cluster.service=$_HOST --set services.kafka.cluster.port=$_PORT"
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.kafka.adapter.address=$_HOST:$_PORT --set services.kafka.cluster.address=$_HOST:$_PORT"
;;
*)
_HOST="$(echo "$WITH_KAFKA" | cut -d: -f1)"
_PORT="$(echo "$WITH_KAFKA" | cut -s -d: -f2)"
_PORT=${_PORT:-9092}
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set kafka_broker=$_HOST:$_PORT --set services.kafka.adapter.service=$_HOST --set services.kafka.adapter.port=$_PORT --set services.kafka.cluster.service=$_HOST --set services.kafka.cluster.port=$_PORT"
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.kafka.adapter.address=$_HOST:$_PORT --set services.kafka.cluster.address=$_HOST:$_PORT"
;;
esac
if is_in "$WITH_ONOS" "legacy"; then
_HOST=onos-openflow.$INFRA_NS.svc
_PORT=6653
elif is_in "$WITH_ONOS" "yes,classic"; then
_HOST=onos-onos-classic-hs.$INFRA_NS.svc
_PORT=6653
elif [ "$WITH_ONOS" != "no" ]; then
_HOST="$(echo "$WITH_ONOS" | cut -d: -f1)"
_PORT="$(echo "$WITH_ONOS" | cut -s -d: -f2)"
_PORT=${_PORT:-6653}
fi
compare_versions "$RESOLVED_VOLTHA_CHART_VERSION" 2.4.2
if [ "$?" == "1" ]; then
# voltha chart > 2.4.2 uses list of controllers
for NUM in $(seq 0 $((NUM_OF_ONOS-1))); do
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.controller[$NUM].service=onos-onos-classic-$NUM.onos-onos-classic-hs.$INFRA_NS.svc --set services.controller[$NUM].port=6653"
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.controller[$NUM].address=onos-onos-classic-$NUM.onos-onos-classic-hs.$INFRA_NS.svc:6653"
done
else
# voltha chart <= 2.4.2 uses single controller
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.controller.service=$_HOST --set services.controller.port=$_PORT"
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.controller.address=$_HOST:$_PORT"
fi
if [ "$WITH_TRACING" == "yes" ]; then
VOLTHA_TRACING_CHART_NAME=$(resolve_chart_name "$VOLTHA_TRACING_CHART")
_HOST="tracing-${VOLTHA_TRACING_CHART_NAME}-jaeger-agent.$INFRA_NS.svc"
_PORT=6831
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set tracing.enabled=true --set services.tracing_agent.address=$_HOST:$_PORT"
fi
bspin "Verify VOLTHA installed $HIGH_VOLTAGE"
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$VOLTHA_NS" "^voltha\$")" -ne 1 ]; then
espin "$NOT_VERIFIED"
helm_install - "$VOLTHA_NS" voltha "$VOLTHA_CHART" "$VOLTHA_CHART_VERSION" "+voltha" "$_HELM_DESC VOLTHA Core"
else
espin "$VERIFIED"
fi
INTERNAL_EXTRA_HELM_INSTALL_ARGS=
VOLTHA="voltha-ofagent-.* \
rw-core.* \
voltha-zookeeper-.*"
wait_for_pods - "$VOLTHA_NS" "$EXPECT" "includes" "Waiting for VOLTHA Core to start" "$NO_LABEL" "$VOLTHA"
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
if [ "$WITH_ADAPTERS" == "yes" ]; then
STIME="$(date +%s)"
EXPECT=0
if [[ $(semver_greater "$VOLTHA_ADAPTER_OPEN_ONU_CHART_VERSION" "2.5.0") == "true" ]]; then
# this is past voltha-2.5, which supports multiple stacks
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set defaults.topics.core_topic=$NAME-rwcore,defaults.topics.adapter_open_olt_topic=$NAME-openolt,defaults.topics.adapter_open_onu_topic=$NAME-brcm_openomci_onu,defaults.kv_store_data_prefix=service/$NAME"
fi
if is_in "$WITH_ETCD" "yes,external"; then
_HOST=etcd.$INFRA_NS.svc
_PORT=2379
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.etcd.service=$_HOST --set services.etcd.port=$_PORT"
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.etcd.address=$_HOST:$_PORT"
elif [ "$WITH_ETCD" != "no" ]; then
_HOST="$(echo "$WITH_ETCD" | cut -d: -f1)"
_PORT="$(echo "$WITH_ETCD" | cut -s -d: -f2)"
_PORT=${_PORT:-2379}
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.etcd.service=$_HOST --set services.etcd.port=$_PORT"
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.etcd.address=$_HOST:$_PORT"
fi
if is_in "$WITH_KAFKA" "yes,external"; then
_HOST=kafka.$INFRA_NS.svc
_PORT=9092
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set kafka_broker=$_HOST:$_PORT --set services.kafka.adapter.service=$_HOST --set services.kafka.adapter.port=$_PORT --set services.kafka.cluster.service=$_HOST --set services.kafka.cluster.port=$_PORT"
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.kafka.adapter.address=$_HOST:$_PORT --set services.kafka.cluster.address=$_HOST:$_PORT"
elif [ "$WITH_KAFKA" != "no" ]; then
_HOST="$(echo "$WITH_KAFKA" | cut -d: -f1)"
_PORT="$(echo "$WITH_KAFKA" | cut -s -d: -f2)"
_PORT=${_PORT:-9092}
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set kafka_broker=$_HOST:$_PORT --set services.kafka.adapter.service=$_HOST --set services.kafka.adapter.port=$_PORT --set services.kafka.cluster.service=$_HOST --set services.kafka.cluster.port=$_PORT"
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set services.kafka.adapter.address=$_HOST:$_PORT --set services.kafka.cluster.address=$_HOST:$_PORT"
fi
if [ "$WITH_TRACING" == "yes" ]; then
VOLTHA_TRACING_CHART_NAME=$(resolve_chart_name "$VOLTHA_TRACING_CHART")
_HOST="tracing-${VOLTHA_TRACING_CHART_NAME}-jaeger-agent.$INFRA_NS.svc"
_PORT=6831
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set tracing.enabled=true --set services.tracing_agent.address=$_HOST:$_PORT"
fi
echo -e "Verify Adapters $PLUG"
if [ "$WITH_SIM_ADAPTERS" == "yes" ]; then
bspin - "Verify Simulated Adapters installed"
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$ADAPTER_NS" "^sim\$")" -ne 1 ]; then
espin - "$NOT_VERIFIED"
helm_install - "$ADAPTER_NS" sim "$VOLTHA_ADAPTER_SIM_CHART" "$VOLTHA_ADAPTER_SIM_CHART_VERSION" - "$_HELM_DESC Simulated Adapters"
else
espin - "$VERIFIED"
fi
EXPECT="$((EXPECT + 2))"
fi
if [ "$WITH_OPEN_ADAPTERS" == "yes" ]; then
bspin - "Verify OpenOLT Adapter installed"
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$ADAPTER_NS" "^open-olt\$")" -ne 1 ]; then
espin - "$NOT_VERIFIED"
helm_install - "$ADAPTER_NS" open-olt "$VOLTHA_ADAPTER_OPEN_OLT_CHART" "$VOLTHA_ADAPTER_OPEN_OLT_CHART_VERSION" "+open-olt" "$_HELM_DESC OpenOLT Adapter"
else
espin - "$VERIFIED"
fi
EXPECT="$((EXPECT + 1))"
bspin - "Verify OpenONU Adapter installed"
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$ADAPTER_NS" "^open-onu\$")" -ne 1 ]; then
_ORIGINAL_EXTRA_HELM_INSTALL_ARGS="$INTERNAL_EXTRA_HELM_INSTALL_ARGS"
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set replicas.adapter_open_onu=$NUM_OF_OPENONU"
if [ "$WITH_INCREMENTAL_EVTO_UPDATE" == "yes" ]; then
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set defaults.incremental_evto_update=true"
fi
espin - "$NOT_VERIFIED"
helm_install - "$ADAPTER_NS" open-onu "$VOLTHA_ADAPTER_OPEN_ONU_CHART" "$VOLTHA_ADAPTER_OPEN_ONU_CHART_VERSION" "+open-onu" "$_HELM_DESC OpenONU Adapter"
INTERNAL_EXTRA_HELM_INSTALL_ARGS="$_ORIGINAL_EXTRA_HELM_INSTALL_ARGS"
else
espin - "$VERIFIED"
fi
EXPECT=$((EXPECT + NUM_OF_OPENONU))
fi
INTERNAL_EXTRA_HELM_INSTALL_ARGS=
ADAPTERS="adapter-.*"
wait_for_pods - "$ADAPTER_NS" "$EXPECT" "includes" "Waiting for adapters to start" "$NO_LABEL" "$ADAPTERS"
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
fi
if [ "$WITH_BBSIM" == "yes" ]; then
STIME="$(date +%s)"
echo -e "Verify BBSIM $PLUG"
bspin - "Verify BBSIM Installed"
for instance in $(seq 0 $((NUM_OF_BBSIM-1))); do
if [ "$LEGACY_BBSIM_INDEX" == "yes" ]; then
if [ "$instance" -eq 0 ]; then
instance_num=""
else
instance_num=$instance
fi
else
instance_num=$instance
fi
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed "$BBSIM_NS" "^bbsim${instance_num}\$")" -ne 1 ]; then
espin - "$NOT_VERIFIED"
if [[ $(semver_greater "$VOLTHA_BBSIM_CHART_VERSION" "4.0.3") == "true" ]]; then
# this is past voltha-2.5, which supports multiple stacks
OLT_ID=$BBSIM_BASE_INDEX$instance
else
# this is voltha-2.5 or earlier
OLT_ID=$instance
fi
if [[ $(semver_greater "$VOLTHA_BBSIM_CHART_VERSION" "3.1.0") == "true" ]]; then
# this is the latest BBSim with support for TT
# when we deploy multiple BBSims we need to update the configuration
# to avoid overlapping tags.
_TAG="$((900+instance))"
BBSIM_TMP="$(mktemp -u)"
sed -e "s/\":TAG:\"/$_TAG/g" "$BBSIM_CFG" > "$BBSIM_TMP"
INTERNAL_EXTRA_HELM_INSTALL_ARGS="--set olt_id=$OLT_ID -f $BBSIM_TMP"
else
# these are older versions of BBSim
S_TAG="$((900+instance))"
INTERNAL_EXTRA_HELM_INSTALL_ARGS="--set olt_id=$OLT_ID,s_tag=$S_TAG"
if [ "$WITH_EAPOL" == "yes" ]; then
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set auth=true"
else
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set auth=false"
fi
if [ "$WITH_DHCP" == "yes" ]; then
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set dhcp=true"
else
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set dhcp=false"
fi
fi
if is_in "$WITH_KAFKA" "yes,external"; then
_HOST=kafka.$INFRA_NS.svc
_PORT=9092
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set kafkaAddress=$_HOST:$_PORT"
elif [ "$WITH_KAFKA" != "no" ]; then
_HOST="$(echo "$WITH_KAFKA" | cut -d: -f1)"
_PORT="$(echo "$WITH_KAFKA" | cut -s -d: -f2)"
_PORT=${_PORT:-9092}
INTERNAL_EXTRA_HELM_INSTALL_ARGS+=" --set kafkaAddress=$_HOST:$_PORT"
fi
helm_install - "$BBSIM_NS" "bbsim${instance_num}" "$VOLTHA_BBSIM_CHART" "$VOLTHA_BBSIM_CHART_VERSION" "+bbsim" "$_HELM_DESC BBSIM${instance_num}"
INTERNAL_EXTRA_HELM_INSTALL_ARGS=
rm -f "$BBSIM_TMP"
else
espin - "$VERIFIED"
fi
done
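# One BBSIM release is installed per NUM_OF_BBSIM instance (bbsim0, bbsim1, ...,
# or bbsim, bbsim1, ... with LEGACY_BBSIM_INDEX=yes); with chart versions newer
# than 4.0.3 each OLT id is prefixed with BBSIM_BASE_INDEX so that multiple
# stacks sharing a cluster do not collide.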
wait_for_pods - "$BBSIM_NS" "$NUM_OF_BBSIM" "includes" "Waiting for BBSIM to start" "$BBSIM_LABEL" "bbsim*"
if [ "$WITH_PORT_FORWARDS" == "yes" ]; then
for instance in $(seq 0 $((NUM_OF_BBSIM-1))); do
bspin - "Forward BBSIM DMI port $FORWARD"
kill_port_forward "$BBSIM_NS" "bbsim${instance}"
_PORT=$((50075+instance))
port_forward "$BBSIM_NS" "bbsim${instance}" "50075:$_PORT"
espin - "$VERIFIED"
done
fi
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
fi
if [ "$WITH_PORT_FORWARDS" == "yes" ]; then
STIME="$(date +%s)"
bspin - "Forward VOLTHA API port $FORWARD"
kill_port_forward "$VOLTHA_NS" voltha-api
kill_port_forward "$VOLTHA_NS" voltha-voltha-api
VOLTHA_PREFIX=
if kubectl get -n "$VOLTHA_NS" "svc/voltha-voltha-api" >/dev/null 2>&1; then
VOLTHA_PREFIX="voltha-"
fi
port_forward "$VOLTHA_NS" "${VOLTHA_PREFIX}voltha-api" "$VOLTHA_API_PORT:55555"
espin - "$VERIFIED"
if [ "$WITH_PPROF" == "yes" ]; then
VOLTHA_OPENOLT_PREFIX=
if kubectl get -n "$ADAPTER_NS" "svc/open-olt-adapter-open-olt-profiler" >/dev/null 2>&1; then
VOLTHA_OPENOLT_PREFIX="open-olt-"
fi
bspin - "Forward PProf ports port $FORWARD"
kill_port_forward "$VOLTHA_NS" voltha-rw-core-profiler
kill_port_forward "$VOLTHA_NS" voltha-voltha-rw-core-profiler
kill_port_forward "$VOLTHA_NS" adapter-open-olt-profiler
kill_port_forward "$VOLTHA_NS" open-olt-adapter-open-olt-profiler
kill_port_forward "$VOLTHA_NS" voltha-of-agent-profiler
kill_port_forward "$VOLTHA_NS" voltha-voltha-of-agent-profiler
port_forward "$VOLTHA_NS" "${VOLTHA_PREFIX}voltha-rw-core-profiler" "$VOLTHA_PPROF_PORT:6060"
port_forward "$ADAPTER_NS" "${VOLTHA_OPENOLT_PREFIX}adapter-open-olt-profiler" "$OPENOLT_PPROF_PORT:6060"
port_forward "$VOLTHA_NS" "${VOLTHA_PREFIX}voltha-of-agent-profiler" "$OFAGENT_PPROF_PORT:6060"
espin - "$VERIFIED"
fi
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
fi
if [ "$WITH_CHAOS" == "yes" ]; then
STIME="$(date +%s)"
echo -e "Verify kube-monkey $LOCK"
bspin - "Verify kube-monkey Installed"
if [ "$HELM_USE_UPGRADE" == "yes" ] || [ "$(helm_is_deployed kube-monkey "^monkey\$")" -ne 1 ]; then
espin - "$NOT_VERIFIED"
helm_install - kube-monkey monkey ./kube-monkey/helm/kubemonkey latest - "$_HELM_DESC Chaos Monkey"
else
espin - "$VERIFIED"
fi
wait_for_pods - "kube-monkey" 1 "includes" "Waiting for Chaos to start" "$NO_LABEL" "monkey-.*"
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
printtime $((NOW - STIME))
fi
fi
bspin "Create voltctl configuration file"
(set -x; mkdir -p "$HOME/.volt" >>"$LOG" 2>&1) >>"$LOG" 2>&1
MIN_VC_VERSION="$(echo -e "1.0.15\n$VC_VERSION" | sort -V | head -1)"
if [ "$WITH_PORT_FORWARDS" == "yes" ]; then
KAFKA_FLAG="-k localhost:$VOLTHA_KAFKA_PORT"
SERVER_FLAG="-s localhost:$VOLTHA_API_PORT"
ETCD_FLAG=
if [ "$MIN_VC_VERSION" == "1.0.15" ]; then
ETCD_FLAG="-e localhost:$VOLTHA_ETCD_PORT"
fi
else
KAFKA_FLAG="-k $(get_service_ep "$INFRA_NS" kafka)"
SERVER_FLAG="-s $(get_service_ep "$VOLTHA_NS" voltha-api)"
ETCD_FLAG=
if [ "$MIN_VC_VERSION" == "1.0.15" ]; then
ETCD_FLAG="-e $(get_service_ep "$INFRA_NS" etcd)"
fi
fi
CMD=("voltctl $KAFKA_FLAG $SERVER_FLAG $ETCD_FLAG config")
(set -x; ${CMD[*]} > "$HOME/.volt/config-$NAME" 2>>"$LOG") >>"$LOG" 2>&1
#(set -x; voltctl "$KAFKA_FLAG" "$SERVER_FLAG" "$ETCD_FLAG" config > "$HOME/.volt/config-$NAME" 2>>"$LOG") >>"$LOG" 2>&1
espin "$VERIFIED"
if [ ! -f "$NAME-env.sh" ]; then
touch "$NAME-env.sh"
fi
for O in $ALL_OPTIONS; do
VAL="$(eval echo "\$$O")"
if [ -n "$VAL" ] && [ "$(grep -c "^export $O=" "$NAME-env.sh")" -eq 0 ]; then
echo "export $O=\"$(eval echo "\$$O")\"" >> "$NAME-env.sh"
fi
done
if [ "$DEPLOY_K8S" == "yes" ] && [ "$(grep -c "^export KUBECONFIG=" "$NAME-env.sh")" -eq 0 ]; then
echo "export KUBECONFIG=\"$HOME/.kube/kind-config-voltha-$NAME\"" >> "$NAME-env.sh"
fi
if [ "$(grep -c "^export VOLTCONFIG=" "$NAME-env.sh")" -eq 0 ]; then
echo "export VOLTCONFIG=\"$HOME/.volt/config-$NAME\"" >> "$NAME-env.sh"
fi
if [ "$(grep -c "^export PATH=" "$NAME-env.sh")" -eq 0 ]; then
echo "export PATH=\"$GOPATH/bin:\$PATH\"" >> "$NAME-env.sh"
fi
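# $NAME-env.sh captures every option used for this run, plus KUBECONFIG,
# VOLTCONFIG and PATH; sourcing it in a new shell ('. ./$NAME-env.sh')
# reproduces the same settings before re-running this script.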
echo ""
echo "Please issue the following commands in your terminal to ensure that you" | tee -a "$LOG"
echo "are accessing the correct Kubernetes/Kind cluster as well as have the " | tee -a "$LOG"
echo "tools required by VOLTHA in your command path. " | tee -a "$LOG"
echo "" | tee -a "$LOG"
echo -en "$BOLD"
if [ "$DEPLOY_K8S" == "yes" ]; then
echo "export KUBECONFIG=\"$HOME/.kube/kind-config-voltha-$NAME\"" | tee -a "$LOG"
fi
echo "export VOLTCONFIG=\"$HOME/.volt/config-$NAME\"" | tee -a "$LOG"
echo "export PATH=$GOPATH/bin:\$PATH" | tee -a "$LOG"
echo -en "$NORMAL"
echo "" | tee -a "$LOG"
echo "Thank you for choosing kind-voltha for you quick cluster needs." | tee -a "$LOG"
if [ "$WITH_TIMINGS" == "yes" ]; then
NOW="$(date +%s)"
echo -e "$CLOCK ${BOLD}TOTAL: $(duration $((NOW - TOTAL_START_TIME)))${NORMAL}"
fi