VOL-1054 Kubernetes Conversion and PonsimV2

Update Makefile with selective changes from the one from SIAB to allow compatibility with Helm Chart changes
Change class and file names according to best practices
Change preprovisioningTest -> Preprovisioning
Change-Id: Ib15e34ad397d2307b5ba3890cc6e8b245e5e3004
diff --git a/requirements.txt b/requirements.txt
index 4ed57f4..e81de3e 100755
--- a/requirements.txt
+++ b/requirements.txt
@@ -61,6 +61,7 @@
etcd3==0.7.0
pyparsing==2.2.0
packaging==17.1
+pexpect==4.6.0
# python-consul>=0.6.1 we need the pre-released version for now, because 0.6.1 does not
# yet support Twisted. Once this is released, it will be the 0.6.2 version
diff --git a/tests/atests/build/Makefile b/tests/atests/build/Makefile
new file mode 100644
index 0000000..75cf937
--- /dev/null
+++ b/tests/atests/build/Makefile
@@ -0,0 +1,180 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+SHELL := /bin/bash
+BUILD ?= /tmp
+M ?= $(BUILD)/milestones
+MYDIR := $(dir $(realpath $(firstword $(MAKEFILE_LIST))))
+
+HELM_VERSION ?= "2.10.0"
+HELM_SHA256SUM ?= "0fa2ed4983b1e4a3f90f776d08b88b0c73fd83f305b5b634175cb15e61342ffe"
+HELM_PLATFORM ?= "linux-amd64"
+
+KAFKA_CHART_VERSION ?= 0.8.8
+
+all: $(M)/voltha_ponsim_running
+
+$(M)/setup:
+ echo "MYDIR = ${MYDIR}"
+ mkdir -p $(M)
+ sudo apt update
+ sudo apt install -y httpie jq software-properties-common
+ sudo swapoff -a
+ touch $@
+
+/usr/bin/docker: | $(M)/setup
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv 0EBFCD88
+ sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(shell lsb_release -cs) stable"
+ sudo apt update
+ sudo apt install -y "docker-ce=17.06*"
+
+/usr/bin/kubeadm: | $(M)/setup /usr/bin/docker
+ curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+ echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /tmp/kubernetes.list
+ sudo cp /tmp/kubernetes.list /etc/apt/sources.list.d/kubernetes.list
+ sudo apt update
+ sudo apt install -y "kubeadm=1.11.3-*" "kubelet=1.11.3-*" "kubectl=1.11.3-*"
+
+/usr/local/bin/helm:
+ curl -L -o /tmp/helm.tgz "https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-${HELM_PLATFORM}.tar.gz"
+ echo "${HELM_SHA256SUM} /tmp/helm.tgz" | sha256sum -c -
+ cd /tmp; tar -xzvf helm.tgz; sudo mv ${HELM_PLATFORM}/helm /usr/local/bin/helm
+ sudo chmod a+x /usr/local/bin/helm
+ rm -rf /tmp/helm.tgz /tmp/${HELM_PLATFORM}
+
+$(M)/kubeadm: | $(M)/setup /usr/bin/kubeadm
+ sudo kubeadm init --pod-network-cidr=192.168.0.0/16
+ mkdir -p $(HOME)/.kube
+ sudo cp -f /etc/kubernetes/admin.conf $(HOME)/.kube/config
+ sudo chown $$(id -u):$$(id -g) $(HOME)/.kube/config
+ kubectl apply -f https://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml
+ kubectl taint nodes --all node-role.kubernetes.io/master-
+ touch $@
+
+$(M)/helm-init: | $(M)/kubeadm /usr/local/bin/helm
+ kubectl create serviceaccount --namespace kube-system tiller
+ kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
+ helm init --service-account tiller
+ until helm ls >& /dev/null; \
+ do \
+ echo "Waiting for Helm to be ready"; \
+ sleep 5; \
+ done
+ helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/
+ touch $@
+
+$(HOME)/cord/helm-charts: | $(M)/setup
+ mkdir -p $(HOME)/cord
+ cd $(HOME)/cord; git clone https://gerrit.opencord.org/helm-charts
+
+$(M)/kafka: | $(HOME)/cord/helm-charts $(M)/helm-init
+ cd $(HOME)/cord/helm-charts && \
+ helm upgrade --install cord-kafka --version $(KAFKA_CHART_VERSION) -f examples/kafka-single.yaml incubator/kafka
+ touch $@
+
+$(M)/kafka-running: | $(M)/kafka
+ kubectl wait pod/cord-kafka-0 --for condition=Ready --timeout=180s
+ touch $@
+
+$(M)/onos: | $(M)/kafka-running
+ cd $(HOME)/cord/helm-charts; helm upgrade --install onos onos -f configs/onos.yaml -f configs/seba-ponsim.yaml --set onosImage=voltha/voltha-onos:latest
+ touch $@
+
+$(M)/voltha: | $(M)/kafka-running $(M)/onos
+ cd $(HOME)/cord/helm-charts; helm dep up voltha
+ cd $(HOME)/cord/helm-charts; helm upgrade --install voltha -f configs/seba-ponsim.yaml \
+ --set etcd-operator.customResources.createEtcdClusterCRD=false \
+ voltha
+ touch $@
+
+$(M)/etcd-operator-ready: | $(M)/voltha
+ until kubectl api-versions | grep etcd.database.coreos.com/v1beta2; \
+ do \
+ echo "Waiting for etcd.database.coreos.com/v1beta2 to be available"; \
+ sleep 5; \
+ done
+ until kubectl api-resources | grep EtcdCluster; \
+ do \
+ echo "Waiting for EtcdCluster API resource to be available"; \
+ sleep 5; \
+ done
+ touch $@
+
+$(M)/etcd-cluster: | $(M)/etcd-operator-ready
+ cd $(HOME)/cord/helm-charts; helm upgrade voltha -f configs/seba-ponsim.yaml \
+ --set etcd-operator.customResources.createEtcdClusterCRD=true \
+ voltha
+ touch $@
+
+$(M)/voltha-running: | $(M)/etcd-cluster
+ $(HOME)/cord/helm-charts/scripts/wait_for_pods.sh voltha
+ touch $@
+
+$(M)/ponsim: | $(M)/voltha-running
+ cd $(HOME)/cord/helm-charts; helm upgrade --install ponnet ponnet
+ $(HOME)/cord/helm-charts/scripts/wait_for_pods.sh kube-system
+ cd $(HOME)/cord/helm-charts; helm upgrade --install ponsimv2 ponsimv2 -f configs/seba-ponsim.yaml
+ touch $@
+
+$(M)/pon0_fwd: | $(M)/ponsim
+ echo 8 > /tmp/pon0_group_fwd_mask
+ until sudo cp /tmp/pon0_group_fwd_mask /sys/class/net/pon0/bridge/group_fwd_mask; \
+ do \
+ echo "waiting for pon0..."; \
+ sleep 5; \
+ done
+ rm /tmp/pon0_group_fwd_mask
+ touch $@
+
+$(M)/voltha_ponsim_running: | $(M)/pon0_fwd
+ $(HOME)/cord/helm-charts/scripts/wait_for_pods.sh
+ touch $@
+ echo "Voltha Test Framework Ready!"
+
+$(M)/authenticate: $(M)/voltha_ponsim_running
+ timeout 60s bash -c "until http -a admin@opencord.org:letmein GET http://127.0.0.1:30001/xosapi/v1/att-workflow-driver/attworkflowdriverserviceinstances |jq '.items[0].authentication_state'|grep AWAITING; do echo 'waiting for att-workflow-driver to be in AWAITING state'; sleep 5; done"
+ kubectl -n voltha exec $(shell kubectl -n voltha get pod|grep "^rg-"|cut -d' ' -f1) -- wpa_supplicant -i eth0 -Dwired -c /etc/wpa_supplicant/wpa_supplicant.conf -B
+ timeout 60s bash -c "until http -a admin@opencord.org:letmein GET http://127.0.0.1:30001/xosapi/v1/att-workflow-driver/attworkflowdriverserviceinstances |jq '.items[0].authentication_state'|grep APPROVED; do echo 'waiting for att-workflow-driver to be in APPROVED state'; sleep 5; done"
+ touch $@
+
+$(M)/dhclient: $(M)/authenticate
+ sudo iptables -P FORWARD ACCEPT
+ timeout 60s bash -c "until http -a admin@opencord.org:letmein GET http://127.0.0.1:30001/xosapi/v1/fabric-crossconnect/fabriccrossconnectserviceinstances |jq '.items[0].backend_status'|grep OK; do echo 'waiting for fabric-crossconnect to be synchronized';sleep 5; done"
+ kubectl -n voltha exec $(shell kubectl -n voltha get pod|grep "^rg-"|cut -d' ' -f1) -- dhclient
+ kubectl -n voltha exec $(shell kubectl -n voltha get pod|grep "^rg-"|cut -d' ' -f1) -- dhclient -r
+ kubectl -n voltha exec $(shell kubectl -n voltha get pod|grep "^rg-"|cut -d' ' -f1) -- dhclient
+ touch $@
+
+$(M)/pingtest: $(M)/dhclient
+ kubectl -n voltha exec $(shell kubectl -n voltha get pod|grep "^rg-"|cut -d' ' -f1) -- ping -c 3 172.18.0.10
+ touch $@
+
+run-tests: $(M)/pingtest
+
+remove-chart-milestones:
+ cd $(M); sudo rm -f setup kafka kafka-running onos voltha etcd-operator-ready etcd-cluster \
+ pon0_fwd voltha-running ponsim voltha_ponsim_running
+remove-kube-milestones:
+ cd $(M); sudo rm -f kubeadm helm-init
+
+remove-test-milestones:
+ cd $(M); sudo rm -f authenticate dhclient pingtest
+
+teardown-charts: remove-chart-milestones
+ helm delete --purge $(shell helm ls -q)
+
+reset-kubeadm: remove-chart-milestones remove-kube-milestones
+ sudo kubeadm reset -f
+ sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X
+
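
A rough usage sketch for the new Makefile (assuming an Ubuntu 16.04/xenial host, since the
kubeadm and Docker targets above install xenial packages; build.sh below drives the same
targets with sudo):

    # deploy kubeadm, Helm, Kafka, ONOS, VOLTHA and PonsimV2 (the default target)
    cd tests/atests/build && sudo make

    # run the authentication/dhclient/ping milestones
    sudo make run-tests

    # remove the charts only, or reset the whole kubeadm cluster
    sudo make teardown-charts
    sudo make reset-kubeadm
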
diff --git a/tests/atests/build/devices_json b/tests/atests/build/devices_json
new file mode 100644
index 0000000..6fbc4d5
--- /dev/null
+++ b/tests/atests/build/devices_json
@@ -0,0 +1 @@
+{"of:0001000000000005":{"basic":{"driver":"voltha"},"accessDevice":{"uplink":"0","vlan":"1004"}},"of:0001000000000006":{"basic":{"driver":"voltha"},"accessDevice":{"uplink":"0","vlan":"1005"}},"of:0001000000000003":{"basic":{"driver":"voltha"},"accessDevice":{"uplink":"0","vlan":"1002"}},"of:0001000000000004":{"basic":{"driver":"voltha"},"accessDevice":{"uplink":"0","vlan":"1003"}},"of:0000aabbccddeeff":{"basic":{"driver":"voltha"},"accessDevice":{"uplink":"0","vlan":"2"}},"of:0001000000000002":{"basic":{"driver":"voltha"},"accessDevice":{"uplink":"0","vlan":"1001"}},"of:0001aabbccddeeff":{"basic":{"driver":"voltha"},"accessDevice":{"uplink":"0","vlan":"2"}}}
diff --git a/tests/atests/build/sadis_json b/tests/atests/build/sadis_json
new file mode 100644
index 0000000..43838a1
--- /dev/null
+++ b/tests/atests/build/sadis_json
@@ -0,0 +1,32 @@
+{
+ "org.opencord.sadis":{
+ "sadis":{
+ "integration":{
+ "cache":{
+ "enabled":true,
+ "maxsize":50,
+ "ttl":"PT1m"
+ }
+ },
+ "entries":[
+ {
+ "id":"00:26:F2:F3:17:B0",
+ "cTag":33,
+ "sTag":44,
+ "nasPortId":"uni-128"
+ },
+ {
+ "id":"1d1d1d1d1d1d11",
+ "hardwareIdentifier":"00:1b:22:00:b1:78",
+ "ipAddress":"192.168.1.252",
+ "nasId":"B100-NASID"
+ },
+ {
+ "id":"olt.voltha.svc:50060",
+ "uplinkPort":2
+ }
+ ]
+ }
+ }
+}
+
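
These two JSON fragments are what alterOnosNetCfg() in volthaMngr.py posts to ONOS. Done by
hand, the equivalent would be roughly the following, with <onos-ui-ip> standing in for the
address of the onos-ui service:

    ONOS=<onos-ui-ip>:8181   # placeholder for the onos-ui service address
    curl --user karaf:karaf -X POST -H "Content-Type: application/json" \
        http://$ONOS/onos/v1/network/configuration/devices/ -d @tests/atests/build/devices_json
    curl --user karaf:karaf -X POST -H "Content-Type: application/json" \
        http://$ONOS/onos/v1/network/configuration/apps/ -d @tests/atests/build/sadis_json
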
diff --git a/tests/atests/common/auto_test.py b/tests/atests/common/auto_test.py
index d0bd327..8f1997e 100755
--- a/tests/atests/common/auto_test.py
+++ b/tests/atests/common/auto_test.py
@@ -23,15 +23,10 @@
import time
import argparse
import volthaMngr
-import preprovisioningTest
+import preprovisioning
DEFAULT_LOG_DIR = '/tmp/voltha_test_results'
-def runOnos():
- os.system('docker-compose -f compose/docker-compose-auth-test.yml'
- ' up -d onos freeradius' + ' > /dev/null 2>&1')
-
-
def dirInit(logDir=DEFAULT_LOG_DIR,
volthaDir=os.environ['VOLTHA_BASE']):
print(__file__)
@@ -47,9 +42,9 @@
# In future in order to keep the history of jobs, the run time should be
# added to the log directory name
# logDir += '_' + currentTime
-
+
os.system('mkdir -p ' + logDir + ' > /dev/null 2>&1')
- os.system('rm -rf %s/*' + logDir)
+ os.system('rm -rf %s/*' % logDir)
print('Start Provisioning Test at: %s\nRoot Directory: %s\n'
'VOLTHA Directory: %s\nLog Directory: %s' %
(currentTime, rootDir, volthaDir, logDir))
@@ -74,8 +69,6 @@
volthaMngr.voltha_Initialize(ROOT_DIR, VOLTHA_DIR, LOG_DIR)
- runOnos()
-
- preprovisioningTest.runTest('172.17.0.1', 50060, LOG_DIR)
+ preprovisioning.runTest('olt.voltha.svc', 50060, LOG_DIR)
time.sleep(5)
diff --git a/tests/atests/common/build.sh b/tests/atests/common/build.sh
new file mode 100755
index 0000000..203f59b
--- /dev/null
+++ b/tests/atests/common/build.sh
@@ -0,0 +1,45 @@
+#!/bin/bash +x
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+SRC_DIR="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd)"
+BUILD_DIR="$SRC_DIR/../build"
+
+cd $BUILD_DIR
+
+if [ $# -ne 1 ]
+ then
+ echo "No arguments supplied"
+ exit 1
+fi
+if [ -z "$1" ]
+ then
+ echo "Empty argument supplied"
+ exit 1
+fi
+if [ $1 == "clear" ]
+ then
+ sudo make reset-kubeadm
+elif [ $1 == "start" ]
+ then
+ sudo make -f Makefile
+elif [ $1 == "stop" ]
+ then
+ pods=$( /usr/bin/kubectl get pods --all-namespaces 2>&1 | grep -c -e refused -e resource )
+ if [ $pods -eq 0 ]
+ then
+ sudo make teardown-charts
+ fi
+fi
+exit 0
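
The script takes exactly one argument; its three modes map onto the Makefile targets above,
approximately:

    ./tests/atests/common/build.sh start   # sudo make (deploy everything)
    ./tests/atests/common/build.sh stop    # sudo make teardown-charts, if the cluster responds
    ./tests/atests/common/build.sh clear   # sudo make reset-kubeadm
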
diff --git a/tests/atests/common/enable_bridge.sh b/tests/atests/common/enable_bridge.sh
deleted file mode 100755
index 9d17ae5..0000000
--- a/tests/atests/common/enable_bridge.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh +x
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-echo 'Enable Bridge'
-echo 8 > /sys/class/net/ponmgmt/bridge/group_fwd_mask
diff --git a/tests/atests/common/preprovisioning.py b/tests/atests/common/preprovisioning.py
new file mode 100755
index 0000000..9b19408
--- /dev/null
+++ b/tests/atests/common/preprovisioning.py
@@ -0,0 +1,89 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+vOLT-HA Pre-provisioning Test module
+"""
+
+import time
+import os
+import commands
+import testCaseUtils
+
+class Preprovisioning(object):
+
+ """
+ This class implements voltha pre-provisioning test
+ """
+
+ def __init__(self):
+ self.dirs = {}
+ self.dirs ['log'] = None
+ self.dirs ['root'] = None
+ self.dirs ['voltha'] = None
+
+ self.__oltIpAddress = None
+ self.__oltPort = None
+ self.__statusLine = ""
+ self.__fields = []
+
+ def pSetLogDirs(self, logDir):
+ testCaseUtils.configDirs(self, logDir)
+
+ def configure(self, oltIpAddress, oltPort):
+ self.__oltIpAddress = oltIpAddress
+ self.__oltPort = oltPort
+
+ def preprovisionOlt(self):
+ print('Do PROVISIONING')
+ testCaseUtils.send_command_to_voltha_cli(testCaseUtils.getDir(self, 'log'),
+ 'preprovision_olt -t ponsim_olt -H %s:%s' %
+ (self.__oltIpAddress, self.__oltPort),
+ 'voltha_preprovision_olt.log')
+ time.sleep(5)
+
+ def query_devices_before_enable(self):
+ testCaseUtils.send_command_to_voltha_cli(testCaseUtils.getDir(self, 'log'), 'devices',
+ 'voltha_devices_before_enable.log')
+ time.sleep(5)
+ grepCommand =\
+ "grep PREPROVISIONED %s/voltha_devices_before_enable.log " % testCaseUtils.getDir(self, 'log')
+ self.__statusLine = commands.getstatusoutput(grepCommand)[1]
+ self.__fields = testCaseUtils.parseFields(self.__statusLine)
+ self.__oltDeviceId = self.__fields[1].strip()
+ print ("OLT device id = %s" % self.__oltDeviceId)
+
+ def enable(self):
+ print('Enable %s OLT device' % self.__oltDeviceId)
+ testCaseUtils.send_command_to_voltha_cli(testCaseUtils.getDir(self, 'log'), 'enable ' + self.__oltDeviceId,
+ 'voltha_enable.log')
+
+ def query_devices_after_enable(self):
+ testCaseUtils.send_command_to_voltha_cli(testCaseUtils.getDir(self, 'log'), 'devices',
+ 'voltha_devices_after_enable.log')
+
+def runTest(oltIpAddress, oltPort, logDir):
+ preprovisioning = Preprovisioning()
+ preprovisioning.pSetLogDirs(logDir)
+ preprovisioning.configure(str(oltIpAddress), int(oltPort))
+ preprovisioning.preprovisionOlt()
+ preprovisioning.query_devices_before_enable()
+ preprovisioning.enable()
+ preprovisioning.query_devices_after_enable()
+
+
+
+
diff --git a/tests/atests/common/preprovisioningTest.py b/tests/atests/common/preprovisioningTest.py
deleted file mode 100755
index a8bbe3e..0000000
--- a/tests/atests/common/preprovisioningTest.py
+++ /dev/null
@@ -1,105 +0,0 @@
-#
-# Copyright 2018 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-vOLT-HA Pre-provisioning Test module
-"""
-
-import time
-import os
-import commands
-
-
-class preprovisioningTest(object):
-
- """
- This class implements voltha pre-provisioning test
- """
-
- def __init__(self):
- self.__oltIpAddress = None
- self.__oltPort = None
- self.__logDir = None
- self.__oltDeviceId = None
- self.__statusLine = ""
- self.__fields = []
-
- def configure(self, oltIpAddress, oltPort, logDir):
- self.__oltIpAddress = oltIpAddress
- self.__oltPort = oltPort
- self.__logDir = logDir
-
-
- def preprovisionOlt(self):
- print('Do PROVISIONING')
- self.send_command_to_voltha_cli(
- 'preprovision_olt -t ponsim_olt -H %s:%s' %
- (self.__oltIpAddress, self.__oltPort),
- 'voltha_preprovision_olt.log')
- time.sleep(5)
-
- def query_devices_before_enable(self):
- self.send_command_to_voltha_cli('devices',
- 'voltha_devices_before_enable.log')
- time.sleep(5)
- grepCommand =\
- "grep PREPROVISIONED %s/voltha_devices_before_enable.log " % self.__logDir
- self.__statusLine = commands.getstatusoutput(grepCommand)[1]
- self.__fields = self.parseFields(self.__statusLine)
- self.__oltDeviceId = self.__fields[1].strip()
- print ("OLT device id = %s" % self.__oltDeviceId)
-
- def enable(self):
- print('Enable %s OLT device' % self.__oltDeviceId)
- self.send_command_to_voltha_cli('enable ' + self.__oltDeviceId,
- 'voltha_enable.log')
- def query_devices_after_enable(self):
- self.send_command_to_voltha_cli('devices',
- 'voltha_devices_after_enable.log')
-
- def send_command_to_voltha_cli(self, cmd, logFile):
- # os.system("docker exec -i -t compose_cli_1 sh -c 'echo \"" + cmd +
- # "\" > /voltha_tmp_command.txt'")
- os.system("docker exec compose_cli_1 sh -c 'echo \"" + cmd +
- "\" > /voltha_tmp_command.txt'")
- os.system("docker exec compose_cli_1 sh -c '/cli/cli/main.py -C "
- "vconsul:8500 -L < /voltha_tmp_command.txt' > " +
- self.__logDir + '/' + logFile)
-
- def send_command_to_onos_cli(self, cmd, logFile):
- os.system(
- "sshpass -p karaf ssh -o StrictHostKeyChecking=no -p 8101 "
- "karaf@localhost " + cmd + " 2>/dev/null > " +
- self.__logDir + '/' + logFile)
-
- def parseFields(self, statusLine):
- statusList = statusLine.split("|")
- return statusList
-
-
-
-def runTest(oltIpAddress, oltPort, logDir):
- preprovisioning = preprovisioningTest()
- preprovisioning.configure(str(oltIpAddress), int(oltPort),
- str(logDir))
- preprovisioning.preprovisionOlt()
- preprovisioning.query_devices_before_enable()
- preprovisioning.enable()
- preprovisioning.query_devices_after_enable()
-
-
-
-
diff --git a/tests/atests/common/run_robot.sh b/tests/atests/common/run_robot.sh
index fa60b99..b86a267 100755
--- a/tests/atests/common/run_robot.sh
+++ b/tests/atests/common/run_robot.sh
@@ -19,4 +19,4 @@
echo "Run Robot Framework TEST. Log: $1"
cd $VOLTHA_DIR
source env.sh
-robot -d $1 -v LOG_DIR:$1/voltha ./tests/atests/robot/auto_testing.robot
+robot -d $1 -v LOG_DIR:$1/voltha_test_results ./tests/atests/robot/voltha_automated_test_suite.robot
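
A possible invocation, assuming VOLTHA_BASE/VOLTHA_DIR and env.sh are set up as in the
existing test environment and the results should land under /tmp/voltha_test_results:

    ./tests/atests/common/run_robot.sh /tmp/voltha_test_results
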
diff --git a/tests/atests/common/testCaseUtils.py b/tests/atests/common/testCaseUtils.py
new file mode 100755
index 0000000..541c9c0
--- /dev/null
+++ b/tests/atests/common/testCaseUtils.py
@@ -0,0 +1,116 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+vOLT-HA Test Case Utils module
+"""
+import time
+import os
+import commands
+import subprocess
+import pexpect
+
+def configDirs(self, logDir, rootDir = None, volthaDir = None):
+ self.dirs ['log'] = logDir
+ self.dirs ['root'] = rootDir
+ self.dirs ['voltha'] = volthaDir
+
+def getDir(self, Dir):
+ return self.dirs.get(Dir)
+
+def removeLeadingLine(logDir, logFile):
+ with open(logDir + '/' + logFile, 'r+') as file:
+ lines = file.readlines()
+ file.seek(0)
+ lines = lines[1:]
+ for line in lines:
+ file.write(line)
+ file.truncate()
+ file.close()
+
+def send_command_to_voltha_cli(logDir, cmd, logFile):
+ vcliIp = extractIpAddr('vcli')
+ print (vcliIp)
+ print (cmd)
+ output = open(logDir + '/' + logFile, 'w')
+ child = pexpect.spawn('ssh -p 5022 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no voltha@' + vcliIp)
+ child.expect('[pP]assword:')
+ child.sendline('admin')
+ child.expect('\((\\x1b\[\d*;?\d+m){1,2}voltha(\\x1b\[\d*;?\d+m){1,2}\)')
+ time.sleep(5)
+ bytes = child.sendline(cmd)
+ child.expect('\((\\x1b\[\d*;?\d+m){1,2}voltha(\\x1b\[\d*;?\d+m){1,2}\)')
+ print (child.before)
+ output.write(child.before)
+ output.close()
+ removeLeadingLine(logDir, logFile)
+ child.close()
+
+def send_command_to_onos_cli(logDir, cmd, logFile):
+ onosIp = extractIpAddr('onos-ssh')
+ print (onosIp)
+ output = open(logDir + '/' + logFile, 'w')
+ child = pexpect.spawn('ssh -p 8101 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no karaf@' + onosIp)
+
+ child.expect('[pP]assword:')
+ child.sendline('karaf')
+ child.expect('(\\x1b\[\d*;?\d+m){1,2}onos>(\\x1b\[\d*;?\d+m){1,2}')
+ child.sendline('flows')
+ child.expect('flows')
+ child.expect('(\\x1b\[\d*;?\d+m){1,2}onos>(\\x1b\[\d*;?\d+m){1,2}')
+
+ output.write(child.before)
+
+ output.close()
+ child.close()
+
+def parseFields(statusLine):
+ statusList = statusLine.split("|")
+ return statusList
+
+def extractIpAddr(podName):
+ proc1 = subprocess.Popen(['/usr/bin/kubectl', 'get', 'svc', '--all-namespaces'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc2 = subprocess.Popen(['grep', '-e', podName], stdin=proc1.stdout,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc3 = subprocess.Popen(['awk', "{print $4}"], stdin=proc2.stdout,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ proc1.stdout.close()
+ proc2.stdout.close()
+ out, err = proc3.communicate()
+ return out
+
+def extractPodName(shortPodName):
+ proc1 = subprocess.Popen(['/usr/bin/kubectl', 'get', 'pods', '--all-namespaces'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc2 = subprocess.Popen(['grep', '-e', shortPodName], stdin=proc1.stdout,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc3 = subprocess.Popen(['awk', "{print $2}"], stdin=proc2.stdout,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+
+ proc1.stdout.close()
+ proc2.stdout.close()
+ out, err = proc3.communicate()
+ return out
+
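
extractIpAddr() and extractPodName() simply wrap a kubectl | grep | awk pipeline in
subprocess calls; the shell equivalent (using 'vcli' as an example service/pod name) is
approximately:

    kubectl get svc  --all-namespaces | grep -e vcli | awk '{print $4}'   # service CLUSTER-IP
    kubectl get pods --all-namespaces | grep -e vcli | awk '{print $2}'   # full pod name
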
diff --git a/tests/atests/common/volthaMngr.py b/tests/atests/common/volthaMngr.py
index 553f7c0..5257dfa 100755
--- a/tests/atests/common/volthaMngr.py
+++ b/tests/atests/common/volthaMngr.py
@@ -22,111 +22,103 @@
import os
import time
import subprocess
-import paramiko
-import spur
+import testCaseUtils
+import urlparse
-class volthaMngr(object):
+class VolthaMngr(object):
"""
This class implements voltha startup/shutdown callable helper functions
"""
def __init__(self):
- self.__rootDir = None
- self.__volthaDir = None
- self.__logDir = None
- self.__rootSsh = None
-
- def configDir(self, rootDir, volthaDir, logDir):
- self.__rootDir = rootDir
- self.__volthaDir = volthaDir
- self.__logDir = logDir
+ self.dirs = {}
+ self.dirs ['root'] = None
+ self.dirs ['voltha'] = None
+ self.dirs ['log'] = None
- os.chdir(volthaDir)
-
- def openRootSsh(self):
- shell = spur.SshShell(hostname='localhost', username='root',
- password='root',
- missing_host_key=spur.ssh.MissingHostKey.accept)
- return shell
-
- def getAllRunningContainers(self):
- allContainers = []
- proc1 = subprocess.Popen(['docker', 'ps', '-a'],
+ def vSetLogDirs(self, rootDir, volthaDir, logDir):
+ testCaseUtils.configDirs(self, logDir, rootDir, volthaDir)
+
+ def startAllPods(self):
+ proc1 = subprocess.Popen([testCaseUtils.getDir(self, 'root') + '/build.sh', 'start'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
- proc2 = subprocess.Popen(['grep', '-v', 'CONT'], stdin=proc1.stdout,
+ output = proc1.communicate()[0]
+ print(output)
+ proc1.stdout.close()
+
+ def stopAllPods(self):
+ proc1 = subprocess.Popen([testCaseUtils.getDir(self, 'root') + '/build.sh', 'stop'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ output = proc1.communicate()[0]
+ print(output)
+ proc1.stdout.close()
+
+ def resetKubeAdm(self):
+ proc1 = subprocess.Popen([testCaseUtils.getDir(self, 'root') + '/build.sh', 'clear'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ output = proc1.communicate()[0]
+ print(output)
+ proc1.stdout.close()
+
+ """
+ Because we are not deploying SEBA with XOS and NEM, and a standalone Voltha
+ deployment is not common, we need to alter the ONOS NetCfg in two ways to get
+ flows to work: one is to add the ponsim devices to the device list and the
+ other is to add the missing SADIS section
+ """
+ def alterOnosNetCfg(self):
+ print ('Altering the Onos NetCfg to suit Voltha\'s needs')
+ time.sleep(30)
+ onosIp = testCaseUtils.extractIpAddr("onos-ui")
+ netloc = onosIp.rstrip() + ":8181"
+ devUrl = urlparse.urlunparse(('http', netloc, '/onos/v1/network/configuration/devices/', '', '', ''))
+ sadisUrl = urlparse.urlunparse(('http', netloc, '/onos/v1/network/configuration/apps/', '', '', ''))
+ os.system('curl --user karaf:karaf -X POST -H "Content-Type: application/json" '
+ '%s -d @%s/tests/atests/build/devices_json' % (devUrl, testCaseUtils.getDir(self, 'voltha')))
+ os.system('curl --user karaf:karaf -X POST -H "Content-Type: application/json" '
+ '%s -d @%s/tests/atests/build/sadis_json' % (sadisUrl, testCaseUtils.getDir(self, 'voltha')))
+
+ def getAllRunningPods(self):
+ allRunningPods = []
+ proc1 = subprocess.Popen(['/usr/bin/kubectl', 'get', 'pods', '--all-namespaces'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc2 = subprocess.Popen(['grep', '-v', 'NAMESPACE'], stdin=proc1.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc1.stdout.close
out, err = proc2.communicate()
+ print (out)
if out:
for line in out.split('\n'):
items = line.split()
- if len(items):
- allContainers.append(items)
- return allContainers
+ nsName = {}
+ if len(items) > 2:
+ nsName = {}
+ nsName['NS'] = items[0]
+ nsName['Name'] = items[1]
+ allRunningPods.append(nsName)
+ return allRunningPods
+
+ def collectPodLogs(self):
+ print('Collect logs from all Pods')
+ allRunningPods = self.getAllRunningPods()
+ for nsName in allRunningPods:
+ Namespace = nsName.get('NS')
+ podName = nsName.get('Name')
+ os.system('/usr/bin/kubectl logs -n %s -f %s > %s/%s.log 2>&1 &' %
+ (Namespace, podName, testCaseUtils.getDir(self, 'log'), podName))
- def stopPonsim(self):
- command = "for pid in $(ps -ef | grep ponsim | grep -v grep | " \
- "awk '{print $2}'); do echo $pid; done"
- client = paramiko.SSHClient()
- client.load_system_host_keys()
- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- client.connect('localhost', username='root', password='root')
- transport = client.get_transport()
- channel = transport.open_session()
-
- channel.exec_command(command)
- procIds = channel.recv(4096).replace('\n', ' ')
- channel = transport.open_session()
- channel.exec_command('sudo kill -9 %s' % procIds)
-
- def removeExistingContainers(self):
- allContainers = self.getAllRunningContainers()
- for container in allContainers:
- procID = container[0]
- os.system('docker rm -f %s > /dev/null 2>&1' % procID)
-
- def startVolthaContainers(self):
- print('Start VOLTHA containers')
- # Bring up all the containers required for VOLTHA (total 15)
- os.system(
- 'docker-compose -f compose/docker-compose-system-test.yml '
- 'up -d > %s/start_voltha_containers.log 2>&1' %
- self.__logDir)
-
- def collectAllLogs(self):
- print('Collect all VOLTHA container logs')
- allContainers = self.getAllRunningContainers()
- for container in allContainers:
- containerName = container[-1]
- os.system('docker logs --since 0m -f %s > %s/%s.log 2>&1 &' %
- (containerName, self.__logDir, containerName))
-
- def enableBridge(self):
- self.__rootSsh = self.openRootSsh()
- result = self.__rootSsh.run([self.__rootDir + '/enable_bridge.sh'])
- print(result.output)
-
- def startPonsim(self, onusAmount=1):
- command = 'source env.sh ; ./ponsim/main.py -v'
- if onusAmount > 1:
- command += ' -o %s' % onusAmount
- ponsimLog = open('%s/ponsim.log' % self.__logDir, 'w')
- process = self.__rootSsh.spawn(['bash', '-c', command],
- cwd=self.__volthaDir, store_pid=True,
- stdout=ponsimLog)
- return process.pid
-
-
+
def voltha_Initialize(rootDir, volthaDir, logDir):
+ voltha = VolthaMngr()
+ voltha.vSetLogDirs(rootDir, volthaDir, logDir)
+ voltha.stopAllPods()
+ voltha.resetKubeAdm()
+ voltha.startAllPods()
+ voltha.alterOnosNetCfg()
+ voltha.collectPodLogs()
- voltha = volthaMngr()
- voltha.configDir(rootDir, volthaDir, logDir)
- voltha.stopPonsim()
- voltha.removeExistingContainers()
- voltha.startVolthaContainers()
- voltha.collectAllLogs()
- voltha.enableBridge()
- voltha.startPonsim(3)
-
diff --git a/tests/atests/robot/voltha_automated_test_suite.robot b/tests/atests/robot/voltha_automated_test_suite.robot
index 9c9f37b..ca0da45 100755
--- a/tests/atests/robot/voltha_automated_test_suite.robot
+++ b/tests/atests/robot/voltha_automated_test_suite.robot
@@ -16,7 +16,9 @@
Library Process
Library ../common/auto_test.py
Library ../common/volthaMngr.py
-Library ../common/preprovisioningTest.py
+Library ../common/preprovisioning.py
+Library volthaMngr.VolthaMngr
+Library preprovisioning.Preprovisioning
Test Setup Start Voltha
Test Teardown Stop Voltha
@@ -27,18 +29,20 @@
${ROOT_DIR} ${EMPTY}
${VOLTHA_DIR} ${EMPTY}
${PONSIM_PID} ${EMPTY}
-${ONUS} 3
${ONOS_SSH_PORT} 8101
-${OLT_IP_ADDR} "172.17.0.1"
+${OLT_IP_ADDR} olt.voltha.svc
${OLT_PORT_ID} 50060
*** Test Cases ***
Provisioning
- [Documentation] VOLTHA Pre-provisioning Test
- ... This test deploys an OLT port and a number of ONU ports
- ... Then it verifies that all the physical and logical devices are up
- Configure ${OLT_IP_ADDR} ${OLT_PORT_ID} ${LOG_DIR}
+ [Documentation] VOLTHA Pre-provisioning
+ ... This test preprovisions a ponsim-OLT with a given IP address and TCP port
+ ... and then enables both it and a number of ponsim-ONUs with predefined IP/port
+ ... information. It then verifies that all the physical and logical devices are ACTIVE
+ ... and REACHABLE
+ PSet Log Dirs ${LOG_DIR}
+ Configure ${OLT_IP_ADDR} ${OLT_PORT_ID}
Preprovision Olt
Query Devices Before Enable
Enable
@@ -48,23 +52,21 @@
*** Keywords ***
Start Voltha
[Documentation] Start Voltha infrastructure to run test(s). This includes starting all
- ... Docker containers for Voltha and Onos as well as Ponsim. It then start
- ... Voltha and Onos Cli
- ${ROOT_DIR} ${VOLTHA_DIR} ${LOG_DIR} Dir Init ${LOG_DIR}
- Config Dir ${ROOT_DIR} ${VOLTHA_DIR} ${LOG_DIR}
+ ... Kubernetes pods and starting log collection. PonsimV2 has now been
+ ... containerized and does not need to be managed separately
+ ${ROOT_DIR} ${VOLTHA_DIR} ${LOG_DIR} Dir Init ${LOG_DIR}
+ VSet Log Dirs ${ROOT_DIR} ${VOLTHA_DIR} ${LOG_DIR}
Stop Voltha
- Start Voltha Containers
- Collect All Logs
- Enable Bridge
- ${PONSIM_PID} Start Ponsim ${ONUS}
- Run Onos
+ Start All Pods
+ Collect Pod Logs
+ Alter Onos NetCfg
Stop Voltha
- [Documentation] Stop Voltha infrastucture. This includes stopping all Docker containers
- ... for Voltha and Onos as well stopping Ponsim process.
- Stop Ponsim
- Remove Existing Containers
+ [Documentation] Stop Voltha infrastructure. This includes clearing all installation milestone
+ ... files and stopping all Kubernetes pods
+ Stop All Pods
+ Reset Kube Adm
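
For reference, the suite can also be launched directly with robot, mirroring run_robot.sh and
overriding variables such as OLT_IP_ADDR if the ponsim OLT service is exposed elsewhere
(paths below are assumptions):

    robot -d /tmp/voltha_test_results -v LOG_DIR:/tmp/voltha_test_results/voltha_test_results \
        -v OLT_IP_ADDR:olt.voltha.svc -v OLT_PORT_ID:50060 \
        ./tests/atests/robot/voltha_automated_test_suite.robot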