VOL-572: Integration testing with Kubernetes
* Updated the Kubernetes version installed in the single-node environment from 1.8.5 to 1.9.3.
* Replaced the Weave pod network plugin with Calico.
* Updated test_device_state_changes to run in the single-node Kubernetes environment.
* Moved test_utils.py from voltha/tests/itests/docutests/ to voltha/tests/itests/.
Change-Id: I472cffec1e0c3a783edaecb375664cd1c6bb93b1
diff --git a/ansible/roles/k8s-master/tasks/main.yml b/ansible/roles/k8s-master/tasks/main.yml
index 6fe0456..c260791 100644
--- a/ansible/roles/k8s-master/tasks/main.yml
+++ b/ansible/roles/k8s-master/tasks/main.yml
@@ -27,14 +27,14 @@
name: "{{ item }}"
state: present
with_items:
- - kubeadm=1.8.5-00
- - kubectl=1.8.5-00
- - kubelet=1.8.5-00
+ - kubeadm=1.9.3-00
+ - kubectl=1.9.3-00
+ - kubelet=1.9.3-00
- kubernetes-cni=0.6.0-00
- name: Initialize node as Kubernetes master
become: yes
- command: "kubeadm init --apiserver-advertise-address=10.100.198.220"
+ command: "kubeadm init --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address=10.100.198.220"
- name: Create .kube directory under home
become: yes
@@ -71,4 +71,4 @@
- name: Install pod network
become: yes
- command: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /cord/incubator/voltha/k8s/weave-net-1.8.yml"
\ No newline at end of file
+ command: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /cord/incubator/voltha/k8s/calico-1.6.yml"
diff --git a/k8s/calico-1.6.yml b/k8s/calico-1.6.yml
new file mode 100644
index 0000000..e57071a
--- /dev/null
+++ b/k8s/calico-1.6.yml
@@ -0,0 +1,461 @@
+# Calico Version v2.6.8
+# https://docs.projectcalico.org/v2.6/releases#v2.6.8
+# This manifest includes the following component versions:
+# calico/node:v2.6.8
+# calico/cni:v1.11.4
+# calico/kube-controllers:v1.0.3
+
+# This ConfigMap is used to configure a self-hosted Calico installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: calico-config
+ namespace: kube-system
+data:
+ # The location of your etcd cluster. This uses the Service clusterIP
+ # defined below.
+ etcd_endpoints: "http://10.96.232.136:6666"
+
+ # Configure the Calico backend to use.
+ calico_backend: "bird"
+
+ # The CNI network configuration to install on each node.
+ cni_network_config: |-
+ {
+ "name": "k8s-pod-network",
+ "cniVersion": "0.1.0",
+ "type": "calico",
+ "etcd_endpoints": "__ETCD_ENDPOINTS__",
+ "log_level": "info",
+ "mtu": 1500,
+ "ipam": {
+ "type": "calico-ipam"
+ },
+ "policy": {
+ "type": "k8s",
+ "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
+ "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
+ },
+ "kubernetes": {
+ "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
+ }
+ }
+
+---
+
+# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet
+# to force it to run on the master even when the master isn't schedulable, and uses
+# nodeSelector to ensure it only runs on the master.
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: calico-etcd
+ namespace: kube-system
+ labels:
+ k8s-app: calico-etcd
+spec:
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-etcd
+ annotations:
+ # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+ # reserves resources for critical add-on pods so that they can be rescheduled after
+ # a failure. This annotation works in tandem with the toleration below.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ tolerations:
+ # this taint is set by all kubelets running `--cloud-provider=external`
+ # so we should tolerate it to schedule the calico pods
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ # Toleration allows the pod to run on master
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+ # This, along with the annotation above marks this pod as a critical add-on.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ # Only run this pod on the master.
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ hostNetwork: true
+ containers:
+ - name: calico-etcd
+ image: quay.io/coreos/etcd:v3.1.10
+ env:
+ - name: CALICO_ETCD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ command: ["/bin/sh","-c"]
+ args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
+ volumeMounts:
+ - name: var-etcd
+ mountPath: /var/etcd
+ volumes:
+ - name: var-etcd
+ hostPath:
+ path: /var/etcd
+
+---
+
+# This manifest installs the Service which gets traffic to the Calico
+# etcd.
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: calico-etcd
+ name: calico-etcd
+ namespace: kube-system
+spec:
+ # Select the calico-etcd pod running on the master.
+ selector:
+ k8s-app: calico-etcd
+ # This ClusterIP needs to be known in advance, since we cannot rely
+ # on DNS to get access to etcd.
+ clusterIP: 10.96.232.136
+ ports:
+ - port: 6666
+
+---
+
+# This manifest installs the calico/node container, as well
+# as the Calico CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+ name: calico-node
+ namespace: kube-system
+ labels:
+ k8s-app: calico-node
+spec:
+ selector:
+ matchLabels:
+ k8s-app: calico-node
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-node
+ annotations:
+ # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+ # reserves resources for critical add-on pods so that they can be rescheduled after
+ # a failure. This annotation works in tandem with the toleration below.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ hostNetwork: true
+ tolerations:
+ # This taint is set by all kubelets running `--cloud-provider=external`
+ # so we should tolerate it to schedule the calico pods
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ # Toleration allows the pod to run on master
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+ # This, along with the annotation above marks this pod as a critical add-on.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ serviceAccountName: calico-cni-plugin
+ # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+ # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+ terminationGracePeriodSeconds: 0
+ containers:
+ # Runs calico/node container on each Kubernetes node. This
+ # container programs network policy and routes on each
+ # host.
+ - name: calico-node
+ image: quay.io/calico/node:v2.6.8
+ env:
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+ # Enable BGP. Disable to enforce policy only.
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ # Cluster type to identify the deployment type
+ - name: CLUSTER_TYPE
+ value: "kubeadm,bgp"
+ # Set noderef for node controller.
+ - name: CALICO_K8S_NODE_REF
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Disable file logging so `kubectl logs` works.
+ - name: CALICO_DISABLE_FILE_LOGGING
+ value: "true"
+ # Set Felix endpoint to host default action to ACCEPT.
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+ value: "ACCEPT"
+ # Configure the IP Pool from which Pod IPs will be chosen.
+ - name: CALICO_IPV4POOL_CIDR
+ value: "192.168.0.0/16"
+ - name: CALICO_IPV4POOL_IPIP
+ value: "always"
+ # Disable IPv6 on Kubernetes.
+ - name: FELIX_IPV6SUPPORT
+ value: "false"
+ # Set MTU for tunnel device used if ipip is enabled
+ - name: FELIX_IPINIPMTU
+ value: "1440"
+ # Set Felix logging to "info"
+ - name: FELIX_LOGSEVERITYSCREEN
+ value: "info"
+ # Auto-detect the BGP IP address.
+ - name: IP
+ value: ""
+ - name: FELIX_HEALTHENABLED
+ value: "true"
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: 250m
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9099
+ periodSeconds: 10
+ initialDelaySeconds: 10
+ failureThreshold: 6
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9099
+ periodSeconds: 10
+ volumeMounts:
+ - mountPath: /lib/modules
+ name: lib-modules
+ readOnly: true
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ readOnly: false
+ # This container installs the Calico CNI binaries
+ # and CNI network config file on each node.
+ - name: install-cni
+ image: quay.io/calico/cni:v1.11.4
+ command: ["/install-cni.sh"]
+ env:
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+ # The CNI network config to install on each node.
+ - name: CNI_NETWORK_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: cni_network_config
+ volumeMounts:
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ volumes:
+ # Used by calico/node.
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: var-run-calico
+ hostPath:
+ path: /var/run/calico
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
+
+---
+
+# This manifest deploys the Calico Kubernetes controllers.
+# See https://github.com/projectcalico/kube-controllers
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+spec:
+ # The controllers can only have a single active instance.
+ replicas: 1
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+ annotations:
+ # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+ # reserves resources for critical add-on pods so that they can be rescheduled after
+ # a failure. This annotation works in tandem with the toleration below.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ # The controllers must run in the host network namespace so that
+ # it isn't governed by policy that would prevent it from working.
+ hostNetwork: true
+ tolerations:
+ # this taint is set by all kubelets running `--cloud-provider=external`
+ # so we should tolerate it to schedule the calico pods
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+ # This, along with the annotation above marks this pod as a critical add-on.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ serviceAccountName: calico-kube-controllers
+ containers:
+ - name: calico-kube-controllers
+ image: quay.io/calico/kube-controllers:v1.0.3
+ env:
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+ # The location of the Kubernetes API. Use the default Kubernetes
+ # service for API access.
+ - name: K8S_API
+ value: "https://kubernetes.default:443"
+ # Choose which controllers to run.
+ - name: ENABLED_CONTROLLERS
+ value: policy,profile,workloadendpoint,node
+ # Since we're running in the host namespace and might not have KubeDNS
+ # access, configure the container's /etc/hosts to resolve
+ # kubernetes.default to the correct service clusterIP.
+ - name: CONFIGURE_ETC_HOSTS
+ value: "true"
+
+---
+
+# This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then
+# be removed entirely once the new kube-controllers deployment has been deployed above.
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: calico-policy-controller
+ namespace: kube-system
+ labels:
+ k8s-app: calico-policy-controller
+spec:
+ # Turn this deployment off in favor of the kube-controllers deployment above.
+ replicas: 0
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: calico-policy-controller
+ namespace: kube-system
+ labels:
+ k8s-app: calico-policy-controller
+ spec:
+ hostNetwork: true
+ serviceAccountName: calico-kube-controllers
+ containers:
+ - name: calico-policy-controller
+ image: quay.io/calico/kube-controllers:v1.0.3
+ env:
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-cni-plugin
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-cni-plugin
+subjects:
+- kind: ServiceAccount
+ name: calico-cni-plugin
+ namespace: kube-system
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-cni-plugin
+rules:
+ - apiGroups: [""]
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-cni-plugin
+ namespace: kube-system
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-kube-controllers
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+ name: calico-kube-controllers
+ namespace: kube-system
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-kube-controllers
+rules:
+ - apiGroups:
+ - ""
+ - extensions
+ resources:
+ - pods
+ - namespaces
+ - networkpolicies
+ - nodes
+ verbs:
+ - watch
+ - list
+
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
diff --git a/k8s/genie-cni-1.8.yml b/k8s/genie-cni-1.8.yml
new file mode 100644
index 0000000..4f97c31
--- /dev/null
+++ b/k8s/genie-cni-1.8.yml
@@ -0,0 +1,134 @@
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: genie
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - patch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: genie
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: genie
+subjects:
+- kind: ServiceAccount
+ name: genie
+ namespace: kube-system
+- kind: Group
+ name: system:authenticated
+ apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: genie
+ namespace: kube-system
+---
+# This ConfigMap can be used to configure a self-hosted CNI-Genie installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: genie-config
+ namespace: kube-system
+data:
+ # The CNI network configuration to install on each node.
+ cni_genie_network_config: |-
+ {
+ "name": "k8s-pod-network",
+ "type": "genie",
+ "log_level": "info",
+ "datastore_type": "kubernetes",
+ "hostname": "__KUBERNETES_NODE_NAME__",
+ "policy": {
+ "type": "k8s",
+ "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
+ },
+ "kubernetes": {
+ "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
+ "kubeconfig": "/etc/cni/net.d/genie-kubeconfig"
+ },
+ "romana_root": "http://__ROMANA_SERVICE_HOST__:__ROMANA_SERVICE_PORT__",
+ "segment_label_name": "romanaSegment"
+ }
+
+---
+# Install CNI-Genie plugin on each slave node.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+ name: genie
+ namespace: kube-system
+ labels:
+ k8s-app: genie
+spec:
+ selector:
+ matchLabels:
+ k8s-app: genie
+ template:
+ metadata:
+ labels:
+ k8s-app: genie
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ scheduler.alpha.kubernetes.io/tolerations: |
+ [
+ {
+ "key": "dedicated",
+ "value": "master",
+ "effect": "NoSchedule"
+ },
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ }
+ ]
+ spec:
+ hostNetwork: true
+ hostPID: true
+ containers:
+ # Create a container with install.sh that
+ # Installs required 00-genie.conf and genie binary
+ # on slave node.
+ - name: install-cni
+ image: quay.io/cnigenie/v1.5:latest
+ command: ["/launch.sh"]
+ env:
+ - name: CNI_NETWORK_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: genie-config
+ key: cni_genie_network_config
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ volumeMounts:
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ volumes:
+ # Used by genie/node.
+ #- name: lib-modules
+ # hostPath:
+ # path: /lib/modules
+ #- name: var-run-genie
+ # hostPath:
+ # path: /var/run/genie
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
diff --git a/k8s/olt.yml b/k8s/olt.yml
index cd64d51..6b18657 100644
--- a/k8s/olt.yml
+++ b/k8s/olt.yml
@@ -45,7 +45,7 @@
- "-device_type"
- "OLT"
- "-onus"
- - "3"
+ - "4"
- "-internal_if"
- "eth0"
- "-external_if"
diff --git a/k8s/onu.yml b/k8s/onu.yml
index edea4d2..a243550 100644
--- a/k8s/onu.yml
+++ b/k8s/onu.yml
@@ -19,7 +19,7 @@
name: onu
namespace: voltha
spec:
- replicas: 3
+ replicas: 4
template:
metadata:
labels:
diff --git a/k8s/single-node/vcore_for_consul.yml b/k8s/single-node/vcore_for_consul.yml
index 6d5c672..19205a7 100644
--- a/k8s/single-node/vcore_for_consul.yml
+++ b/k8s/single-node/vcore_for_consul.yml
@@ -60,4 +60,4 @@
- "--interface=eth1"
- "--backend=consul"
- "--pon-subnet=10.38.0.0/12"
-# - "--ponsim-comm=grpc"
+ - "--ponsim-comm=grpc"
diff --git a/k8s/single-node/vcore_for_etcd.yml b/k8s/single-node/vcore_for_etcd.yml
index ff579dc..fb75ee7 100644
--- a/k8s/single-node/vcore_for_etcd.yml
+++ b/k8s/single-node/vcore_for_etcd.yml
@@ -52,7 +52,7 @@
- "--interface=eth1"
- "--backend=etcd"
- "--pon-subnet=10.38.0.0/12"
-# - "--ponsim-comm=grpc"
+ - "--ponsim-comm=grpc"
ports:
- containerPort: 8880
name: rest-port
diff --git a/tests/itests/README.md b/tests/itests/README.md
index 761c02b..47aae4a 100644
--- a/tests/itests/README.md
+++ b/tests/itests/README.md
@@ -101,7 +101,7 @@
* Ponsim_olt and Ponsim_onu adapters
* Ponsim
-First start the Voltha ensemble:
+To run the test in the docker-compose environment, first start the Voltha ensemble:
```
cd /cord/incubator/voltha
. ./env.sh
@@ -112,13 +112,26 @@
sudo -s
. ./env.sh
./ponsim/main.py -v -o 4
-```
+```
Run the test:
```
cd /cord/incubator/voltha
. ./env.sh
nosetests -s tests/itests/voltha/test_device_state_changes.py
```
+To set up the test in a single-node Kubernetes environment (see voltha/BUILD.md):
+```
+. ./env.sh
+./tests/itests/env/voltha-k8s-start.sh
+```
+Refer to the Kubernetes section of voltha/ponsim/v2/README.md to set up the node for PONSIM. To install the CNI-Genie plugin, run:
+```
+kubectl apply -f k8s/genie-cni-1.8.yml
+```
+To run the test:
+```
+nosetests -s tests/itests/voltha/test_device_state_changes.py --tc-file=tests/itests/env/k8s-consul.ini
+```
* **Persistence**: This test goes through several voltha restarts along with variations
of configurations in between to ensure data integrity is preserved.
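The single-node Kubernetes instructions above pass `--tc-file=tests/itests/env/k8s-consul.ini`, and the updated test_device_state_changes.py (further down in this change) reads `config['test_parameters']['orch_env']` from it to choose its REST and OLT endpoints. The contents of that file are not shown in this change; assuming nose-testconfig's default INI format, it presumably contains something like:
```
[test_parameters]
orch_env = k8s-single-node
```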
diff --git a/tests/itests/docutests/build_md_test.py b/tests/itests/docutests/build_md_test.py
index 6a00f71..6f6c018 100644
--- a/tests/itests/docutests/build_md_test.py
+++ b/tests/itests/docutests/build_md_test.py
@@ -29,7 +29,7 @@
this_dir = os.path.abspath(os.path.dirname(__file__))
-from test_utils import run_command_to_completion_with_raw_stdout, \
+from tests.itests.test_utils import run_command_to_completion_with_raw_stdout, \
is_open, \
is_valid_ip, \
run_long_running_command_with_timeout, \
diff --git a/tests/itests/ofagent/test_ofagent_multicontroller_failover.py b/tests/itests/ofagent/test_ofagent_multicontroller_failover.py
index b119e74..289a327 100644
--- a/tests/itests/ofagent/test_ofagent_multicontroller_failover.py
+++ b/tests/itests/ofagent/test_ofagent_multicontroller_failover.py
@@ -22,7 +22,7 @@
this_dir = os.path.abspath(os.path.dirname(__file__))
-from tests.itests.docutests.test_utils import run_command_to_completion_with_raw_stdout
+from tests.itests.test_utils import run_command_to_completion_with_raw_stdout
from voltha.protos.device_pb2 import Device
from google.protobuf.json_format import MessageToDict
from tests.itests.voltha.rest_base import RestBase
diff --git a/tests/itests/ofagent/test_ofagent_recovery.py b/tests/itests/ofagent/test_ofagent_recovery.py
index dba3563..bd44b6a 100644
--- a/tests/itests/ofagent/test_ofagent_recovery.py
+++ b/tests/itests/ofagent/test_ofagent_recovery.py
@@ -27,7 +27,7 @@
this_dir = os.path.abspath(os.path.dirname(__file__))
-from tests.itests.docutests.test_utils import run_command_to_completion_with_raw_stdout
+from tests.itests.test_utils import run_command_to_completion_with_raw_stdout
log = logging.getLogger(__name__)
diff --git a/tests/itests/docutests/test_utils.py b/tests/itests/test_utils.py
similarity index 100%
rename from tests/itests/docutests/test_utils.py
rename to tests/itests/test_utils.py
diff --git a/tests/itests/voltha/test_cold_activation_sequence.py b/tests/itests/voltha/test_cold_activation_sequence.py
index 1b06fda..9dd350b 100644
--- a/tests/itests/voltha/test_cold_activation_sequence.py
+++ b/tests/itests/voltha/test_cold_activation_sequence.py
@@ -9,7 +9,7 @@
from tests.itests.voltha.rest_base import RestBase
from common.utils.consulhelpers import get_endpoint_from_consul
from structlog import get_logger
-from tests.itests.docutests.test_utils import get_pod_ip
+from tests.itests.test_utils import get_pod_ip
from testconfig import config
LOCAL_CONSUL = "localhost:8500"
diff --git a/tests/itests/voltha/test_device_state_changes.py b/tests/itests/voltha/test_device_state_changes.py
index 93685ca..14b73d0 100644
--- a/tests/itests/voltha/test_device_state_changes.py
+++ b/tests/itests/voltha/test_device_state_changes.py
@@ -8,9 +8,15 @@
from voltha.protos import openflow_13_pb2 as ofp
from tests.itests.voltha.rest_base import RestBase
from common.utils.consulhelpers import get_endpoint_from_consul
+from testconfig import config
+from tests.itests.test_utils import get_pod_ip
LOCAL_CONSUL = "localhost:8500"
+orch_env = 'docker-compose'
+if 'test_parameters' in config and 'orch_env' in config['test_parameters']:
+ orch_env = config['test_parameters']['orch_env']
+print('orchestration-environment: {}'.format(orch_env))
class TestDeviceStateChangeSequence(RestBase):
"""
@@ -24,7 +30,15 @@
"""
# Retrieve details of the REST entry point
- rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'envoy-8443')
+ if orch_env == 'k8s-single-node':
+ rest_endpoint = get_pod_ip('voltha') + ':8443'
+ olt_host_and_port = get_pod_ip('olt') + ':50060'
+ elif orch_env == 'swarm-single-node':
+ rest_endpoint = 'localhost:8443'
+ olt_host_and_port = 'localhost:50060'
+ else:
+ rest_endpoint = get_endpoint_from_consul(LOCAL_CONSUL, 'voltha-envoy-8443')
+ olt_host_and_port = '172.17.0.1:50060'
# Construct the base_url
base_url = 'https://' + rest_endpoint
@@ -127,7 +141,7 @@
def add_olt_device(self):
device = Device(
type='ponsim_olt',
- host_and_port='172.17.0.1:50060'
+ host_and_port=self.olt_host_and_port
)
device = self.post('/api/v1/devices', MessageToDict(device),
expected_http_code=200)
@@ -150,15 +164,13 @@
self.wait_till(
'admin state moves to ACTIVATING or ACTIVE',
- lambda: self.get(path)['oper_status'] in ('ACTIVATING', 'ACTIVE'),
- timeout=0.5)
+ lambda: self.get(path)['oper_status'] in ('ACTIVATING', 'ACTIVE'))
# eventually, it shall move to active state and by then we shall have
# device details filled, connect_state set, and device ports created
self.wait_till(
'admin state ACTIVE',
- lambda: self.get(path)['oper_status'] == 'ACTIVE',
- timeout=0.5)
+ lambda: self.get(path)['oper_status'] == 'ACTIVE')
device = self.get(path)
self.assertEqual(device['connect_status'], 'REACHABLE')
@@ -203,8 +215,7 @@
# the olt device
self.wait_till(
'find four ONUs linked to the olt device',
- lambda: len(self.find_onus(olt_id)) >= 4,
- 2
+ lambda: len(self.find_onus(olt_id)) >= 4
)
# verify that they are properly set
onus = self.find_onus(olt_id)
@@ -280,11 +291,8 @@
self.assertGreaterEqual(len(flows), 4)
def verify_olt_eapol_flow(self, olt_id):
- # olt shall have two flow rules, one is the default and the
- # second is the result of eapol forwarding with rule:
- # if eth_type == 0x888e => push vlan(1000); out_port=nni_port
flows = self.get('/api/v1/devices/{}/flows'.format(olt_id))['items']
- self.assertEqual(len(flows), 2)
+ self.assertEqual(len(flows), 8)
flow = flows[1]
self.assertEqual(flow['table_id'], 0)
self.assertEqual(flow['priority'], 1000)
@@ -301,14 +309,12 @@
self.wait_till(
'operational state moves to UNKNOWN',
- lambda: self.get(path)['oper_status'] == 'UNKNOWN',
- timeout=0.5)
+ lambda: self.get(path)['oper_status'] == 'UNKNOWN')
# eventually, the connect_state should be UNREACHABLE
self.wait_till(
- 'connest status UNREACHABLE',
- lambda: self.get(path)['connect_status'] == 'UNREACHABLE',
- timeout=0.5)
+ 'connect status UNREACHABLE',
+ lambda: self.get(path)['connect_status'] == 'UNREACHABLE')
# Device's ports should be INACTIVE
ports = self.get(path + '/ports')['items']
diff --git a/tests/itests/voltha/test_persistence.py b/tests/itests/voltha/test_persistence.py
index 54f091d..09da460 100644
--- a/tests/itests/voltha/test_persistence.py
+++ b/tests/itests/voltha/test_persistence.py
@@ -7,7 +7,7 @@
import subprocess
import select
-from tests.itests.docutests.test_utils import \
+from tests.itests.test_utils import \
run_command_to_completion_with_raw_stdout, \
run_command_to_completion_with_stdout_in_list
from unittest import skip
diff --git a/tests/itests/voltha/test_self_signed_cert_auth_failure.py b/tests/itests/voltha/test_self_signed_cert_auth_failure.py
index c04fe00..b7e35eb 100644
--- a/tests/itests/voltha/test_self_signed_cert_auth_failure.py
+++ b/tests/itests/voltha/test_self_signed_cert_auth_failure.py
@@ -5,7 +5,7 @@
from tests.itests.voltha.rest_base import RestBase
from common.utils.consulhelpers import get_endpoint_from_consul
from common.utils.consulhelpers import verify_all_services_healthy
-from tests.itests.docutests.test_utils import \
+from tests.itests.test_utils import \
run_command_to_completion_with_raw_stdout, \
run_command_to_completion_with_stdout_in_list
diff --git a/tests/itests/voltha/test_voltha_alarm_events.py b/tests/itests/voltha/test_voltha_alarm_events.py
index 48f68e1..dd37a6e 100644
--- a/tests/itests/voltha/test_voltha_alarm_events.py
+++ b/tests/itests/voltha/test_voltha_alarm_events.py
@@ -1,6 +1,6 @@
from unittest import main
from common.utils.consulhelpers import get_endpoint_from_consul
-from tests.itests.docutests.test_utils import \
+from tests.itests.test_utils import \
run_long_running_command_with_timeout
from tests.itests.voltha.rest_base import RestBase
from google.protobuf.json_format import MessageToDict
diff --git a/tests/itests/voltha/test_voltha_alarm_filters.py b/tests/itests/voltha/test_voltha_alarm_filters.py
index 6624763..ccb2929 100644
--- a/tests/itests/voltha/test_voltha_alarm_filters.py
+++ b/tests/itests/voltha/test_voltha_alarm_filters.py
@@ -4,7 +4,7 @@
from google.protobuf.json_format import MessageToDict
from common.utils.consulhelpers import get_endpoint_from_consul
-from tests.itests.docutests.test_utils import \
+from tests.itests.test_utils import \
run_long_running_command_with_timeout
from tests.itests.voltha.rest_base import RestBase
from voltha.protos.device_pb2 import Device
diff --git a/tests/itests/voltha/test_voltha_rest_apis.py b/tests/itests/voltha/test_voltha_rest_apis.py
index d5fc1ee..d1874b0 100644
--- a/tests/itests/voltha/test_voltha_rest_apis.py
+++ b/tests/itests/voltha/test_voltha_rest_apis.py
@@ -9,7 +9,7 @@
from voltha.protos import openflow_13_pb2 as ofp
from common.utils.consulhelpers import get_endpoint_from_consul
from structlog import get_logger
-from tests.itests.docutests.test_utils import get_pod_ip
+from tests.itests.test_utils import get_pod_ip
from testconfig import config
LOCAL_CONSUL = "localhost:8500"