[WIP] Initial checkin for automation of CORD VTN
test scenarios.
Change-Id: Ife27562132cd547acba913de2ca2fd4781e7de36
diff --git a/src/test/cordvtn/__init__.py b/src/test/cordvtn/__init__.py
new file mode 100644
index 0000000..d2ccc00
--- /dev/null
+++ b/src/test/cordvtn/__init__.py
@@ -0,0 +1,22 @@
+#
+# Copyright 2016-present Ciena Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os,sys
+##add the python path to lookup the utils
+working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
+utils_dir = os.path.join(working_dir, '../utils')
+fsm_dir = os.path.join(working_dir, '../fsm')
+__path__.append(utils_dir)
+__path__.append(fsm_dir)
diff --git a/src/test/cordvtn/access-docker/Dockerfile b/src/test/cordvtn/access-docker/Dockerfile
new file mode 100644
index 0000000..427807b
--- /dev/null
+++ b/src/test/cordvtn/access-docker/Dockerfile
@@ -0,0 +1,10 @@
+FROM ubuntu:14.04
+MAINTAINER chgaonke@partner.ciena.com
+
+RUN apt-get update && \
+ apt-get install -qy vlan python python-pexpect strace \
+ python-pip python-setuptools python-scapy tcpdump doxygen doxypy wget \
+ openvswitch-common openvswitch-switch \
+ python-twisted python-sqlite sqlite3 telnet arping isc-dhcp-server \
+ python-paramiko python-maas-client
+
diff --git a/src/test/cordvtn/access-docker/access-tag.sh b/src/test/cordvtn/access-docker/access-tag.sh
new file mode 100755
index 0000000..76a1f7a
--- /dev/null
+++ b/src/test/cordvtn/access-docker/access-tag.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+sudo docker run --privileged --cap-add=ALL -d -v /dev:/dev -v /lib/modules:/lib/modules --name access -t vlan /bin/bash
+sudo ./pipework fabric -i eth1 access 10.168.0.254/24
+sudo docker exec -d access modprobe 8021q
+sudo docker exec -d access vconfig add eth1 222
+sudo docker exec -d access ip link set eth1.222 up
+sudo docker exec -d access ip addr add 10.169.0.254/24 dev eth1.222
diff --git a/src/test/cordvtn/access-docker/pipework b/src/test/cordvtn/access-docker/pipework
new file mode 100755
index 0000000..fffc097
--- /dev/null
+++ b/src/test/cordvtn/access-docker/pipework
@@ -0,0 +1,422 @@
+#!/bin/sh
+# This code should (try to) follow Google's Shell Style Guide
+# (https://google-styleguide.googlecode.com/svn/trunk/shell.xml)
+set -e
+
+case "$1" in
+ --wait)
+ WAIT=1
+ ;;
+esac
+
+IFNAME=$1
+
+# default value set further down if not set here
+CONTAINER_IFNAME=
+if [ "$2" = "-i" ]; then
+ CONTAINER_IFNAME=$3
+ shift 2
+fi
+
+if [ "$2" = "-l" ]; then
+ LOCAL_IFNAME=$3
+ shift 2
+fi
+
+GUESTNAME=$2
+IPADDR=$3
+MACADDR=$4
+
+case "$MACADDR" in
+ *@*)
+ VLAN="${MACADDR#*@}"
+ VLAN="${VLAN%%@*}"
+ MACADDR="${MACADDR%%@*}"
+ ;;
+ *)
+ VLAN=
+ ;;
+esac
+
+# did they ask to generate a custom MACADDR?
+# generate the unique string
+case "$MACADDR" in
+ U:*)
+ macunique="${MACADDR#*:}"
+ # now generate a 48-bit hash string from $macunique
+ MACADDR=$(echo $macunique|md5sum|sed 's/^\(..\)\(..\)\(..\)\(..\)\(..\).*$/02:\1:\2:\3:\4:\5/')
+ ;;
+esac
+
+
+[ "$IPADDR" ] || [ "$WAIT" ] || {
+ echo "Syntax:"
+ echo "pipework <hostinterface> [-i containerinterface] [-l localinterfacename] <guest> <ipaddr>/<subnet>[@default_gateway] [macaddr][@vlan]"
+ echo "pipework <hostinterface> [-i containerinterface] [-l localinterfacename] <guest> dhcp [macaddr][@vlan]"
+ echo "pipework route <guest> <route_command>"
+ echo "pipework --wait [-i containerinterface]"
+ exit 1
+}
+
+# Succeed if the given utility is installed. Fail otherwise.
+# For explanations about `which` vs `type` vs `command`, see:
+# http://stackoverflow.com/questions/592620/check-if-a-program-exists-from-a-bash-script/677212#677212
+# (Thanks to @chenhanxiao for pointing this out!)
+installed () {
+ command -v "$1" >/dev/null 2>&1
+}
+
+# Google Styleguide says error messages should go to standard error.
+warn () {
+ echo "$@" >&2
+}
+die () {
+ status="$1"
+ shift
+ warn "$@"
+ exit "$status"
+}
+
+# First step: determine type of first argument (bridge, physical interface...),
+# Unless "--wait" is set (then skip the whole section)
+if [ -z "$WAIT" ]; then
+ if [ -d "/sys/class/net/$IFNAME" ]
+ then
+ if [ -d "/sys/class/net/$IFNAME/bridge" ]; then
+ IFTYPE=bridge
+ BRTYPE=linux
+ elif installed ovs-vsctl && ovs-vsctl list-br|grep -q "^${IFNAME}$"; then
+ IFTYPE=bridge
+ BRTYPE=openvswitch
+ elif [ "$(cat "/sys/class/net/$IFNAME/type")" -eq 32 ]; then # InfiniBand IPoIB interface type 32
+ IFTYPE=ipoib
+ # The IPoIB kernel module is fussy, set device name to ib0 if not overridden
+ CONTAINER_IFNAME=${CONTAINER_IFNAME:-ib0}
+ PKEY=$VLAN
+ else IFTYPE=phys
+ fi
+ else
+ case "$IFNAME" in
+ br*)
+ IFTYPE=bridge
+ BRTYPE=linux
+ ;;
+ ovs*)
+ if ! installed ovs-vsctl; then
+ die 1 "Need OVS installed on the system to create an ovs bridge"
+ fi
+ IFTYPE=bridge
+ BRTYPE=openvswitch
+ ;;
+ route*)
+ IFTYPE=route
+ ;;
+ dummy*)
+ IFTYPE=dummy
+ ;;
+ *) die 1 "I do not know how to setup interface $IFNAME." ;;
+ esac
+ fi
+fi
+
+# Set the default container interface name to eth1 if not already set
+CONTAINER_IFNAME=${CONTAINER_IFNAME:-eth1}
+
+[ "$WAIT" ] && {
+ while true; do
+ # This first method works even without `ip` or `ifconfig` installed,
+ # but doesn't work on older kernels (e.g. CentOS 6.X). See #128.
+ grep -q '^1$' "/sys/class/net/$CONTAINER_IFNAME/carrier" && break
+ # This method hopefully works on those older kernels.
+ ip link ls dev "$CONTAINER_IFNAME" && break
+ sleep 1
+ done > /dev/null 2>&1
+ exit 0
+}
+
+[ "$IFTYPE" = bridge ] && [ "$BRTYPE" = linux ] && [ "$VLAN" ] && {
+ die 1 "VLAN configuration currently unsupported for Linux bridge."
+}
+
+[ "$IFTYPE" = ipoib ] && [ "$MACADDR" ] && {
+ die 1 "MACADDR configuration unsupported for IPoIB interfaces."
+}
+
+# Second step: find the guest (for now, we only support LXC containers)
+while read _ mnt fstype options _; do
+ [ "$fstype" != "cgroup" ] && continue
+ echo "$options" | grep -qw devices || continue
+ CGROUPMNT=$mnt
+done < /proc/mounts
+
+[ "$CGROUPMNT" ] || {
+ die 1 "Could not locate cgroup mount point."
+}
+
+# Try to find a cgroup matching exactly the provided name.
+N=$(find "$CGROUPMNT" -name "$GUESTNAME" | wc -l)
+case "$N" in
+ 0)
+ # If we didn't find anything, try to lookup the container with Docker.
+ if installed docker; then
+ RETRIES=3
+ while [ "$RETRIES" -gt 0 ]; do
+ DOCKERPID=$(docker inspect --format='{{ .State.Pid }}' "$GUESTNAME")
+ [ "$DOCKERPID" != 0 ] && break
+ sleep 1
+ RETRIES=$((RETRIES - 1))
+ done
+
+ [ "$DOCKERPID" = 0 ] && {
+ die 1 "Docker inspect returned invalid PID 0"
+ }
+
+ [ "$DOCKERPID" = "<no value>" ] && {
+ die 1 "Container $GUESTNAME not found, and unknown to Docker."
+ }
+ else
+ die 1 "Container $GUESTNAME not found, and Docker not installed."
+ fi
+ ;;
+ 1) true ;;
+ *) die 1 "Found more than one container matching $GUESTNAME." ;;
+esac
+
+# only check IPADDR if we are not in a route mode
+[ "$IFTYPE" != route ] && {
+ case "$IPADDR" in
+ # Let's check first if the user asked for DHCP allocation.
+ dhcp|dhcp:*)
+ # Use Docker-specific strategy to run the DHCP client
+ # from the busybox image, in the network namespace of
+ # the container.
+ if ! [ "$DOCKERPID" ]; then
+ warn "You asked for a Docker-specific DHCP method."
+ warn "However, $GUESTNAME doesn't seem to be a Docker container."
+ warn "Try to replace 'dhcp' with another option?"
+ die 1 "Aborting."
+ fi
+ DHCP_CLIENT=${IPADDR%%:*}
+ ;;
+ udhcpc|udhcpc:*|udhcpc-f|udhcpc-f:*|dhcpcd|dhcpcd:*|dhclient|dhclient:*|dhclient-f|dhclient-f:*)
+ DHCP_CLIENT=${IPADDR%%:*}
+ # did they ask for the client to remain?
+ DHCP_FOREGROUND=
+ [ "${DHCP_CLIENT: -2}" = '-f' ] && {
+ DHCP_FOREGROUND=true
+ }
+ DHCP_CLIENT=${DHCP_CLIENT%-f}
+ if ! installed "$DHCP_CLIENT"; then
+ die 1 "You asked for DHCP client $DHCP_CLIENT, but I can't find it."
+ fi
+ ;;
+ # Alright, no DHCP? Then let's see if we have a subnet *and* gateway.
+ */*@*)
+ GATEWAY="${IPADDR#*@}" GATEWAY="${GATEWAY%%@*}"
+ IPADDR="${IPADDR%%@*}"
+ ;;
+ # No gateway? We need at least a subnet, anyway!
+ */*) : ;;
+ # ... No? Then stop right here.
+ *)
+ warn "The IP address should include a netmask."
+ die 1 "Maybe you meant $IPADDR/24 ?"
+ ;;
+ esac
+}
+
+# If a DHCP method was specified, extract the DHCP options.
+if [ "$DHCP_CLIENT" ]; then
+ case "$IPADDR" in
+ *:*) DHCP_OPTIONS="${IPADDR#*:}" ;;
+ esac
+fi
+
+if [ "$DOCKERPID" ]; then
+ NSPID=$DOCKERPID
+else
+ NSPID=$(head -n 1 "$(find "$CGROUPMNT" -name "$GUESTNAME" | head -n 1)/tasks")
+ [ "$NSPID" ] || {
+ # it is an alternative way to get the pid
+ NSPID=$(lxc-info -n "$GUESTNAME" | grep PID | grep -Eo '[0-9]+')
+ [ "$NSPID" ] || {
+ die 1 "Could not find a process inside container $GUESTNAME."
+ }
+ }
+fi
+
+# Check if an incompatible VLAN device already exists
+[ "$IFTYPE" = phys ] && [ "$VLAN" ] && [ -d "/sys/class/net/$IFNAME.$VLAN" ] && {
+  ip -d link show "$IFNAME.$VLAN" | grep -q "vlan.*id $VLAN" || {
+    die 1 "$IFNAME.$VLAN already exists but is not a VLAN device for tag $VLAN"
+ }
+}
+
+[ ! -d /var/run/netns ] && mkdir -p /var/run/netns
+rm -f "/var/run/netns/$NSPID"
+ln -s "/proc/$NSPID/ns/net" "/var/run/netns/$NSPID"
+
+# Check if we need to create a bridge.
+[ "$IFTYPE" = bridge ] && [ ! -d "/sys/class/net/$IFNAME" ] && {
+ [ "$BRTYPE" = linux ] && {
+ (ip link add dev "$IFNAME" type bridge > /dev/null 2>&1) || (brctl addbr "$IFNAME")
+ ip link set "$IFNAME" up
+ }
+ [ "$BRTYPE" = openvswitch ] && {
+ ovs-vsctl add-br "$IFNAME"
+ }
+}
+
+[ "$IFTYPE" != "route" ] && [ "$IFTYPE" != "dummy" ] && MTU=$(ip link show "$IFNAME" | awk '{print $5}')
+
+# If it's a bridge, we need to create a veth pair
+[ "$IFTYPE" = bridge ] && {
+ if [ -z "$LOCAL_IFNAME" ]; then
+ LOCAL_IFNAME="v${CONTAINER_IFNAME}pl${NSPID}"
+ fi
+ GUEST_IFNAME="v${CONTAINER_IFNAME}pg${NSPID}"
+ # Does the link already exist?
+ if ip link show "$LOCAL_IFNAME" >/dev/null 2>&1; then
+ # link exists, is it in use?
+ if ip link show "$LOCAL_IFNAME" up | grep -q "UP"; then
+ echo "Link $LOCAL_IFNAME exists and is up"
+ exit 1
+ fi
+ # delete the link so we can re-add it afterwards
+ ip link del "$LOCAL_IFNAME"
+ fi
+ ip link add name "$LOCAL_IFNAME" mtu "$MTU" type veth peer name "$GUEST_IFNAME" mtu "$MTU"
+ case "$BRTYPE" in
+ linux)
+ (ip link set "$LOCAL_IFNAME" master "$IFNAME" > /dev/null 2>&1) || (brctl addif "$IFNAME" "$LOCAL_IFNAME")
+ ;;
+ openvswitch)
+ if ! ovs-vsctl list-ports "$IFNAME" | grep -q "^${LOCAL_IFNAME}$"; then
+ ovs-vsctl add-port "$IFNAME" "$LOCAL_IFNAME" ${VLAN:+tag="$VLAN"}
+ fi
+ ;;
+ esac
+ ip link set "$LOCAL_IFNAME" up
+}
+
+# If it's a physical interface, create a macvlan subinterface
+[ "$IFTYPE" = phys ] && {
+ [ "$VLAN" ] && {
+ [ ! -d "/sys/class/net/${IFNAME}.${VLAN}" ] && {
+ ip link add link "$IFNAME" name "$IFNAME.$VLAN" mtu "$MTU" type vlan id "$VLAN"
+ }
+ ip link set "$IFNAME" up
+ IFNAME=$IFNAME.$VLAN
+ }
+ GUEST_IFNAME=ph$NSPID$CONTAINER_IFNAME
+ ip link add link "$IFNAME" dev "$GUEST_IFNAME" mtu "$MTU" type macvlan mode bridge
+ ip link set "$IFNAME" up
+}
+
+# If it's an IPoIB interface, create a virtual IPoIB interface (the IPoIB
+# equivalent of a macvlan device)
+#
+# Note: no macvlan subinterface nor Ethernet bridge can be created on top of an
+# IPoIB interface. InfiniBand is not Ethernet. IPoIB is an IP layer on top of
+# InfiniBand, without an intermediate Ethernet layer.
+[ "$IFTYPE" = ipoib ] && {
+ GUEST_IFNAME="${IFNAME}.${NSPID}"
+
+ # If a partition key is provided, use it
+ [ "$PKEY" ] && {
+ GUEST_IFNAME="${IFNAME}.${PKEY}.${NSPID}"
+ PKEY="pkey 0x$PKEY"
+ }
+
+ ip link add link "$IFNAME" name "$GUEST_IFNAME" type ipoib $PKEY
+ ip link set "$IFNAME" up
+}
+
+# If its a dummy interface, create a dummy interface.
+[ "$IFTYPE" = dummy ] && {
+ GUEST_IFNAME=du$NSPID$CONTAINER_IFNAME
+ ip link add dev "$GUEST_IFNAME" type dummy
+}
+
+# If the `route` command was specified ...
+if [ "$IFTYPE" = route ]; then
+ # ... discard the first two arguments and pass the rest to the route command.
+ shift 2
+ ip netns exec "$NSPID" ip route "$@"
+else
+ # Otherwise, run normally.
+ ip link set "$GUEST_IFNAME" netns "$NSPID"
+ ip netns exec "$NSPID" ip link set "$GUEST_IFNAME" name "$CONTAINER_IFNAME"
+ [ "$MACADDR" ] && ip netns exec "$NSPID" ip link set dev "$CONTAINER_IFNAME" address "$MACADDR"
+
+ # When using any of the DHCP methods, we start a DHCP client in the
+ # network namespace of the container. With the 'dhcp' method, the
+ # client used is taken from the Docker busybox image (therefore
+ # requiring no specific client installed on the host). Other methods
+ # use a locally installed client.
+ case "$DHCP_CLIENT" in
+ dhcp)
+ docker run -d --net container:$GUESTNAME --cap-add NET_ADMIN \
+ busybox udhcpc -i "$CONTAINER_IFNAME" -x "hostname:$GUESTNAME" \
+ $DHCP_OPTIONS \
+ >/dev/null
+ ;;
+ udhcpc)
+ DHCP_Q="-q"
+ [ "$DHCP_FOREGROUND" ] && {
+ DHCP_OPTIONS="$DHCP_OPTIONS -f"
+ }
+ ip netns exec "$NSPID" "$DHCP_CLIENT" -qi "$CONTAINER_IFNAME" \
+ -x "hostname:$GUESTNAME" \
+ -p "/var/run/udhcpc.$GUESTNAME.pid" \
+ $DHCP_OPTIONS
+ [ ! "$DHCP_FOREGROUND" ] && {
+ rm "/var/run/udhcpc.$GUESTNAME.pid"
+ }
+ ;;
+ dhclient)
+ ip netns exec "$NSPID" "$DHCP_CLIENT" "$CONTAINER_IFNAME" \
+ -pf "/var/run/dhclient.$GUESTNAME.pid" \
+ -lf "/etc/dhclient/dhclient.$GUESTNAME.leases" \
+ $DHCP_OPTIONS
+ # kill dhclient after get ip address to prevent device be used after container close
+ [ ! "$DHCP_FOREGROUND" ] && {
+ kill "$(cat "/var/run/dhclient.$GUESTNAME.pid")"
+ rm "/var/run/dhclient.$GUESTNAME.pid"
+ }
+ ;;
+ dhcpcd)
+ ip netns exec "$NSPID" "$DHCP_CLIENT" -q "$CONTAINER_IFNAME" -h "$GUESTNAME"
+ ;;
+ "")
+ if installed ipcalc; then
+ eval $(ipcalc -b $IPADDR)
+ ip netns exec "$NSPID" ip addr add "$IPADDR" brd "$BROADCAST" dev "$CONTAINER_IFNAME"
+ else
+ ip netns exec "$NSPID" ip addr add "$IPADDR" dev "$CONTAINER_IFNAME"
+ fi
+
+ [ "$GATEWAY" ] && {
+ ip netns exec "$NSPID" ip route delete default >/dev/null 2>&1 && true
+ }
+ ip netns exec "$NSPID" ip link set "$CONTAINER_IFNAME" up
+ [ "$GATEWAY" ] && {
+ ip netns exec "$NSPID" ip route get "$GATEWAY" >/dev/null 2>&1 || \
+ ip netns exec "$NSPID" ip route add "$GATEWAY/32" dev "$CONTAINER_IFNAME"
+ ip netns exec "$NSPID" ip route replace default via "$GATEWAY"
+ }
+ ;;
+ esac
+
+ # Give our ARP neighbors a nudge about the new interface
+ if installed arping; then
+ IPADDR=$(echo "$IPADDR" | cut -d/ -f1)
+ ip netns exec "$NSPID" arping -c 1 -A -I "$CONTAINER_IFNAME" "$IPADDR" > /dev/null 2>&1 || true
+ else
+ echo "Warning: arping not found; interface may not be immediately reachable"
+ fi
+fi
+# Remove NSPID to avoid `ip netns` catch it.
+rm -f "/var/run/netns/$NSPID"
+
+# vim: set tabstop=2 shiftwidth=2 softtabstop=2 expandtab :
diff --git a/src/test/cordvtn/access-docker/run-agent.sh b/src/test/cordvtn/access-docker/run-agent.sh
new file mode 100755
index 0000000..da6c69f
--- /dev/null
+++ b/src/test/cordvtn/access-docker/run-agent.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+sudo docker run --privileged --cap-add=ALL -d --name access-agent -t ubuntu:14.04 /bin/bash
+sudo ./pipework br-mgmt -i eth1 access-agent 10.10.10.101/24
+sudo ./pipework br-int -i eth2 access-agent 10.168.0.101/24
diff --git a/src/test/cordvtn/cordvtnTest.py b/src/test/cordvtn/cordvtnTest.py
new file mode 100644
index 0000000..d6e3944
--- /dev/null
+++ b/src/test/cordvtn/cordvtnTest.py
@@ -0,0 +1,178 @@
+#
+# Copyright 2016-present Ciena Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import unittest
+import os,sys
+import time
+import keystoneclient.v2_0.client as ksclient
+import keystoneclient.apiclient.exceptions
+import neutronclient.v2_0.client as nclient
+import neutronclient.common.exceptions
+import novaclient.v1_1.client as novaclient
+from multiprocessing import Pool
+from nose.tools import assert_equal
+from CordLogger import CordLogger
+
+class cordvtn_exchange(CordLogger):
+
+ app = 'org.opencord.cordvtn'
+
+ @classmethod
+ def setUpClass(cls):
+ cls.olt = OltConfig()
+ cls.port_map, _ = cls.olt.olt_port_map()
+ if not cls.port_map:
+ cls.port_map = g_subscriber_port_map
+ cls.iface = cls.port_map[1]
+
+ def setUp(self):
+ ''' Activate the cord vtn app'''
+        super(cordvtn_exchange, self).setUp()
+ self.maxDiff = None ##for assert_equal compare outputs on failure
+ self.onos_ctrl = OnosCtrl(self.app)
+ status, _ = self.onos_ctrl.activate()
+ assert_equal(status, True)
+ time.sleep(3)
+
+ def tearDown(self):
+ '''Deactivate the cord vtn app'''
+ self.onos_ctrl.deactivate()
+        super(cordvtn_exchange, self).tearDown()
+
+ def onos_load_config(self, config):
+ status, code = OnosCtrl.config(config)
+ if status is False:
+ log.info('JSON request returned status %d' %code)
+ assert_equal(status, True)
+ time.sleep(3)
+
+ def create_tenant(tenant_name):
+ new_tenant = keystone.tenants.create(tenant_name=tenant_name,
+ description="CORD Tenant \
+ created",
+ enabled=True)
+ tenant_id = new_tenant.id
+ tenant_status = True
+ user_data = []
+ for j in range(2):
+ j += 1
+ user_name = tenant_name + '-user-' + str(j)
+ user_data.append(create_user(user_name, tenant_id))
+
+ print " Tenant and User Created"
+
+ tenant_data = {'tenant_name': tenant_name,
+ 'tenant_id': tenant_id,
+ 'status': tenant_status}
+ return tenant_data
+
+ def create_user(user_name, tenant_id):
+ new_user = keystone.users.create(name=user_name,
+ password="ubuntu",
+ tenant_id=tenant_id)
+ print(' - Created User %s' % user_name)
+ keystone.roles.add_user_role(new_user, member_role, tenant_id)
+ if assign_admin:
+ admin_user = keystone.users.find(name='admin')
+ admin_role = keystone.roles.find(name='admin')
+ keystone.roles.add_user_role(admin_user, admin_role, tenant_id)
+ user_data = {'name': new_user.name,
+ 'id': new_user.id}
+ return user_data
+
+ def delete_tenant(tenant_name):
+ tenant = keystone.tenants.find(name=tenant_name)
+ for j in range(2):
+ j += 1
+ user_name = tenant_name + '-user-' + str(j)
+ delete_user(user_name, tenant.id)
+ tenant.delete()
+ print(' - Deleted Tenant %s ' % tenant_name)
+ return True
+
+ def delete_user(user_name, tenant_id):
+ user = keystone.users.find(name=user_name)
+ user.delete()
+
+ print(' - Deleted User %s' % user_name)
+ return True
+
+ def get_neutron_credentials():
+ d = {}
+ d['username'] = os.environ['OS_USERNAME']
+ d['password'] = os.environ['OS_PASSWORD']
+ d['auth_url'] = os.environ['OS_AUTH_URL']
+ d['tenant_name'] = os.environ['OS_TENANT_NAME']
+ return d
+
+
+ def create_network(i):
+ neutron_credentials = get_neutron_credentials()
+        neutron = nclient.Client(**neutron_credentials)
+ json = {'network': {'name': 'network-' + str(i),
+ 'admin_state_up': True}}
+ while True:
+ neutron.create_network(body=json)
+ print '\nnetwork-' + str(i) + ' created'
+ break
+
+ pool = Pool(processes=5)
+ os.system("neutron quota-update --network 105")
+ for i in range(1,5):
+ pool.apply_async(create_network, (i, ))
+ pool.close()
+ pool.join()
+
+ def test_cordvtn_basic_tenant(self):
+ pass
+
+ def test_cordvtn_mgmt_network(self):
+ pass
+
+ def test_cordvtn_data_network(self):
+ pass
+
+ def test_cordvtn_public_network(self):
+ pass
+
+ def test_cordvtn_in_same_network(self):
+ pass
+
+ def test_cordvtn_local_mgmt_network(self):
+ pass
+
+ def test_cordvtn_service_dependency(self):
+ pass
+
+ def test_cordvtn_service_dependency_with_xos(self):
+ pass
+
+ def test_cordvtn_vsg_xos_service_profile(self):
+ pass
+
+ def test_cordvtn_access_agent(self):
+ pass
+
+ def test_cordvtn_network_creation(self):
+ pass
+
+ def test_cordvtn_removing_service_network(self):
+ pass
+
+ def test_cordvtn_web_application(self):
+ pass
+
+ def test_cordvtn_service_port(self):
+ pass
diff --git a/src/test/cordvtn/network-cfg.json b/src/test/cordvtn/network-cfg.json
new file mode 100644
index 0000000..09ca263
--- /dev/null
+++ b/src/test/cordvtn/network-cfg.json
@@ -0,0 +1,49 @@
+{
+ "apps" : {
+ "org.opencord.vtn" : {
+ "cordvtn" : {
+ "privateGatewayMac" : "00:00:00:00:00:01",
+ "publicGateways" : [
+ {
+ "gatewayIp" : "20.0.0.1",
+ "gatewayMac" : "fe:00:00:00:00:01"
+ }
+ ],
+ "localManagementIp" : "172.27.0.1/24",
+ "ovsdbPort" : "6640",
+ "ssh" : {
+ "sshPort" : "22",
+ "sshUser" : "root",
+ "sshKeyFile" : "/root/node_key"
+ },
+ "openstack" : {
+ "endpoint" : "http://10.90.0.58:5000/v2.0/",
+ "tenant" : "admin",
+ "user" : "admin",
+ "password" : "ADMIN_PASS"
+ },
+ "xos" : {
+ "endpoint" : "http://10.90.0.58:80",
+ "user" : "padmin@vicci.org",
+ "password" : "letmein"
+ },
+ "nodes" : [
+ {
+ "hostname" : "compute-01",
+ "hostManagementIp" : "10.90.0.64/24",
+ "dataPlaneIp" : "192.168.199.1/24",
+ "dataPlaneIntf" : "veth1",
+ "bridgeId" : "of:0000000000000001"
+ },
+ {
+ "hostname" : "compute-02",
+ "hostManagementIp" : "10.90.0.65/24",
+ "dataPlaneIp" : "192.168.199.2/24",
+ "dataPlaneIntf" : "veth1",
+ "bridgeId" : "of:0000000000000002"
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/src/test/cordvtn/vtn-setup.sh b/src/test/cordvtn/vtn-setup.sh
new file mode 100644
index 0000000..cf6193f
--- /dev/null
+++ b/src/test/cordvtn/vtn-setup.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+sudo brctl addbr fabric
+sudo ip link set fabric up
+sudo ip link add veth0 type veth peer name veth1
+sudo ip link set veth0 up
+sudo ip link set veth1 up
+sudo brctl addif fabric veth0
+sudo brctl addif fabric eth1
+sudo ip addr flush eth1
+sudo ip link set address 00:00:00:00:00:01 dev fabric
+sudo ip link set address 00:00:00:00:00:01 dev eth1
+sudo ip address add 20.0.0.1/24 dev fabric
+sudo ip address add 10.168.0.1/24 dev fabric
+sudo iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE