Test-Scale:
Refactor the existing scale tests and add new ones, updating the utility
functions and segregating the util functions for all modules into a separate file.
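The shared helpers (random multicast/unicast address generation, SSM table
load, IGMP join and traffic verification, flow setup) are now reached through
the scale class of the Scale module rather than methods on the test class.
Illustrative sketch based on the calls used in these tests:

    from Scale import scale
    groups = scale().generate_random_multicast_ip_addresses(count=10)
    sources = scale().generate_random_unicast_ip_addresses(count=10)
    scale().onos_ssm_table_load(groups, src_list=sources, flag=True)
    scale().send_igmp_join(groups=[groups[0]], src_list=[sources[0]],
                           record_type=IGMP_V3_GR_TYPE_INCLUDE, iface='veth0')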
Change-Id: I75a193cb563cbd57564f6d714b878ca96930cf9f
diff --git a/src/test/scale/scaleTest.py b/src/test/scale/scaleTest.py
index f6a1ee0..1c031a2 100644
--- a/src/test/scale/scaleTest.py
+++ b/src/test/scale/scaleTest.py
@@ -26,18 +26,27 @@
from OltConfig import OltConfig
from onosclidriver import OnosCliDriver
from SSHTestAgent import SSHTestAgent
-from Channels import IgmpChannel
+from Channels import Channels, IgmpChannel
from IGMP import *
from CordLogger import CordLogger
from VSGAccess import VSGAccess
+from OnosFlowCtrl import OnosFlowCtrl
+#imports for cord-subscriber module
+from subscriberDb import SubscriberDB
+from Stats import Stats
+from threadPool import ThreadPool
+import threading
+from EapTLS import TLSAuthTest
from CordTestUtils import log_test as log
-from CordTestConfig import setup_module, running_on_ciab, teardown_module
+from CordTestConfig import setup_module, running_on_ciab
from OnosCtrl import OnosCtrl
from CordContainer import Onos
from CordSubscriberUtils import CordSubscriberUtils, XosUtils
-from vsgTest import vsg_exchange
+from CordTestServer import cord_test_onos_restart, cord_test_quagga_restart, cord_test_shell, cord_test_radius_restart
+from Scale import scale
log.setLevel('INFO')
+
class scale_exchange(CordLogger):
HOST = "10.1.0.1"
USER = "vagrant"
@@ -46,13 +55,17 @@
HEAD_NODE = head_node + '.cord.lab' if len(head_node.split('.')) == 1 else head_node
test_path = os.path.dirname(os.path.realpath(__file__))
olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(test_path, '..', 'setup/olt_config.json'))
+ restApiXos = None
+ cord_subscriber = None
SUBSCRIBER_ACCOUNT_NUM = 100
SUBSCRIBER_S_TAG = 500
SUBSCRIBER_C_TAG = 500
SUBSCRIBERS_PER_S_TAG = 8
+ subscriber_info = []
+ volt_subscriber_info = []
restore_methods = []
TIMEOUT=120
- NUM_SUBSCRIBERS = 100
+ NUM_SUBSCRIBERS = 16
wan_intf_ip = '10.6.1.129'
V_INF1 = 'veth0'
V_INF2 = 'veth1'
@@ -74,6 +87,7 @@
acl_app = 'org.onosproject.acl'
aaa_app = 'org.opencord.aaa'
app = 'org.onosproject.cli'
+ APP_NAME = 'org.ciena.xconnect'
INTF_TX_DEFAULT = 'veth2'
INTF_RX_DEFAULT = 'veth0'
default_port_map = {
@@ -83,56 +97,173 @@
INTF_RX_DEFAULT : PORT_RX_DEFAULT
}
vrouter_apps = ('org.onosproject.proxyarp', 'org.onosproject.hostprovider', 'org.onosproject.vrouter', 'org.onosproject.fwd')
+ MAX_PORTS = 100
+ subscriber_apps = ('org.opencord.aaa', 'org.onosproject.dhcp')
+ olt_apps = () #'org.opencord.cordmcast')
+ vtn_app = 'org.opencord.vtn'
+ table_app = 'org.ciena.cordigmp'
+ aaa_loaded = False
+ table_app_file = os.path.join(test_path, '..', 'apps/ciena-cordigmp-multitable-2.0-SNAPSHOT.oar')
+ app_file = os.path.join(test_path, '..', 'apps/ciena-cordigmp-2.0-SNAPSHOT.oar')
+ olt_app_file = os.path.join(test_path, '..', 'apps/olt-app-1.2-SNAPSHOT.oar')
+ olt_app_name = 'org.onosproject.olt'
+ onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
+ cpqd_path = os.path.join(test_path, '..', 'setup')
+ ovs_path = cpqd_path
+ test_services = ('IGMP', 'TRAFFIC')
+ num_joins = 0
+ num_subscribers = 0
+ leave_flag = True
+ num_channels = 0
+ recv_timeout = False
+ onos_restartable = bool(int(os.getenv('ONOS_RESTART', 0)))
+ SUBSCRIBER_TIMEOUT = 300
+ device_id = 'of:' + get_mac()
+
+ CLIENT_CERT = """-----BEGIN CERTIFICATE-----
+MIICuDCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwNjA2MjExMjI3WhcN
+MTcwNjAxMjExMjI3WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
+hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
+gYEAwvXiSzb9LZ6c7uNziUfKvoHO7wu/uiFC5YUpXbmVGuGZizbVrny0xnR85Dfe
++9R4diansfDhIhzOUl1XjN3YDeSS9OeF5YWNNE8XDhlz2d3rVzaN6hIhdotBkUjg
+rUewjTg5OFR31QEyG3v8xR3CLgiE9xQELjZbSA07pD79zuUCAwEAAaNPME0wEwYD
+VR0lBAwwCgYIKwYBBQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5l
+eGFtcGxlLmNvbS9leGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOBgQDAjkrY
+6tDChmKbvr8w6Du/t8vHjTCoCIocHTN0qzWOeb1YsAGX89+TrWIuO1dFyYd+Z0KC
+PDKB5j/ygml9Na+AklSYAVJIjvlzXKZrOaPmhZqDufi+rXWti/utVqY4VMW2+HKC
+nXp37qWeuFLGyR1519Y1d6F/5XzqmvbwURuEug==
+-----END CERTIFICATE-----"""
+
CLIENT_CERT_INVALID = '''-----BEGIN CERTIFICATE-----
-MIIEyTCCA7GgAwIBAgIJAN3OagiHm6AXMA0GCSqGSIb3DQEBCwUAMIGLMQswCQYD
-VQQGEwJVUzELMAkGA1UECAwCQ0ExEjAQBgNVBAcMCVNvbWV3aGVyZTETMBEGA1UE
-CgwKQ2llbmEgSW5jLjEeMBwGCSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYw
-JAYDVQQDDB1FeGFtcGxlIENlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0xNzAzMTEw
-MDQ3NDNaFw0yMjEwMzEwMDQ3NDNaMIGLMQswCQYDVQQGEwJVUzELMAkGA1UECAwC
-Q0ExEjAQBgNVBAcMCVNvbWV3aGVyZTETMBEGA1UECgwKQ2llbmEgSW5jLjEeMBwG
-CSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYwJAYDVQQDDB1FeGFtcGxlIENl
-cnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
-ggEBALYkVvncfeRel/apXy5iODla5H7sUpU7a+pwT7nephmjKDh0GPX/t5GUwgkB
-1zQAEj0IPoxZIfSAGSFP/mqTUK2sm7qerArih0E3kBRpnBKJZB/4r1OTZ04CsuRQ
-QJOqcI0mZJWUKEcahN4yZvRyxeiCeFFoc0Nw787MQHhD9lZTqJUoAvautUe1GCjG
-46DS4MzpWNGkqn5/ZC8lQ198AceMwf2pJRuOQg5cPwp65+dKNLUMLiSUV7JpvmAo
-of4MHtGaBxKHESZ2jPiNTT2uKI/7KxH3Pr/ctft3bcSX2d4q49B2tdEIRzC0ankm
-CrxFcq9Cb3MGaNuwWAtk3fOGKusCAwEAAaOCASwwggEoMB0GA1UdDgQWBBRtf8rH
-zJW7rliW1eZnbVbSb3obfDCBwAYDVR0jBIG4MIG1gBRtf8rHzJW7rliW1eZnbVbS
-b3obfKGBkaSBjjCBizELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQH
-DAlTb21ld2hlcmUxEzARBgNVBAoMCkNpZW5hIEluYy4xHjAcBgkqhkiG9w0BCQEW
-D2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAwwdRXhhbXBsZSBDZXJ0aWZpY2F0ZSBB
-dXRob3JpdHmCCQDdzmoIh5ugFzAMBgNVHRMEBTADAQH/MDYGA1UdHwQvMC0wK6Ap
-oCeGJWh0dHA6Ly93d3cuZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZI
-hvcNAQELBQADggEBAKWjORcBc1WK3r8mq88ipUC2UR1qvxdON4K/hd+rdAj0E/xA
-QCJDORKno8f2MktqLfhU0amCVBvwdfmVFmVDtl38b1pu+mNFO+FDp04039Fd5ThM
-iYmiQjnJ2IcAi/CILtrjURvJUPSOX9lviOtcla0HW94dgA9IDRs5frrWO9jkcxXR
-+oz3LNMfVnXqhoHHQ1RtvqOozhEsUZZWY5MuUxRY25peeZ7m1vz+zDa/DbrV1wsP
-dxOocmYdGFIAT9AiRnR4Jc/hqabBVNMZlGAA+2dELajpaHqb4yx5gBLVkT7VgHjI
-7cp7jLRL7T+i4orZiAXpeEpAeOrP8r0DYTJi/8A=
+MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
+MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
+hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
+5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
+tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
+OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
+qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
+2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
+BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
+eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
+MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
+VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
+RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
+dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
+T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
+yg==
-----END CERTIFICATE-----'''
@classmethod
- def setUpClass(cls):
+ def setUpCordApi(cls):
num_subscribers = max(cls.NUM_SUBSCRIBERS, 10)
- vsg_exchange.vsgSetup(num_subscribers = num_subscribers,
- account_num = cls.SUBSCRIBER_ACCOUNT_NUM,
- s_tag = cls.SUBSCRIBER_S_TAG,
- c_tag = cls.SUBSCRIBER_C_TAG,
- subscribers_per_s_tag = cls.SUBSCRIBERS_PER_S_TAG)
+ cls.cord_subscriber = CordSubscriberUtils(num_subscribers,
+ account_num = cls.SUBSCRIBER_ACCOUNT_NUM,
+ s_tag = cls.SUBSCRIBER_S_TAG,
+ c_tag = cls.SUBSCRIBER_C_TAG,
+ subscribers_per_s_tag = cls.SUBSCRIBERS_PER_S_TAG)
+ cls.restApiXos = XosUtils.getRestApi()
+
+ @classmethod
+ def setUpClass(cls):
+        log.info('Setting up the scale test class')
+ cls.controllers = get_controllers()
+ cls.controller = cls.controllers[0]
+ cls.cli = None
+ cls.on_pod = running_on_pod()
+ cls.on_ciab = running_on_ciab()
+ cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
+ cls.vcpes = cls.olt.get_vcpes()
+ cls.vcpes_dhcp = cls.olt.get_vcpes_by_type('dhcp')
+ cls.vcpes_reserved = cls.olt.get_vcpes_by_type('reserved')
+ cls.dhcp_vcpes_reserved = [ 'vcpe{}.{}.{}'.format(i, cls.vcpes_reserved[i]['s_tag'], cls.vcpes_reserved[i]['c_tag'])
+ for i in xrange(len(cls.vcpes_reserved)) ]
+ cls.untagged_dhcp_vcpes_reserved = [ 'vcpe{}'.format(i) for i in xrange(len(cls.vcpes_reserved)) ]
+ cls.container_vcpes_reserved = [ 'vcpe-{}-{}'.format(vcpe['s_tag'], vcpe['c_tag']) for vcpe in cls.vcpes_reserved ]
+ vcpe_dhcp_reserved = None
+ vcpe_container_reserved = None
+ if cls.vcpes_reserved:
+ vcpe_dhcp_reserved = cls.dhcp_vcpes_reserved[0]
+ if cls.on_pod is False:
+ vcpe_dhcp_reserved = cls.untagged_dhcp_vcpes_reserved[0]
+ vcpe_container_reserved = cls.container_vcpes_reserved[0]
+
+ cls.vcpe_dhcp_reserved = vcpe_dhcp_reserved
+ cls.vcpe_container_reserved = vcpe_container_reserved
+ dhcp_vcpe_offset = len(cls.vcpes_reserved)
+ cls.dhcp_vcpes = [ 'vcpe{}.{}.{}'.format(i+dhcp_vcpe_offset, cls.vcpes_dhcp[i]['s_tag'], cls.vcpes_dhcp[i]['c_tag'])
+ for i in xrange(len(cls.vcpes_dhcp)) ]
+ cls.untagged_dhcp_vcpes = [ 'vcpe{}'.format(i+dhcp_vcpe_offset) for i in xrange(len(cls.vcpes_dhcp)) ]
+ cls.container_vcpes = [ 'vcpe-{}-{}'.format(vcpe['s_tag'], vcpe['c_tag']) for vcpe in cls.vcpes_dhcp ]
+ vcpe_dhcp = None
+ vcpe_container = None
+ #cache the first dhcp vcpe in the class for quick testing
+ if cls.vcpes_dhcp:
+ vcpe_container = cls.container_vcpes[0]
+ vcpe_dhcp = cls.dhcp_vcpes[0]
+ if cls.on_pod is False:
+ vcpe_dhcp = cls.untagged_dhcp_vcpes[0]
+ cls.vcpe_container = vcpe_container_reserved or vcpe_container
+ cls.vcpe_dhcp = vcpe_dhcp_reserved or vcpe_dhcp
+ VSGAccess.setUp()
+ cls.setUpCordApi()
+ if cls.on_pod is True:
+ cls.openVCPEAccess(cls.cord_subscriber.volt_subscriber_info)
@classmethod
def tearDownClass(cls):
- vsg_exchange.vsgTeardown()
+ VSGAccess.tearDown()
+ if cls.on_pod is True:
+ cls.closeVCPEAccess(cls.cord_subscriber.volt_subscriber_info)
def log_set(self, level = None, app = 'org.onosproject'):
CordLogger.logSet(level = level, app = app, controllers = self.controllers, forced = True)
+######################## vsg - vcpe utility functions #########################
+ @classmethod
+ def closeVCPEAccess(cls, volt_subscriber_info):
+ OnosCtrl.uninstall_app(cls.APP_NAME, onos_ip = cls.HEAD_NODE)
@classmethod
- def config_restore(cls):
- """Restore the vsg test configuration on test case failures"""
- for restore_method in cls.restore_methods:
- restore_method()
+ def openVCPEAccess(cls, volt_subscriber_info):
+ """
+ This code is used to configure leaf switch for head node access to compute node over fabric.
+ Care is to be taken to avoid overwriting existing/default vcpe flows.
+ The access is opened for generated subscriber info which should not overlap.
+ We target the fabric onos instance on head node.
+ """
+ version = Onos.getVersion(onos_ip = cls.HEAD_NODE)
+ app_version = '1.0-SNAPSHOT'
+ major = int(version.split('.')[0])
+ minor = int(version.split('.')[1])
+ if major > 1:
+ app_version = '2.0-SNAPSHOT'
+ elif major == 1 and minor > 10:
+ app_version = '2.0-SNAPSHOT'
+ cls.APP_FILE = os.path.join(cls.test_path, '..', 'apps/xconnect-{}.oar'.format(app_version))
+ OnosCtrl.install_app(cls.APP_FILE, onos_ip = cls.HEAD_NODE)
+ time.sleep(2)
+ s_tags = map(lambda tenant: int(tenant['voltTenant']['s_tag']), volt_subscriber_info)
+ #only get unique vlan tags
+ s_tags = list(set(s_tags))
+ devices = OnosCtrl.get_device_ids(controller = cls.HEAD_NODE)
+ if devices:
+ device_config = {}
+ for device in devices:
+ device_config[device] = []
+ for s_tag in s_tags:
+ xconnect_config = {'vlan': s_tag, 'ports' : [ cls.FABRIC_PORT_HEAD_NODE, cls.FABRIC_PORT_COMPUTE_NODE ] }
+ device_config[device].append(xconnect_config)
+
+ cfg = { 'apps' : { 'org.ciena.xconnect' : { 'xconnectTestConfig' : device_config } } }
+ OnosCtrl.config(cfg, controller = cls.HEAD_NODE)
def get_system_cpu_usage(self):
""" Getting compute node CPU usage """
@@ -142,248 +273,218 @@
assert_equal(status, True)
return float(output)
- def onos_load_config(self, config):
- #log_test.info('onos load config is %s'%config)
- status, code = OnosCtrl.config(config)
- if status is False:
- log_test.info('JSON request returned status %d' %code)
- assert_equal(status, True)
- time.sleep(2)
-
- def onos_ssm_table_load(self, groups, src_list = ['1.2.3.4'],flag = False):
- ssm_dict = {'apps' : { 'org.opencord.igmp' : { 'ssmTranslate' : [] } } }
- ssm_xlate_list = ssm_dict['apps']['org.opencord.igmp']['ssmTranslate']
- if flag: #to maintain seperate group-source pair.
- for i in range(len(groups)):
- d = {}
- d['source'] = src_list[i] or '0.0.0.0'
- d['group'] = groups[i]
- ssm_xlate_list.append(d)
- else:
- for g in groups:
- for s in src_list:
- d = {}
- d['source'] = s or '0.0.0.0'
- d['group'] = g
- ssm_xlate_list.append(d)
- self.onos_load_config(ssm_dict)
- cord_port_map = {}
- for g in groups:
- cord_port_map[g] = (self.PORT_TX_DEFAULT, self.PORT_RX_DEFAULT)
- IgmpChannel().cord_port_table_load(cord_port_map)
- time.sleep(2)
-
- def generate_random_multicast_ip_addresses(self,count=500):
- multicast_ips = []
- while(count >= 1):
- ip = '.'.join([str(random.randint(224,239)),str(random.randint(1,254)),str(random.randint(1,254)),str(random.randint(1,254))])
- if ip in multicast_ips:
- pass
- else:
- multicast_ips.append(ip)
- count -= 1
- return multicast_ips
-
- def generate_random_unicast_ip_addresses(self,count=500):
- unicast_ips = []
- while(count >= 1):
- ip = '.'.join([str(random.randint(11,126)),str(random.randint(1,254)),str(random.randint(1,254)),str(random.randint(1,254))])
- if ip in unicast_ips:
- pass
- else:
- unicast_ips.append(ip)
- count -= 1
- return unicast_ips
-
- def iptomac(self, mcast_ip):
- mcast_mac = '01:00:5e:'
- octets = mcast_ip.split('.')
- second_oct = int(octets[1]) & 127
- third_oct = int(octets[2])
- fourth_oct = int(octets[3])
- mcast_mac = mcast_mac + format(second_oct,'02x') + ':' + format(third_oct, '02x') + ':' + format(fourth_oct, '02x')
- return mcast_mac
-
- def send_igmp_join(self, groups, src_list = ['1.2.3.4'], record_type=IGMP_V3_GR_TYPE_INCLUDE,
- ip_pkt = None, iface = 'veth0', ssm_load = False, delay = 1):
- if ssm_load is True:
- self.onos_ssm_table_load(groups, src_list)
- igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
- gaddr=self.IP_DST)
- for g in groups:
- gr = IGMPv3gr(rtype= record_type, mcaddr=g)
- gr.sources = src_list
- igmp.grps.append(gr)
- if ip_pkt is None:
- ip_pkt = self.igmp_eth/self.igmp_ip
- pkt = ip_pkt/igmp
- IGMPv3.fixup(pkt)
- log.info('sending igmp join packet %s'%pkt.show())
- sendp(pkt, iface=iface)
- time.sleep(delay)
-
- def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
- dst_mac = self.iptomac(group)
- eth = Ether(dst= dst_mac)
- ip = IP(dst=group,src=source)
- data = repr(monotonic.monotonic())
- sendp(eth/ip/data,count=20, iface = intf)
-
- def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
- log_test.info('verifying multicast traffic for group %s from source %s'%(group,source))
- self.success = False
- def recv_task():
- def igmp_recv_cb(pkt):
- #log_test.info('received multicast data packet is %s'%pkt.show())
- log_test.info('multicast data received for group %s from source %s'%(group,source))
- self.success = True
- sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
- t = threading.Thread(target = recv_task)
- t.start()
- self.send_multicast_data_traffic(group,source=source)
- t.join()
- return self.success
-
- def incmac(self, mac):
- tmp = str(hex(int('0x'+mac,16)+1).split('x')[1])
- mac = '0'+ tmp if len(tmp) < 2 else tmp
- return mac
-
- def next_mac(self, mac):
- mac = mac.split(":")
- mac[5] = self.incmac(mac[5])
-
- if len(mac[5]) > 2:
- mac[0] = self.incmac(mac[0])
- mac[5] = '01'
-
- if len(mac[0]) > 2:
- mac[0] = '01'
- mac[1] = self.incmac(mac[1])
- mac[5] = '01'
- return ':'.join(mac)
-
-
- def to_egress_mac(cls, mac):
- mac = mac.split(":")
- mac[4] = '01'
-
- return ':'.join(mac)
-
- def inc_ip(self, ip, i):
-
- ip[i] =str(int(ip[i])+1)
- return '.'.join(ip)
-
-
- def next_ip(self, ip):
-
- lst = ip.split('.')
- for i in (3,0,-1):
- if int(lst[i]) < 255:
- return self.inc_ip(lst, i)
- elif int(lst[i]) == 255:
- lst[i] = '0'
- if int(lst[i-1]) < 255:
- return self.inc_ip(lst,i-1)
- elif int(lst[i-2]) < 255:
- lst[i-1] = '0'
- return self.inc_ip(lst,i-2)
- else:
- break
-
- def to_egress_ip(self, ip):
- lst=ip.split('.')
- lst[0] = '182'
- return '.'.join(lst)
-
- @classmethod
- def start_onos(cls, network_cfg = None):
- if type(network_cfg) is tuple:
- res = []
- for v in network_cfg:
- res += v.items()
- config = dict(res)
+ def vsg_for_external_connectivity(self, subscriber_index, reserved = False):
+ if reserved is True:
+ if self.on_pod is True:
+ vcpe = self.dhcp_vcpes_reserved[subscriber_index]
+ else:
+ vcpe = self.untagged_dhcp_vcpes_reserved[subscriber_index]
else:
- config = network_cfg
- log_test.info('Restarting ONOS with new network configuration')
- return cord_test_onos_restart(config = config)
+ if self.on_pod is True:
+ vcpe = self.dhcp_vcpes[subscriber_index]
+ else:
+ vcpe = self.untagged_dhcp_vcpes[subscriber_index]
+ mgmt = 'eth0'
+ host = '8.8.8.8'
+ self.success = False
+ assert_not_equal(vcpe, None)
+ vcpe_ip = VSGAccess.vcpe_get_dhcp(vcpe, mgmt = mgmt)
+ assert_not_equal(vcpe_ip, None)
+ log.info('Got DHCP IP %s for %s' %(vcpe_ip, vcpe))
+ log.info('Sending icmp echo requests to external network 8.8.8.8')
+ st, _ = getstatusoutput('ping -c 3 8.8.8.8')
+ VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
+ assert_equal(st, 0)
- def onos_aaa_config(self):
- aaa_dict = {'apps' : { self.app : { 'AAA' : { 'radiusSecret': 'radius_password',
- 'radiusIp': '172.17.0.2' } } } }
- radius_ip = os.getenv('ONOS_AAA_IP') or '172.17.0.2'
- aaa_dict['apps'][self.app]['AAA']['radiusIp'] = radius_ip
- self.onos_ctrl.activate()
- time.sleep(2)
- self.onos_load_config(aaa_dict)
+ def vsg_xos_subscriber_create(self, index, subscriber_info = None, volt_subscriber_info = None):
+ if self.on_pod is False:
+ return ''
+ if subscriber_info is None:
+ subscriber_info = self.cord_subscriber.subscriber_info[index]
+ if volt_subscriber_info is None:
+ volt_subscriber_info = self.cord_subscriber.volt_subscriber_info[index]
+ s_tag = int(volt_subscriber_info['voltTenant']['s_tag'])
+ c_tag = int(volt_subscriber_info['voltTenant']['c_tag'])
+ vcpe = 'vcpe-{}-{}'.format(s_tag, c_tag)
+ subId = self.cord_subscriber.subscriberCreate(index, subscriber_info, volt_subscriber_info)
+ if subId:
+ #if the vsg instance was already instantiated, then reduce delay
+ if c_tag % self.SUBSCRIBERS_PER_S_TAG == 0:
+ delay = 350
+ else:
+ delay = 90
+ log.info('Delaying %d seconds for the VCPE to be provisioned' %(delay))
+ time.sleep(delay)
+ #log.info('Testing for external connectivity to VCPE %s' %(vcpe))
+ #self.vsg_for_external_connectivity(index)
- def onos_load_config(self, config):
- status, code = OnosCtrl.config(config)
- if status is False:
- log_test.info('Configure request for AAA returned status %d' %code)
- assert_equal(status, True)
- time.sleep(3)
+ return subId
+ def vsg_delete(self, num_subscribers):
+ if self.on_pod is False:
+ return
+ num_subscribers = min(num_subscribers, len(self.cord_subscriber.subscriber_info))
+ for index in xrange(num_subscribers):
+ subId = self.vsg_xos_subscriber_id(index)
+ if subId and subId != '0':
+ self.vsg_xos_subscriber_delete(index, subId = subId)
+
+ def vsg_xos_subscriber_delete(self, index, subId = '', voltId = '', subscriber_info = None, volt_subscriber_info = None):
+ if self.on_pod is False:
+ return
+ self.cord_subscriber.subscriberDelete(index, subId = subId, voltId = voltId,
+ subscriber_info = subscriber_info,
+ volt_subscriber_info = volt_subscriber_info)
+
+ def vsg_xos_subscriber_id(self, index):
+ if self.on_pod is False:
+ return ''
+ return self.cord_subscriber.subscriberId(index)
+
+ def vsg_xos_subscriber_create_reserved(self):
+ if self.on_pod is False:
+ return
+ tags_reserved = [ (int(vcpe['s_tag']), int(vcpe['c_tag'])) for vcpe in self.vcpes_reserved ]
+ volt_tenants = self.restApiXos.ApiGet('TENANT_VOLT')
+ subscribers = self.restApiXos.ApiGet('TENANT_SUBSCRIBER')
+ reserved_tenants = filter(lambda tenant: (int(tenant['s_tag']), int(tenant['c_tag'])) in tags_reserved, volt_tenants)
+ reserved_config = []
+ for tenant in reserved_tenants:
+ for subscriber in subscribers:
+ if int(subscriber['id']) == int(tenant['subscriber']):
+ volt_subscriber_info = {}
+ volt_subscriber_info['voltTenant'] = dict(s_tag = tenant['s_tag'],
+ c_tag = tenant['c_tag'],
+ subscriber = tenant['subscriber'])
+ volt_subscriber_info['volt_id'] = tenant['id']
+ volt_subscriber_info['account_num'] = subscriber['identity']['account_num']
+ reserved_config.append( (subscriber, volt_subscriber_info) )
+ break
+ else:
+ log.info('Subscriber not found for tenant %s, s_tag: %s, c_tag: %s' %(str(tenant['subscriber']),
+ str(tenant['s_tag']),
+ str(tenant['c_tag'])))
+
+ for subscriber_info, volt_subscriber_info in reserved_config:
+ self.vsg_xos_subscriber_delete(0,
+ subId = str(subscriber_info['id']),
+ voltId = str(volt_subscriber_info['volt_id']),
+ subscriber_info = subscriber_info,
+ volt_subscriber_info = volt_subscriber_info)
+ subId = self.vsg_xos_subscriber_create(0,
+ subscriber_info = subscriber_info,
+ volt_subscriber_info = volt_subscriber_info)
+ log.info('Created reserved subscriber %s' %(subId))
+
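+ #Each scale test below follows the same twisted pattern: the body runs in a nested
+ #worker scheduled with reactor.callLater(0, worker, df) and signals completion through
+ #df.callback(0), while the @deferred(timeout) decorator fails the test if it hangs.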
+ @deferred(1800)
def test_scale_for_vsg_vm_creations(self):
- vsg = vsg_exchange('test_vsg_xos_subscriber_create_all')
- vsg.test_vsg_xos_subscriber_create_all()
+ try:
+ df = defer.Deferred()
+ def scale_vsg_vms(df):
+ for index in xrange(len(self.cord_subscriber.subscriber_info)):
+ #check if the index exists
+ subId = self.vsg_xos_subscriber_id(index)
+ log.info('test_vsg_xos_subscriber_creation - subId is %s'%subId)
+ if subId and subId != '0':
+ self.vsg_xos_subscriber_delete(index, subId = subId)
+ subId = self.vsg_xos_subscriber_create(index)
+ log.info('Created Subscriber %s' %(subId))
+ df.callback(0)
+ reactor.callLater(0, scale_vsg_vms, df)
+ return df
+ finally:
+ pass
+ #self.vsg_delete(len(self.cord_subscriber.subscriber_info))
+ self.vsg_xos_subscriber_create_reserved()
+ @deferred(1800)
def test_scale_for_vcpe_creations(self):
- vsg = vsg_exchange('test_vsg_xos_subscriber_create_all')
- vsg.test_vsg_xos_subscriber_create_all()
+ try:
+ df = defer.Deferred()
+ def scale_vcpe_instances(df):
+ for index in xrange(len(self.cord_subscriber.subscriber_info)):
+ #check if the index exists
+ subId = self.vsg_xos_subscriber_id(index)
+ log.info('test_vsg_xos_subscriber_creation')
+ if subId and subId != '0':
+ self.vsg_xos_subscriber_delete(index, subId = subId)
+ subId = self.vsg_xos_subscriber_create(index)
+ log.info('Created Subscriber %s' %(subId))
+ df.callback(0)
+ reactor.callLater(0, scale_vcpe_instances, df)
+ return df
+ except:
+ self.vsg_delete(len(self.cord_subscriber.subscriber_info))
+ self.vsg_xos_subscriber_create_reserved()
+ @deferred(1800)
def test_scale_of_subcriber_vcpe_creations_in_single_vsg_vm(self):
- #create 100 subscribers and delete them after creation
- vsg = vsg_exchange('vsg_create')
- try:
- vsg.vsg_create(100)
- finally:
- vsg.vsg_delete(100)
+ try:
+ df = defer.Deferred()
+ def scale_vcpe_instances(df):
+ subId = self.vsg_xos_subscriber_create(100)
+ if subId and subId != '0':
+ self.vsg_xos_subscriber_delete(100, subId)
+ df.callback(0)
+ reactor.callLater(0, scale_vcpe_instances, df)
+ return df
+ except:
+ self.vsg_delete(len(self.cord_subscriber.subscriber_info))
+ self.vsg_xos_subscriber_create_reserved()
+ @deferred(1800)
def test_scale_of_subcriber_vcpe_creations_in_multiple_vsg_vm(self):
- #create 100 subscribers and delete them after creation
- vsg = vsg_exchange('vsg_create')
try:
- vsg.vsg_create(100)
- finally:
- vsg.vsg_delete(100)
+ df = defer.Deferred()
+ def scale_vcpe_instances(df):
+ subId = self.vsg_xos_subscriber_create(100)
+ if subId and subId != '0':
+ self.vsg_xos_subscriber_delete(100, subId)
+ df.callback(0)
+ reactor.callLater(0, scale_vcpe_instances, df)
+ return df
+ except:
+ self.vsg_delete(len(self.cord_subscriber.subscriber_info))
+ self.vsg_xos_subscriber_create_reserved()
+ @deferred(1800)
def test_scale_of_subcriber_vcpe_creations_with_one_vcpe_in_one_vsg_vm(self):
- #create 100 subscribers and delete them after creation
- vsg = vsg_exchange('vsg_create')
try:
- vsg.vsg_create(100)
- finally:
- vsg.vsg_delete(100)
+ df = defer.Deferred()
+ def scale_vcpe_instances(df):
+ subId = self.vsg_xos_subscriber_create(100)
+ if subId and subId != '0':
+ self.vsg_xos_subscriber_delete(100, subId)
+ df.callback(0)
+ reactor.callLater(0, scale_vcpe_instances, df)
+ return df
+ except:
+ self.vsg_delete(len(self.cord_subscriber.subscriber_info))
+ self.vsg_xos_subscriber_create_reserved()
+ @deferred(1800)
def test_scale_for_cord_subscriber_creation_and_deletion(self):
- #create 100 subscribers and delete them after creation
- vsg = vsg_exchange('vsg_create')
try:
- vsg.vsg_create(100)
- finally:
- vsg.vsg_delete(100)
+ df = defer.Deferred()
+ def scale_vcpe_instances(df):
+ subId = self.vsg_xos_subscriber_create(100)
+ if subId and subId != '0':
+ self.vsg_xos_subscriber_delete(100, subId)
+ df.callback(0)
+ reactor.callLater(0, scale_vcpe_instances, df)
+ return df
+ except:
+ self.vsg_delete(len(self.cord_subscriber.subscriber_info))
+ self.vsg_xos_subscriber_create_reserved()
def test_cord_for_scale_of_subscriber_containers_per_compute_node(self):
pass
+ @deferred(10)
def test_latency_of_cord_for_control_packets_using_icmp_packet(self):
- cmd = "ping -c 4 {0} | tail -1| awk '{{print $4}}'".format(self.wan_intf_ip)
- st, out = getstatusoutput(cmd)
- if out != '':
- out = out.split('/')
- avg_rtt = out[1]
- latency = float(avg_rtt)/float(2)
- else:
- latency = None
- log.info('CORD setup latency calculated from icmp packet is = %s ms'%latency)
- assert_not_equal(latency,None)
-
- def test_latency_of_cord_for_control_packets_using_increasing_sizes_of_icmp_packet(self):
- pckt_sizes = [100,500,1000,1500]
- for size in pckt_sizes:
- cmd = "ping -c 4 -s {} {} | tail -1| awk '{{print $4}}'".format(size,self.wan_intf_ip)
+ df = defer.Deferred()
+ def scale_vcpe_instances(df):
+ cmd = "ping -c 4 {0} | tail -1| awk '{{print $4}}'".format(self.wan_intf_ip)
st, out = getstatusoutput(cmd)
if out != '':
out = out.split('/')
@@ -391,302 +492,465 @@
latency = float(avg_rtt)/float(2)
else:
latency = None
+ log.info('CORD setup latency calculated from icmp packet is = %s ms'%latency)
+ assert_not_equal(latency,None)
+ df.callback(0)
+ reactor.callLater(0, scale_vcpe_instances, df)
+ return df
+
+ @deferred(20)
+ def test_latency_of_cord_for_control_packets_using_increasing_sizes_of_icmp_packet(self):
+ df = defer.Deferred()
+ def scale_vcpe_instances(df):
+ pckt_sizes = [100,500,1000,1500]
+ for size in pckt_sizes:
+ cmd = "ping -c 4 -s {} {} | tail -1| awk '{{print $4}}'".format(size,self.wan_intf_ip)
+ st, out = getstatusoutput(cmd)
+ if out != '':
+ out = out.split('/')
+ avg_rtt = out[1]
+ latency = float(avg_rtt)/float(2)
+ else:
+ latency = None
log.info('CORD setup latency calculated from icmp packet with size %s bytes is = %s ms'%(size,latency))
assert_not_equal(latency,None)
+ df.callback(0)
+ reactor.callLater(0, scale_vcpe_instances, df)
+ return df
+ @deferred(10)
def test_latency_of_cord_with_traceroute(self):
- cmd = "traceroute -q1 {} | tail -1| awk '{{print $4}}'".format(self.wan_intf_ip)
- avg_rtt = float(0)
- latency = None
- for index in [1,2,3]:
- st, out = getstatusoutput(cmd)
- if out != '':
- avg_rtt += float(out)
- latency = float(avg_rtt)/float(6)
- log.info('CORD setup latency calculated from traceroute is = %s ms'%latency)
- assert_not_equal(latency,0.0)
+ df = defer.Deferred()
+ def scale_vcpe_instances(df):
+ cmd = "traceroute -q1 {} | tail -1| awk '{{print $4}}'".format(self.wan_intf_ip)
+ avg_rtt = float(0)
+ latency = None
+ for index in [1,2,3]:
+ st, out = getstatusoutput(cmd)
+ if out != '':
+ avg_rtt += float(out)
+ latency = float(avg_rtt)/float(6)
+ log.info('CORD setup latency calculated from traceroute is = %s ms'%latency)
+ assert_not_equal(latency,0.0)
+ assert_not_equal(latency,None)
+ df.callback(0)
+ reactor.callLater(0, scale_vcpe_instances, df)
+ return df
+ #tested with 50 igmp joins on CiaB setup
+ @deferred(1000)
def test_scale_with_igmp_joins_for_500_multicast_groups_and_check_cpu_usage(self, group_count=500):
- OnosCtrl(self.igmp_app).activate()
- groups = self.generate_random_multicast_ip_addresses(count = group_count)
- sources = self.generate_random_unicast_ip_addresses(count = group_count)
- self.onos_ssm_table_load(groups,src_list=sources,flag=True)
- for index in range(group_count):
- self.send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
+ df = defer.Deferred()
+ def scale_igmp_joins(df):
+ OnosCtrl(self.igmp_app).activate()
+ groups = scale().generate_random_multicast_ip_addresses(count = group_count)
+ sources = scale().generate_random_unicast_ip_addresses(count = group_count)
+ scale().onos_ssm_table_load(groups,src_list=sources,flag=True)
+ try:
+ for index in range(group_count):
+ scale().send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
iface = self.V_INF1)
- status = self.verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
- assert_equal(status, True)
- log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
- if index % 50 == 0:
- cpu_usage = self.get_system_cpu_usage()
- log.info('CPU usage is %s for multicast group entries %s'%(cpu_usage,index+1))
+ status = scale().verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
+ assert_equal(status, True)
+ log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
+ except Exception as error:
+ log.info('Got unexpected error %s'%error)
+ raise
+ df.callback(0)
+ reactor.callLater(0, scale_igmp_joins, df)
+ return df
- def test_scale_with_igmp_joins_for_1000_multicast_groups_and_check_cpu_usage(self, group_count=1000):
- OnosCtrl(self.igmp_app).activate()
- groups = self.generate_random_multicast_ip_addresses(count = group_count)
- sources = self.generate_random_unicast_ip_addresses(count = group_count)
- self.onos_ssm_table_load(groups,src_list=sources,flag=True)
- for index in range(group_count):
- self.send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
+ #tested with 50 igmp joins on CiaB setup
+ @deferred(1000)
+ def test_scale_with_igmp_joins_for_1000_multicast_groups_and_igmp_app_toggle(self, group_count=1000):
+ df = defer.Deferred()
+ def scale_igmp_joins(df):
+ OnosCtrl(self.igmp_app).activate()
+ groups = scale().generate_random_multicast_ip_addresses(count = group_count)
+ sources = scale().generate_random_unicast_ip_addresses(count = group_count)
+ scale().onos_ssm_table_load(groups,src_list=sources,flag=True)
+ try:
+ for index in range(group_count):
+ scale().send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
iface = self.V_INF1)
- status = self.verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
- assert_equal(status, True)
- log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
- if index % 50 == 0:
- cpu_usage = self.get_system_cpu_usage()
- log.info('CPU usage is %s for multicast group entries %s'%(cpu_usage,index+1))
+ status = scale().verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
+ assert_equal(status, True)
+ log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
+ log_test.info('Deactivating igmp app in onos')
+ OnosCtrl(self.igmp_app).deactivate()
+ time.sleep(2)
+ for index in range(group_count):
+ status = scale().verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
+ assert_equal(status, False)
+ log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
+ OnosCtrl(self.igmp_app).activate()
+ except Exception as error:
+ log.info('Got unexpected error %s'%error)
+ OnosCtrl(self.igmp_app).activate()
+ raise
+ df.callback(0)
+ reactor.callLater(0, scale_igmp_joins, df)
+ return df
+ #tested with 50 igmp joins on CiaB setup
+ @deferred(1800)
def test_scale_with_igmp_joins_for_2000_multicast_groups_and_check_cpu_usage(self, group_count=2000):
- OnosCtrl(self.igmp_app).activate()
- groups = self.generate_random_multicast_ip_addresses(count = group_count)
- sources = self.generate_random_unicast_ip_addresses(count = group_count)
- self.onos_ssm_table_load(groups,src_list=sources,flag=True)
- for index in range(group_count):
- self.send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
+ df = defer.Deferred()
+ def scale_igmp_joins(df):
+ OnosCtrl(self.igmp_app).activate()
+ groups = scale().generate_random_multicast_ip_addresses(count = group_count)
+ sources = scale().generate_random_unicast_ip_addresses(count = group_count)
+ scale().onos_ssm_table_load(groups,src_list=sources,flag=True)
+ try:
+ for index in range(group_count):
+ scale().send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
iface = self.V_INF1)
- status = self.verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
- assert_equal(status, True)
- log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
- if index % 50 == 0:
- cpu_usage = self.get_system_cpu_usage()
- log.info('CPU usage is %s for multicast group entries %s'%(cpu_usage,index+1))
+ status = scale().verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
+ assert_equal(status, True)
+ log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
+ if index % 50 == 0:
+ cpu_usage = scale().get_system_cpu_usage()
+ log.info('CPU usage is %s for multicast group entries %s'%(cpu_usage,index+1))
+ except Exception as error:
+ log.info('Got unexpected error %s'%error)
+ raise
+ df.callback(0)
+ reactor.callLater(0, scale_igmp_joins, df)
+ return df
- def test_scale_of_igmp_joins_for_2000_multicast_groups_and_check_cpu_usage_after_app_deactivation_and_activation(self,group_count=500):
- OnosCtrl(self.igmp_app).activate()
- groups = self.generate_random_multicast_ip_addresses(count = group_count)
- sources = self.generate_random_unicast_ip_addresses(count = group_count)
- self.onos_ssm_table_load(groups,src_list=sources,flag=True)
- for index in range(group_count):
- self.send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
+ #tested with 50 igmp joins on CiaB setup
+ @deferred(1000)
+ def test_scale_of_igmp_joins_for_2000_multicast_groups_and_check_cpu_usage_after_app_deactivation_and_activation(self,group_count=2000):
+ df = defer.Deferred()
+ def scale_igmp_joins(df):
+ cpu_usage1 = scale().get_system_cpu_usage()
+ OnosCtrl(self.igmp_app).activate()
+ groups = scale().generate_random_multicast_ip_addresses(count = group_count)
+ sources = scale().generate_random_unicast_ip_addresses(count = group_count)
+ scale().onos_ssm_table_load(groups,src_list=sources,flag=True)
+ try:
+ for index in range(group_count):
+ scale().send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
iface = self.V_INF1)
- status = self.verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
- assert_equal(status, True)
- log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
- if index % 50 == 0:
- cpu_usage = self.get_system_cpu_usage()
- log.info('CPU usage is %s for multicast group entries %s'%(cpu_usage,index+1))
- OnosCtrl(self.igmp_app).deactivate()
- time.sleep(1)
- cpu_usage = self.get_system_cpu_usage()
- log.info('CPU usage is %s for multicast group entries %s after igmp app deactivated'%(cpu_usage,index+1))
+ status = scale().verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
+ assert_equal(status, True)
+ log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
+ if index % 50 == 0:
+ cpu_usage = self.get_system_cpu_usage()
+ log.info('CPU usage is %s for multicast group entries %s'%(cpu_usage,index+1))
+ cpu_usage2 = scale().get_system_cpu_usage()
+ OnosCtrl(self.igmp_app).deactivate()
+ time.sleep(2)
+ cpu_usage3 = scale().get_system_cpu_usage()
+ log.info('CPU usage before test start = %f, after %d igmp entries registered in onos = %f and after the app deactivated = %f'%(cpu_usage1,group_count,cpu_usage2,cpu_usage3))
+ except Exception as error:
+ log.info('Got unexpected error %s'%error)
+ raise
+ df.callback(0)
+ reactor.callLater(0, scale_igmp_joins, df)
+ return df
+ #tested with 100 flow entries on CiaB setup
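+ #Flow scale pattern: each flow is added through OnosFlowCtrl, then crafted packets
+ #are sent on the ingress interface and sniffed on the egress interface to confirm
+ #the installed flow forwards traffic.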
+ @deferred(1000)
def test_scale_adding_1k_flow_entries_in_onos_with_dynamic_tcp_ports(self,count=1000):
- cpu_usage1 = self.get_system_cpu_usage()
- egress = 1
- ingress = 2
- egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'tcp_port': random.randint(1024,65535) }
- ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'tcp_port': random.randint(1024,65535) }
- for index in range(0,count):
- ingress_map['tcp_port'] = random.randint(1024,65535)
- egress_map['tcp_port'] = random.randint(1024,65535)
- flow = OnosFlowCtrl(deviceId = self.device_id,
- egressPort = egress + self.port_offset,
- ingressPort = ingress + self.port_offset,
+ scale().flows_setup()
+ df = defer.Deferred()
+ def scale_flow_entries(df):
+ egress = 1
+ ingress = 2
+ egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'tcp_port': random.randint(1024,65535) }
+ ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'tcp_port': random.randint(1024,65535) }
+ try:
+ for index in range(0,count):
+ ingress_map['tcp_port'] = random.randint(1024,65535)
+ egress_map['tcp_port'] = random.randint(1024,65535)
+ src_port = ingress_map['tcp_port']
+ egr_port = egress_map['tcp_port']
+ #log.info('ingress port is %d and egress port is %d'%(src_port,egr_port))
+ flow = OnosFlowCtrl(deviceId = self.device_id,
+ egressPort = egress + scale().port_offset,
+ ingressPort = ingress + scale().port_offset,
tcpSrc = ingress_map['tcp_port'],
tcpDst = egress_map['tcp_port']
)
- result = flow.addFlow()
- assert_equal(result, True)
- log_test.info("flow number = %d is added",index+1)
- if index % 50 == 0:
- cpu_usage = self.get_system_cpu_usage()
- log.info('CPU usage is %s for flow number %d added'%(cpu_usage,index+1))
- time.sleep(1)
- cpu_usage2 = self.get_system_cpu_usage()
- log.info('system cpu usage before flows added = %f and after %d flows added = %f'%(cpu_usage1,count,cpu_usage2))
+ result = flow.addFlow()
+ assert_equal(result, True)
+ log_test.info("flow number = %d is added",index+1)
+ def mac_recv_task():
+ def recv_cb(pkt):
+ log_test.info('Pkt seen with ingress TCP port %s, egress TCP port %s' %(pkt[TCP].sport, pkt[TCP].dport))
+ result = True
+ sniff(count=2, timeout=5,
+ lfilter = lambda p: TCP in p and p[TCP].dport == egr_port and p[TCP].sport == src_port ,prn = recv_cb, iface = scale().port_map[egress])
+ t = threading.Thread(target = mac_recv_task)
+ t.start()
+ L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
+ L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
+ L4 = TCP(sport = src_port, dport = egr_port)
+ pkt = L2/L3/L4
+ log_test.info('Sending packets to verify if flows are correct')
+ sendp(pkt, count=50, iface = scale().port_map[ingress])
+ t.join()
+ except Exception as error:
+ log.info('Got unexpected error %s'%error)
+ raise
+ df.callback(0)
+ reactor.callLater(0,scale_flow_entries, df)
+ return df
- def test_scale_adding_5k_constant_source_ip_flow_entries_in_onos_and_checking_cpu_usage(self,count=5000):
- cpu_usage1 = self.get_system_cpu_usage()
- egress = 1
- ingress = 2
- egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '182.0.0.0' }
- ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.0.0.0' }
- for i in range(0,count):
- ingress_map['ip'] = self.next_ip(ingress_map['ip'])
- assert_not_equal(ingress_map['ip'], None)
- egress_map['ip'] = self.to_egress_ip(ingress_map['ip'])
-
- flow = OnosFlowCtrl(deviceId = self.device_id,
- egressPort = egress + self.port_offset,
- ingressPort = ingress + self.port_offset,
+ #tested with 100 flow entries on CiaB setup
+ @deferred(1000)
+ def test_scale_adding_5k_ip_flow_entries_in_onos_and_checking_cpu_usage(self,count=5000):
+ scale().flows_setup()
+ df = defer.Deferred()
+ def scale_flow_entries(df):
+ cpu_usage1 = scale().get_system_cpu_usage()
+ egress = 1
+ ingress = 2
+ egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '182.0.0.0' }
+ ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.0.0.0' }
+ try:
+ for index in range(0,count):
+ ingress_map['ip'] = scale().generate_random_unicast_ip_addresses()[0] #next_ip(ingress_map['ip'])
+ assert_not_equal(ingress_map['ip'], None)
+ egress_map['ip'] = scale().generate_random_unicast_ip_addresses()[0] #to_egress_ip(ingress_map['ip'])
+ flow = OnosFlowCtrl(deviceId = self.device_id,
+ egressPort = egress + scale().port_offset,
+ ingressPort = ingress + scale().port_offset,
ethType = '0x0800',
ipSrc = ('IPV4_SRC', ingress_map['ip']+'/8'),
ipDst = ('IPV4_DST', egress_map['ip']+'/8')
)
- if index % 50 == 0:
- cpu_usage = self.get_system_cpu_usage()
- log.info('CPU usage is %s for flow number %d added'%(cpu_usage,index+1))
- time.sleep(1)
- cpu_usage2 = self.get_system_cpu_usage()
- log.info('system cpu usage before flows added = %f and after %d flows added = %f'%(cpu_usage1,count,cpu_usage2))
+ if index % 50 == 0:
+ cpu_usage = scale().get_system_cpu_usage()
+ log.info('CPU usage is %s for flow number %d added'%(cpu_usage,index+1))
+ time.sleep(1)
+ def mac_recv_task():
+ def recv_cb(pkt):
+ log_test.info('Pkt seen with ingress source IP %s, destination IP %s' %(pkt[IP].src, pkt[IP].dst))
+ result = True
+ sniff(count=2, timeout=5,
+ lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip'] ,prn = recv_cb, iface = scale().port_map[egress])
+ t = threading.Thread(target = mac_recv_task)
+ t.start()
+ L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
+ L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
+ pkt = L2/L3
+ log_test.info('Sending packets to verify if flows are correct')
+ sendp(pkt, count=50, iface = scale().port_map[ingress])
+ t.join()
+ cpu_usage2 = scale().get_system_cpu_usage()
+ log.info('system cpu usage before flows added = %f and after %d flows added = %f'%(cpu_usage1,count,cpu_usage2))
+ except Exception as error:
+ log.info('Got unexpected error %s'%error)
+ raise
+ df.callback(0)
+ reactor.callLater(0, scale_flow_entries, df)
+ return df
+ #tested with 100 flow entries on CiaB setup
+ @deferred(1000)
def test_scale_adding_10k_flow_entries_in_onos_with_dynamic_udp_ports(self,count=10000):
- cpu_usage1 = self.get_system_cpu_usage()
- egress = 1
- ingress = 2
- egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'tcp_port': random.randint(1024,65535) }
- ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'tcp_port': random.randint(1024,65535) }
- for index in range(0,count):
- ingress_map['tcp_port'] = random.randint(1024,65535)
- egress_map['tcp_port'] = random.randint(1024,65535)
- flow = OnosFlowCtrl(deviceId = self.device_id,
- egressPort = egress + self.port_offset,
- ingressPort = ingress + self.port_offset,
- tcpSrc = ingress_map['tcp_port'],
- tcpDst = egress_map['tcp_port']
+ scale().flows_setup()
+ df = defer.Deferred()
+ def scale_flow_entries(df):
+ egress = 1
+ ingress = 2
+ egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'udp_port': random.randint(1024,65535) }
+ ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'udp_port': random.randint(1024,65535) }
+ try:
+ for index in range(0,count):
+ ingress_map['udp_port'] = random.randint(1024,65535)
+ egress_map['udp_port'] = random.randint(1024,65535)
+ src_port = ingress_map['udp_port']
+ egr_port = egress_map['udp_port']
+ #log.info('ingress port is %d and egress port is %d'%(src_port,egr_port))
+ flow = OnosFlowCtrl(deviceId = self.device_id,
+ egressPort = egress + scale().port_offset,
+ ingressPort = ingress + scale().port_offset,
+ udpSrc = ingress_map['udp_port'],
+ udpDst = egress_map['udp_port']
)
- result = flow.addFlow()
- assert_equal(result, True)
- ##wait for flows to be added to ONOS
- log_test.info("flow number = %d is added",index+1)
- if index % 50 == 0:
- cpu_usage = self.get_system_cpu_usage()
- log.info('CPU usage is %s for flow number %d added'%(cpu_usage,index+1))
- time.sleep(1)
- cpu_usage2 = self.get_system_cpu_usage()
- log.info('system cpu usage before flows added = %f and after %d flows added = %f'%(cpu_usage1,count,cpu_usage2))
+ result = flow.addFlow()
+ assert_equal(result, True)
+ log_test.info("flow number = %d is added",index+1)
+ def mac_recv_task():
+ def recv_cb(pkt):
+ log_test.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
+ result = True
+ sniff(count=2, timeout=5,
+ lfilter = lambda p: UDP in p and p[UDP].dport == egr_port and p[UDP].sport == src_port ,prn = recv_cb, iface = scale().port_map[egress])
+ t = threading.Thread(target = mac_recv_task)
+ t.start()
+ L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
+ L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
+ L4 = UDP(sport = src_port, dport = egr_port)
+ pkt = L2/L3/L4
+ log_test.info('Sending packets to verify if flows are correct')
+ sendp(pkt, count=50, iface = scale().port_map[ingress])
+ t.join()
+ except Exception as error:
+ log.info('Got unexpected error %s'%error)
+ raise
+ df.callback(0)
+ reactor.callLater(0,scale_flow_entries, df)
+ return df
- def test_scale_adding_10k_constant_destination_mac_flow_entries_in_onos_and_check_cpu_usage(self,count=10000):
- cpu_usage1 = self.get_system_cpu_usage()
- egress = 1
- ingress = 2
- egress_mac = '00:00:00:00:01:01'
- ingress_mac = '02:00:00:00:00:00'
- for index in range(0,count):
- ingress_mac = self.next_mac(ingress_mac)
- flow = OnosFlowCtrl(deviceId = self.device_id,
- egressPort = egress + self.port_offset,
- ingressPort = ingress + self.port_offset,
+ #tested with 100 flow entries on CiaB setup
+ @deferred(1000)
+ def test_scale_adding_10k_constant_destination_mac_flow_entries_in_onos_and_check_cpu_usage(self,count=100):
+ scale().flows_setup()
+ df = defer.Deferred()
+ def scale_flow_entries(df):
+ cpu_usage1 = self.get_system_cpu_usage()
+ egress = 1
+ ingress = 2
+ egress_mac = '02:00:00:00:00:00'
+ ingress_mac = '03:00:00:00:00:00'
+ try:
+ for index in range(0,count):
+ result = False
+ ingress_mac = scale().next_mac(ingress_mac)
+ flow = OnosFlowCtrl(deviceId = self.device_id,
+ egressPort = egress + scale().port_offset,
+ ingressPort = ingress + scale().port_offset,
ethSrc = ingress_mac,
ethDst = egress_mac)
- result = flow.addFlow()
- assert_equal(result, True)
- log.info("flow number = %d is added",index+1)
- if index % 100 == 0:
- cpu_usage = self.get_system_cpu_usage()
- log.info('CPU usage is %s for multicast group entries %s'%(cpu_usage,index+1))
- time.sleep(1)
- cpu_usage2 = self.get_system_cpu_usage()
- log.info('system cpu usage before flows added = %f and after %d flows added = %f'%(cpu_usage1,count,cpu_usage2))
+ result = flow.addFlow()
+ assert_equal(result, True)
+ log.info("flow number = %d is added",index+1)
+ if index % 100 == 0:
+ cpu_usage = scale().get_system_cpu_usage()
+ log.info('CPU usage is %s for multicast group entries %s'%(cpu_usage,index+1))
+ time.sleep(1)
+ def mac_recv_task():
+ def recv_cb(pkt):
+ log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src , pkt.dst))
+ result = True
+ sniff(count=2, timeout=5,
+ lfilter = lambda p: p.src == ingress_mac and p.dst == egress_mac ,prn = recv_cb, iface = scale().port_map[egress])
+ t = threading.Thread(target = mac_recv_task)
+ t.start()
+ L2 = Ether(src = ingress_mac, dst = egress_mac)
+ pkt = L2/IP()
+ log_test.info('Sending packets to verify if flows are correct')
+ sendp(pkt, count=50, iface = scale().port_map[ingress])
+ t.join()
+ assert_equal(result, True)
+ cpu_usage2 = self.get_system_cpu_usage()
+ log.info('system cpu usage before flows added = %f and after %d flows added = %f'%(cpu_usage1,count,cpu_usage2))
+ except Exception as error:
+ log.info('Got unexpected error %s'%error)
+ raise
+ df.callback(0)
+ reactor.callLater(0,scale_flow_entries, df)
+ return df
+
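+ #ACL scale pattern: rules are pushed through the ACLTest helper against the ONOS ACL
+ #app while system CPU usage is sampled periodically as the rule count grows.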
+ @deferred(1000)
def test_scale_adding_10k_acl_rules_to_deny_matching_destination_tcp_port_traffic(self,count=10000):
- cpu_usage1 = self.get_system_cpu_usage()
- acl_rule = ACLTest()
- for index in range(0,count):
- src_ip = self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
- dst_ip = self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
- dst_port = random.randint(1024,65535)
- log.info('adding acl rule = %d with src ip = %s, dst ip = %s and dst tcp port = %d'%(index+1, src_ip,dst_ip,dst_port))
- status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip, ipProto ='TCP', dstTpPort =dst_port, action = 'deny')
- assert_equal(status, True)
- if index % 100 == 0:
- cpu_usage = self.get_system_cpu_usage()
- log.info('CPU usage is %s for multicast group entries %s'%(cpu_usage,index+1))
- time.sleep(1)
- cpu_usage2 = self.get_system_cpu_usage()
- log.info('system cpu usage before flows added = %f and after %d flows added = %f'%(cpu_usage1,count,cpu_usage2))
+ df = defer.Deferred()
+ def scale_vcpe_instances(df):
+ cpu_usage1 = self.get_system_cpu_usage()
+ acl_rule = ACLTest()
+ for index in range(0,count):
+ src_ip = scale().generate_random_unicast_ip_addresses(count=1)[0]+'/32'
+ dst_ip = scale().generate_random_unicast_ip_addresses(count=1)[0]+'/32'
+ dst_port = random.randint(1024,65535)
+ log.info('adding acl rule = %d with src ip = %s, dst ip = %s and dst tcp port = %d'%(index+1, src_ip,dst_ip,dst_port))
+ status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip, ipProto ='TCP', dstTpPort =dst_port, action = 'deny')
+ assert_equal(status, True)
+ if index % 100 == 0:
+ cpu_usage = self.get_system_cpu_usage()
+ log.info('CPU usage is %s for multicast group entries %s'%(cpu_usage,index+1))
+ time.sleep(1)
+ cpu_usage2 = self.get_system_cpu_usage()
+ log.info('system cpu usage before flows added = %f and after %d flows added = %f'%(cpu_usage1,count,cpu_usage2))
+ df.callback(0)
+ reactor.callLater(0, scale_vcpe_instances, df)
+ return df
+ @deferred(1000)
def test_scale_adding_and_deleting_10k_acl_rules_to_allow_src_and_dst_ip_matching_traffic_check_cpu_usage(self,count=10000):
- cpu_usage1 = self.get_system_cpu_usage()
- acl_rule = ACLTest()
- for index in range(0,count):
- src_ip = self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
- dst_ip = self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
- dst_port = random.randint(1024,65535)
- log.info('adding acl rule = %d with src ip = %s, dst ip = %s '%(index+1, src_ip,dst_ip))
- status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip,action = 'allow')
- assert_equal(status, True)
- if index % 100 == 0:
- cpu_usage = self.get_system_cpu_usage()
- log.info('CPU usage is %s for acl rule number %s'%(cpu_usage,index+1))
- time.sleep(1)
- cpu_usage2 = self.get_system_cpu_usage()
- result = acl_rule.get_acl_rules()
- result = result.json()['aclRules']
- for acl in result:
+ df = defer.Deferred()
+ def scale_vcpe_instances(df):
+ cpu_usage1 = self.get_system_cpu_usage()
+ acl_rule = ACLTest()
+ for index in range(0,count):
+ src_ip = scale().generate_random_unicast_ip_addresses(count=1)[0]+'/32'
+ dst_ip = scale().generate_random_unicast_ip_addresses(count=1)[0]+'/32'
+ dst_port = random.randint(1024,65535)
+ log.info('adding acl rule = %d with src ip = %s, dst ip = %s '%(index+1, src_ip,dst_ip))
+ status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip,action = 'allow')
+ assert_equal(status, True)
+ if index % 100 == 0:
+ cpu_usage = self.get_system_cpu_usage()
+ log.info('CPU usage is %s for acl rule number %s'%(cpu_usage,index+1))
+ time.sleep(1)
+ cpu_usage2 = self.get_system_cpu_usage()
+ result = acl_rule.get_acl_rules()
+ result = result.json()['aclRules']
+ for acl in result:
acl_rule.remove_acl_rule(acl['id'])
#log.info('acl is %s'%acl)
- cpu_usage3 = self.get_system_cpu_usage()
- log.info('system cpu usage before flows added = %f and after %d flows added = %f, after deleting all acl rules = %f'%(cpu_usage1,count,cpu_usage2,cpu_usage3))
+ cpu_usage3 = self.get_system_cpu_usage()
+ log.info('system cpu usage before flows added = %f and after %d flows added = %f, after deleting all acl rules = %f'%(cpu_usage1,count,cpu_usage2,cpu_usage3))
+ df.callback(0)
+ reactor.callLater(0, scale_vcpe_instances, df)
+ return df
+ @deferred(1000)
def test_scale_adding_20k_acl_rules_to_allow_src_and_dst_ip_matching_traffic_and_deactivate_acl_app_checking_cpu_usage(self,count=20000):
- cpu_usage1 = self.get_system_cpu_usage()
- acl_rule = ACLTest()
- for index in range(0,count):
- src_ip = self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
- dst_ip = self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
- dst_port = random.randint(1024,65535)
- log.info('adding acl rule = %d with src ip = %s, dst ip = %s '%(index+1, src_ip,dst_ip))
- status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip,action = 'allow')
- assert_equal(status, True)
- if index % 200 == 0:
- cpu_usage = self.get_system_cpu_usage()
- log.info('CPU usage is %s for acl rule number %s'%(cpu_usage,index+1))
- time.sleep(1)
- cpu_usage2 = self.get_system_cpu_usage()
- OnosCtrl(cls.acl_app).deactivate()
- time.sleep(3)
- cpu_usage3 = self.get_system_cpu_usage()
- log.info('system cpu usage before flows added = %f, after %d flows added = %f, and after deactivating acl app = %f'%(cpu_usage1,count,cpu_usage2,cpu_usage3))
+ df = defer.Deferred()
+ def scale_vcpe_instances(df):
+ cpu_usage1 = self.get_system_cpu_usage()
+ acl_rule = ACLTest()
+ for index in range(0,count):
+ src_ip = scale().generate_random_unicast_ip_addresses(count=1)[0]+'/32'
+ dst_ip = scale().generate_random_unicast_ip_addresses(count=1)[0]+'/32'
+ dst_port = random.randint(1024,65535)
+ log.info('adding acl rule = %d with src ip = %s, dst ip = %s '%(index+1, src_ip,dst_ip))
+ status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip,action = 'allow')
+ assert_equal(status, True)
+ if index % 200 == 0:
+ cpu_usage = self.get_system_cpu_usage()
+ log.info('CPU usage is %s for acl rule number %s'%(cpu_usage,index+1))
+ time.sleep(1)
+ cpu_usage2 = self.get_system_cpu_usage()
+ OnosCtrl(self.acl_app).deactivate()
+ time.sleep(3)
+ cpu_usage3 = self.get_system_cpu_usage()
+ log.info('system cpu usage before flows added = %f, after %d flows added = %f, and after deactivating acl app = %f'%(cpu_usage1,count,cpu_usage2,cpu_usage3))
+ df.callback(0)
+ reactor.callLater(0, scale_vcpe_instances, df)
+ return df
+ @deferred(1000)
def test_scale_adding_igmp_and_acl_with_flow_entries_and_check_cpu_usage(self,igmp_groups=1300, flows_count=10000):
- cpu_usage1 = self.get_system_cpu_usage()
- egress = 1
- ingress = 2
- egress_mac = '00:00:00:00:01:01'
- ingress_mac = '02:00:00:00:00:00'
- acl_rule = ACLTest()
- OnosCtrl(self.igmp_app).activate()
- groups = self.generate_random_multicast_ip_addresses(count = igmp_groups)
- sources = self.generate_random_unicast_ip_addresses(count = igmp_groups)
- self.onos_ssm_table_load(groups,src_list=sources,flag=True)
- for index in range(igmp_groups):
- self.send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
- iface = self.V_INF1)
- status = self.verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
- assert_equal(status, True)
- log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
- for index in range(flows_count):
- src_ip = self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
- dst_ip = self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
- log.info('adding acl rule = %d with src ip = %s, dst ip = %s '%(index+1, src_ip,dst_ip))
- status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip,action = 'allow')
- assert_equal(status, True)
- ingress_mac = self.next_mac(ingress_mac)
- flow = OnosFlowCtrl(deviceId = self.device_id,
- egressPort = egress + self.port_offset,
- ingressPort = ingress + self.port_offset,
- ethSrc = ingress_mac,
- ethDst = egress_mac)
- result = flow.addFlow()
- assert_equal(result, True)
- log.info("flow number = %d is added",index+1)
- if index % 200 == 0:
- cpu_usage = self.get_system_cpu_usage()
- log.info('CPU usage is %s for acl rule number %s'%(cpu_usage,index+1))
- time.sleep(1)
- cpu_usage2 = self.get_system_cpu_usage()
- log.info('system cpu usage before flows added = %f, after %d flows added = %f'%(cpu_usage1,count,cpu_usage2))
-
- def test_scale_adding_igmp_acl_and_flow_entries_and_simultaneously_toggling_app_activation(self,igmp_groups=1300, flows_count=10000):
- cpu_usage1 = self.get_system_cpu_usage()
- def adding_igmp_entries():
+ df = defer.Deferred()
+ def scale_vcpe_instances(df):
+ cpu_usage1 = self.get_system_cpu_usage()
+ egress = 1
+ ingress = 2
+ egress_mac = '00:00:00:00:01:01'
+ ingress_mac = '02:00:00:00:00:00'
+ acl_rule = ACLTest()
OnosCtrl(self.igmp_app).activate()
groups = self.generate_random_multicast_ip_addresses(count = igmp_groups)
sources = self.generate_random_unicast_ip_addresses(count = igmp_groups)
self.onos_ssm_table_load(groups,src_list=sources,flag=True)
for index in range(igmp_groups):
self.send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
- iface = self.V_INF1)
+ iface = self.V_INF1)
status = self.verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
assert_equal(status, True)
log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
- def adding_flow_entries():
- egress = 1
- ingress = 2
- egress_mac = '00:00:00:00:01:01'
- ingress_mac = '02:00:00:00:00:00'
for index in range(flows_count):
+ src_ip = self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
+ dst_ip = self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
+ log.info('adding acl rule = %d with src ip = %s, dst ip = %s '%(index+1, src_ip,dst_ip))
+ status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip,action = 'allow')
+ assert_equal(status, True)
ingress_mac = self.next_mac(ingress_mac)
flow = OnosFlowCtrl(deviceId = self.device_id,
egressPort = egress + self.port_offset,
@@ -696,62 +960,236 @@
result = flow.addFlow()
assert_equal(result, True)
log.info("flow number = %d is added",index+1)
- def adding_acl_entries():
+ if index % 200 == 0:
+ cpu_usage = self.get_system_cpu_usage()
+ log.info('CPU usage is %s for acl rule number %s'%(cpu_usage,index+1))
+ time.sleep(1)
+ cpu_usage2 = self.get_system_cpu_usage()
+            log.info('system cpu usage before flows added = %f, after %d flows added = %f'%(cpu_usage1,flows_count,cpu_usage2))
+            df.callback(0)
+        reactor.callLater(0, scale_vcpe_instances, df)
+        return df
+
+ @deferred(1000)
+ def test_scale_adding_igmp_acl_and_flow_entries_and_simultaneously_toggling_app_activation(self,igmp_groups=1300, flows_count=10000):
+ df = defer.Deferred()
+ def scale_vcpe_instances(df):
+ cpu_usage1 = self.get_system_cpu_usage()
+ def adding_igmp_entries():
+ OnosCtrl(self.igmp_app).activate()
+ groups = self.generate_random_multicast_ip_addresses(count = igmp_groups)
+ sources = self.generate_random_unicast_ip_addresses(count = igmp_groups)
+ self.onos_ssm_table_load(groups,src_list=sources,flag=True)
+ for index in range(igmp_groups):
+ self.send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
+ iface = self.V_INF1)
+ status = self.verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
+ assert_equal(status, True)
+ log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
+ def adding_flow_entries():
+ egress = 1
+ ingress = 2
+ egress_mac = '00:00:00:00:01:01'
+ ingress_mac = '02:00:00:00:00:00'
+ for index in range(flows_count):
+ ingress_mac = self.next_mac(ingress_mac)
+ flow = OnosFlowCtrl(deviceId = self.device_id,
+ egressPort = egress + self.port_offset,
+ ingressPort = ingress + self.port_offset,
+ ethSrc = ingress_mac,
+ ethDst = egress_mac)
+ result = flow.addFlow()
+ assert_equal(result, True)
+ log.info("flow number = %d is added",index+1)
+ def adding_acl_entries():
+                OnosCtrl(self.acl_app).activate()
+                acl_rule = ACLTest()
+ for index in range(flows_count):
+ src_ip = self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
+ dst_ip = self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
+ dst_port = random.randint(1024,65535)
+ log.info('adding acl rule = %d with src ip = %s, dst ip = %s and dst tcp port = %d'%(index+1, src_ip,dst_ip,dst_port))
+ status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip, ipProto ='TCP', dstTpPort =dst_port, action = 'deny')
+ assert_equal(status, True)
+ igmp_thread = threading.Thread(target = adding_igmp_entries)
+ flows_thread = threading.Thread(target = adding_flow_entries)
+ acl_thread = threading.Thread(target = adding_acl_entries)
+ igmp_thread.start()
+ flows_thread.start()
+ acl_thread.start()
+ time.sleep(1)
+ igmp_thread.join()
+ flows_thread.join()
+ acl_thread.join()
+ cpu_usage2 = self.get_system_cpu_usage()
+ OnosCtrl(self.igmp_app).deactivate()
+ OnosCtrl(self.acl_app).deactivate()
+ cpu_usage3 = self.get_system_cpu_usage()
+ log.info('cpu usage before test start = %f, after igmp,flow and acl entries loaded = %f and after the apps deactivated = %f'%(cpu_usage1,cpu_usage2,cpu_usage3))
+            OnosCtrl(self.igmp_app).activate()
OnosCtrl(self.acl_app).activate()
- for index in range(flows_count):
- src_ip = self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
- dst_ip = self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
- dst_port = random.randint(1024,65535)
- log.info('adding acl rule = %d with src ip = %s, dst ip = %s and dst tcp port = %d'%(index+1, src_ip,dst_ip,dst_port))
- status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip, ipProto ='TCP', dstTpPort =dst_port, action = 'deny')
- assert_equal(status, True)
- igmp_thread = threading.Thread(target = adding_igmp_entries)
- flows_thread = threading.Thread(target = adding_flow_entries)
- acl_thread = threading.Thread(target = adding_acl_entries)
- igmp_thread.start()
- flows_thread.start()
- acl_thread.start()
- time.sleep(1)
- igmp_thread.join()
- flows_thread.join()
- acl_thread.join()
- cpu_usage2 = self.get_system_cpu_usage()
- OnosCtrl(self.igmp_app).deactivate()
- OnosCtrl(self.acl_app).deactivate()
- cpu_usage3 = self.get_system_cpu_usage()
- log.info('cpu usage before test start = %f, after igmp,flow and acl entries loaded = %f and after the apps deactivated = %f'%(cpu_usage1,cpu_usage2,cpu_usage3))
- OnosCtrl(self.igmp_app).activate()
- OnosCtrl(self.acl_app).activate()
+            df.callback(0)
+        reactor.callLater(0, scale_vcpe_instances, df)
+        return df
- def vrouter_scale(self, num_routes, peers = 1):
- from vrouterTest import vrouter_exchange
- vrouter_exchange.setUpClass()
- vrouter = vrouter_exchange('vrouter_scale')
- res = vrouter.vrouter_scale(num_routes, peers = peers)
- vrouter_exchange.tearDownClass()
- assert_equal(res, True)
+ @deferred(1000)
+    def test_scale_for_vrouter_with_100_routes_with_10_peers(self):
+ scale().vrouter_setup()
+ df = defer.Deferred()
+ def scale_vrouter_routes(df):
+ try:
+ res = scale().vrouter_network_verify(100, peers = 10)
+ assert_equal(res, True)
+ except Exception as error:
+ log.info('Got Unexpected error %s'%error)
+ raise
+ df.callback(0)
+ reactor.callLater(0, scale_vrouter_routes, df)
+ return df
- def test_scale_for_vrouter_with_10000_routes(self):
- self.vrouter_scale(10000, peers = 1)
+ #tested with 100 routes on CiaB
+ @deferred(1000)
+    def test_scale_for_vrouter_with_10000_routes_with_100_peers(self):
+ scale().vrouter_setup()
+ df = defer.Deferred()
+ def scale_vrouter_routes(df):
+ try:
+ res = scale().vrouter_network_verify(10000, peers = 100)
+ assert_equal(res, True)
+ except Exception as error:
+ log.info('Got Unexpected error %s'%error)
+ raise
+ df.callback(0)
+ reactor.callLater(0, scale_vrouter_routes, df)
+ return df
- def test_scale_for_vrouter_with_20000_routes(self):
- self.vrouter_scale(20000, peers = 2)
+ #tested with 100 routes on CiaB
+ @deferred(1500)
+ def test_scale_for_vrouter_with_20000_routes_with_100_peers(self):
+ scale().vrouter_setup()
+ df = defer.Deferred()
+ def scale_vrouter_routes(df):
+ try:
+ res = scale().vrouter_network_verify(20000, peers = 100)
+ assert_equal(res, True)
+ except Exception as error:
+ log.info('Got Unexpected error %s'%error)
+ raise
+ df.callback(0)
+ reactor.callLater(0, scale_vrouter_routes, df)
+ return df
- def test_scale_for_vrouter_with_20000_routes_100_peers(self):
- self.vrouter_scale(20000, peers = 100)
-
- def tls_scale(self, num_sessions):
- from tlsTest import eap_auth_exchange
- tls = eap_auth_exchange('tls_scale')
- tls.setUp()
- tls.tls_scale(num_sessions)
-
- #simulating authentication for multiple users, 5K in this test case
- @deferred(TIMEOUT+1800)
+ #tested with 100 subscribers on CiaB
+ @deferred(1800)
def test_scale_of_eap_tls_with_5k_sessions_using_diff_mac(self):
+ OnosCtrl('org.opencord.aaa').activate()
df = defer.Deferred()
def eap_tls_5k_with_diff_mac(df):
- self.tls_scale(5000)
+ try:
+ for i in xrange(5000):
+ tls = TLSAuthTest(src_mac = 'random')
+ tls.runTest()
+                    log_test.info('Authentication successful for user %d'%i)
+ except Exception as error:
+ log.info('Got Unexpected error %s'%error)
+ raise
df.callback(0)
reactor.callLater(0, eap_tls_5k_with_diff_mac, df)
return df
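+    #Each TLSAuthTest(src_mac = 'random') run above emulates one supplicant with a fresh MAC,
+    #so the loop approximates 5k distinct subscribers authenticating sequentially; any exception
+    #raised during a run is re-raised so the deferred test fails.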
+
+ #tested with 100 subscribers on CiaB
+ @deferred(1800)
+ def test_scale_of_eap_tls_with_5k_sessions_using_diff_mac_with_aaa_deactivate_and_activated(self):
+ OnosCtrl('org.opencord.aaa').activate()
+ df = defer.Deferred()
+ def eap_tls_5k_with_diff_mac(df):
+ try:
+ for i in xrange(5000):
+ tls = TLSAuthTest(src_mac = 'random')
+ tls.runTest()
+                    log_test.info('Authentication successful for user %d'%i)
+ OnosCtrl('org.opencord.aaa').deactivate()
+ time.sleep(2)
+ OnosCtrl('org.opencord.aaa').activate()
+ for i in xrange(100):
+ tls = TLSAuthTest(src_mac = 'random')
+ tls.runTest()
+                    log_test.info('Authentication successful for user %d'%i)
+ OnosCtrl('org.opencord.aaa').activate()
+ except Exception as error:
+ log.info('Got Unexpected error %s'%error)
+ OnosCtrl('org.opencord.aaa').activate()
+ raise
+ df.callback(0)
+ reactor.callLater(0, eap_tls_5k_with_diff_mac, df)
+ return df
+
+ #tested with 10 subscribers on CiaB
+ @deferred(1800)
+ def test_scale_5k_cord_subscribers_authentication_with_valid_and_invalid_certificates_and_channel_surfing(self):
+ scale().subscriber_setup()
+ df = defer.Deferred()
+ def cordsub_auth_invalid_cert(df):
+            num_subscribers = 5000
+ num_channels = 1
+ try:
+ test_status = scale().subscriber_join_verify(num_subscribers = num_subscribers,
+ num_channels = num_channels,
+ cbs = (scale().tls_invalid_cert, scale().dhcp_verify, scale().igmp_verify),
+ port_list = scale().generate_port_list(num_subscribers, num_channels), negative_subscriber_auth = 'half')
+ assert_equal(test_status, True)
+ except Exception as error:
+ log.info('Got Unexpected error %s'%error)
+ raise
+ finally:
+ scale().subscriber_teardown()
+ df.callback(0)
+ reactor.callLater(0, cordsub_auth_invalid_cert, df)
+ return df
+
+ #tested with 10 subscribers on CiaB
+ @deferred(1800)
+ def test_scale_5k_cord_subscribers_igmp_join_jump_1500channel(self):
+ scale().subscriber_setup()
+ df = defer.Deferred()
+ def cordsub_igmp_join_jump(df):
+ num_subscribers = 5000
+ num_channels = 1500
+ try:
+ test_status = scale().subscriber_join_verify(num_subscribers = num_subscribers,
+ num_channels = num_channels,
+ cbs = (scale().tls_verify, scale().dhcp_jump_verify, scale().igmp_jump_verify),
+ port_list = scale().generate_port_list(num_subscribers, num_channels),
+ negative_subscriber_auth = 'all')
+ assert_equal(test_status, True)
+ except Exception as error:
+ log.info('Got Unexpected error %s'%error)
+ raise
+ finally:
+ scale().subscriber_teardown()
+ df.callback(0)
+ reactor.callLater(0, cordsub_igmp_join_jump, df)
+ return df
+
+ #tested with 10 subscribers on CiaB
+ @deferred(1800)
+ def test_scale_10k_cord_subscribers_authentication_with_valid_and_non_ca_authorized_certificates_and_channel_surfing(self):
+ scale().subscriber_setup()
+ df = defer.Deferred()
+ def cordsub_auth_valid_cert(df):
+ num_subscribers = 10000
+ num_channels = 1
+ try:
+ test_status = scale().subscriber_join_verify(num_subscribers = num_subscribers,
+ num_channels = num_channels,
+ cbs = (scale().tls_non_ca_authrized_cert, scale().dhcp_verify, scale().igmp_verify),
+ port_list = scale().generate_port_list(num_subscribers, num_channels),
+ negative_subscriber_auth = 'onethird')
+ assert_equal(test_status, True)
+ except Exception as error:
+ log.info('Got Unexpected error %s'%error)
+ raise
+ finally:
+ scale().subscriber_teardown()
+ df.callback(0)
+ reactor.callLater(0, cordsub_auth_valid_cert, df)
+ return df
diff --git a/src/test/utils/Scale.py b/src/test/utils/Scale.py
new file mode 100644
index 0000000..1a53c52
--- /dev/null
+++ b/src/test/utils/Scale.py
@@ -0,0 +1,988 @@
+import os
+from nose.tools import *
+from scapy.all import *
+import requests
+from twisted.internet import defer
+from nose.twistedtools import reactor, deferred
+from CordTestUtils import *
+from CordTestUtils import log_test as log
+from OltConfig import OltConfig
+from onosclidriver import OnosCliDriver
+from SSHTestAgent import SSHTestAgent
+from Channels import Channels, IgmpChannel
+from IGMP import *
+import time, monotonic
+from CordLogger import CordLogger
+from VSGAccess import VSGAccess
+#imports for cord-subscriber module
+from subscriberDb import SubscriberDB
+from Stats import Stats
+from threadPool import ThreadPool
+import threading
+from EapTLS import TLSAuthTest
+from CordTestUtils import log_test as log
+from CordTestConfig import setup_module, running_on_ciab
+from OnosCtrl import OnosCtrl
+from CordContainer import Onos
+from CordSubscriberUtils import CordSubscriberUtils, XosUtils
+from CordTestServer import cord_test_onos_restart, cord_test_quagga_restart, cord_test_shell, cord_test_radius_restart
+
+
+log.setLevel('INFO')
+
+class Subscriber(Channels):
+ PORT_TX_DEFAULT = 2
+ PORT_RX_DEFAULT = 1
+ INTF_TX_DEFAULT = 'veth2'
+ INTF_RX_DEFAULT = 'veth0'
+ STATS_RX = 0
+ STATS_TX = 1
+ STATS_JOIN = 2
+ STATS_LEAVE = 3
+ SUBSCRIBER_SERVICES = 'DHCP IGMP TLS'
+
+ def __init__(self, name = 'sub', service = SUBSCRIBER_SERVICES, port_map = None,
+ num = 1, channel_start = 0,
+ tx_port = PORT_TX_DEFAULT, rx_port = PORT_RX_DEFAULT,
+ iface = INTF_RX_DEFAULT, iface_mcast = INTF_TX_DEFAULT,
+ mcast_cb = None, loginType = 'wireless'):
+ self.tx_port = tx_port
+ self.rx_port = rx_port
+ self.port_map = port_map or g_subscriber_port_map
+ try:
+ self.tx_intf = self.port_map[tx_port]
+ self.rx_intf = self.port_map[rx_port]
+ except:
+ self.tx_intf = self.port_map[self.PORT_TX_DEFAULT]
+ self.rx_intf = self.port_map[self.PORT_RX_DEFAULT]
+
+ log_test.info('Subscriber %s, rx interface %s, uplink interface %s' %(name, self.rx_intf, self.tx_intf))
+ Channels.__init__(self, num, channel_start = channel_start,
+ iface = self.rx_intf, iface_mcast = self.tx_intf, mcast_cb = mcast_cb)
+ self.name = name
+ self.service = service
+ self.service_map = {}
+ services = self.service.strip().split(' ')
+ for s in services:
+ self.service_map[s] = True
+ self.loginType = loginType
+ ##start streaming channels
+ self.join_map = {}
+ ##accumulated join recv stats
+ self.join_rx_stats = Stats()
+ self.recv_timeout = False
+ def has_service(self, service):
+ if self.service_map.has_key(service):
+ return self.service_map[service]
+ if self.service_map.has_key(service.upper()):
+ return self.service_map[service.upper()]
+ return False
+
+ def channel_join_update(self, chan, join_time):
+ self.join_map[chan] = ( Stats(), Stats(), Stats(), Stats() )
+ self.channel_update(chan, self.STATS_JOIN, 1, t = join_time)
+ def channel_join(self, chan = 0, delay = 2):
+ '''Join a channel and create a send/recv stats map'''
+ if self.join_map.has_key(chan):
+ del self.join_map[chan]
+ self.delay = delay
+ chan, join_time = self.join(chan)
+ self.channel_join_update(chan, join_time)
+ return chan
+
+ def channel_join_next(self, delay = 2, leave_flag = True):
+ '''Joins the next channel leaving the last channel'''
+ if self.last_chan:
+ if self.join_map.has_key(self.last_chan):
+ del self.join_map[self.last_chan]
+ self.delay = delay
+ chan, join_time = self.join_next(leave_flag = leave_flag)
+ self.channel_join_update(chan, join_time)
+ return chan
+
+ def channel_jump(self, delay = 2):
+ '''Jumps randomly to the next channel leaving the last channel'''
+ if self.last_chan is not None:
+ if self.join_map.has_key(self.last_chan):
+ del self.join_map[self.last_chan]
+ self.delay = delay
+ chan, join_time = self.jump()
+ self.channel_join_update(chan, join_time)
+ return chan
+
+ def channel_leave(self, chan = 0, force = False):
+ if self.join_map.has_key(chan):
+ del self.join_map[chan]
+ self.leave(chan, force = force)
+
+ def channel_update(self, chan, stats_type, packets, t=0):
+ if type(chan) == type(0):
+ chan_list = (chan,)
+ else:
+ chan_list = chan
+ for c in chan_list:
+ if self.join_map.has_key(c):
+ self.join_map[c][stats_type].update(packets = packets, t = t)
+ def channel_receive(self, chan, cb = None, count = 1, timeout = 5):
+ log_test.info('Subscriber %s on port %s receiving from group %s, channel %d' %
+ (self.name, self.rx_intf, self.gaddr(chan), chan))
+ r = self.recv(chan, cb = cb, count = count, timeout = timeout)
+ if len(r) == 0:
+ log_test.info('Subscriber %s on port %s timed out' %(self.name, self.rx_intf))
+ else:
+ log_test.info('Subscriber %s on port %s received %d packets' %(self.name, self.rx_intf, len(r)))
+ if self.recv_timeout:
+ ##Negative test case is disabled for now
+ assert_equal(len(r), 0)
+
+ def recv_channel_cb(self, pkt):
+ ##First verify that we have received the packet for the joined instance
+ log_test.info('Packet received for group %s, subscriber %s, port %s' %
+ (pkt[IP].dst, self.name, self.rx_intf))
+ if self.recv_timeout:
+ return
+ chan = self.caddr(pkt[IP].dst)
+ assert_equal(chan in self.join_map.keys(), True)
+ recv_time = monotonic.monotonic() * 1000000
+ join_time = self.join_map[chan][self.STATS_JOIN].start
+ delta = recv_time - join_time
+ self.join_rx_stats.update(packets=1, t = delta, usecs = True)
+ self.channel_update(chan, self.STATS_RX, 1, t = delta)
+ log_test.debug('Packet received in %.3f usecs for group %s after join' %(delta, pkt[IP].dst))
+
+class subscriber_pool:
+
+ def __init__(self, subscriber, test_cbs):
+ self.subscriber = subscriber
+ self.test_cbs = test_cbs
+
+ def pool_cb(self):
+ for cb in self.test_cbs:
+ if cb:
+ self.test_status = cb(self.subscriber)
+ if self.test_status is not True:
+                    ## Override the status so the remaining subscribers are still checked
+                    self.test_status = True
+                    log_test.info('This service failed; the remaining services will not be run for this subscriber')
+ break
+        log_test.info('This subscriber was checked for multiple-service eligibility')
+ self.test_status = True
+
+class scale(object):
+
+ USER = "vagrant"
+ PASS = "vagrant"
+ head_node = os.getenv('HEAD_NODE', 'prod')
+ HEAD_NODE = head_node + '.cord.lab' if len(head_node.split('.')) == 1 else head_node
+ MAX_PORTS = 100
+ device_id = 'of:' + get_mac()
+ test_path = os.path.dirname(os.path.realpath(__file__))
+ olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(test_path, '..', 'setup/olt_config.json'))
+ olt = OltConfig(olt_conf_file = olt_conf_file)
+ APP_NAME = 'org.ciena.xconnect'
+ olt_apps = ()
+ table_app = 'org.ciena.cordigmp'
+ table_app_file = os.path.join(test_path, '..', 'apps/ciena-cordigmp-multitable-2.0-SNAPSHOT.oar')
+ app_file = os.path.join(test_path, '..', 'apps/ciena-cordigmp-2.0-SNAPSHOT.oar')
+ cpqd_path = os.path.join(test_path, '..', 'setup')
+ ovs_path = cpqd_path
+ test_services = ('IGMP', 'TRAFFIC')
+ num_joins = 0
+ num_subscribers = 0
+ leave_flag = True
+ recv_timeout = False
+ onos_restartable = bool(int(os.getenv('ONOS_RESTART', 0)))
+ PORT_TX_DEFAULT = 2
+ PORT_RX_DEFAULT = 1
+ IP_DST = '224.0.0.22'
+ IGMP_DST_MAC = "01:00:5e:00:00:16"
+ igmp_eth = Ether(dst = IGMP_DST_MAC, type = ETH_P_IP)
+ igmp_ip = IP(dst = IP_DST)
+
+
+
+ CLIENT_CERT = """-----BEGIN CERTIFICATE-----
+MIICuDCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwNjA2MjExMjI3WhcN
+MTcwNjAxMjExMjI3WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
+hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
+gYEAwvXiSzb9LZ6c7uNziUfKvoHO7wu/uiFC5YUpXbmVGuGZizbVrny0xnR85Dfe
++9R4diansfDhIhzOUl1XjN3YDeSS9OeF5YWNNE8XDhlz2d3rVzaN6hIhdotBkUjg
+rUewjTg5OFR31QEyG3v8xR3CLgiE9xQELjZbSA07pD79zuUCAwEAAaNPME0wEwYD
+VR0lBAwwCgYIKwYBBQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5l
+eGFtcGxlLmNvbS9leGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOBgQDAjkrY
+6tDChmKbvr8w6Du/t8vHjTCoCIocHTN0qzWOeb1YsAGX89+TrWIuO1dFyYd+Z0KC
+PDKB5j/ygml9Na+AklSYAVJIjvlzXKZrOaPmhZqDufi+rXWti/utVqY4VMW2+HKC
+nXp37qWeuFLGyR1519Y1d6F/5XzqmvbwURuEug==
+-----END CERTIFICATE-----"""
+
+ CLIENT_CERT_INVALID = '''-----BEGIN CERTIFICATE-----
+MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
+MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
+hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
+5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
+tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
+OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
+qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
+2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
+BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
+eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
+MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
+VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
+RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
+dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
+T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
+yg==
+-----END CERTIFICATE-----'''
+
+############ IGMP utility functions #######################
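+    #onos_ssm_table_load below pushes one SSM translate entry per (group, source) pair to the
+    #org.opencord.igmp app via OnosCtrl.config; the payload is shaped roughly as
+    #{'apps': {'org.opencord.igmp': {'ssmTranslate': [{'source': '1.2.3.4', 'group': '229.0.0.1'}]}}}
+    #(values shown are only illustrative).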
+ def onos_ssm_table_load(self, groups, src_list = ['1.2.3.4'],flag = False):
+ ssm_dict = {'apps' : { 'org.opencord.igmp' : { 'ssmTranslate' : [] } } }
+ ssm_xlate_list = ssm_dict['apps']['org.opencord.igmp']['ssmTranslate']
+        if flag: #to maintain separate group-source pairs.
+ for i in range(len(groups)):
+ d = {}
+ d['source'] = src_list[i] or '0.0.0.0'
+ d['group'] = groups[i]
+ ssm_xlate_list.append(d)
+ else:
+ for g in groups:
+ for s in src_list:
+ d = {}
+ d['source'] = s or '0.0.0.0'
+ d['group'] = g
+ ssm_xlate_list.append(d)
+ self.onos_load_config(ssm_dict)
+ cord_port_map = {}
+ for g in groups:
+ cord_port_map[g] = (self.PORT_TX_DEFAULT, self.PORT_RX_DEFAULT)
+ IgmpChannel().cord_port_table_load(cord_port_map)
+ time.sleep(2)
+
+ def generate_random_multicast_ip_addresses(self,count=500):
+ multicast_ips = []
+ while(count >= 1):
+ ip = '.'.join([str(random.randint(224,239)),str(random.randint(1,254)),str(random.randint(1,254)),str(random.randint(1,254))])
+ if ip in multicast_ips:
+ pass
+ else:
+ multicast_ips.append(ip)
+ count -= 1
+ return multicast_ips
+
+ def generate_random_unicast_ip_addresses(self,count=1):
+ unicast_ips = []
+ while(count >= 1):
+ ip = '.'.join([str(random.randint(11,126)),str(random.randint(1,254)),str(random.randint(1,254)),str(random.randint(1,254))])
+ if ip in unicast_ips:
+ pass
+ else:
+ unicast_ips.append(ip)
+ count -= 1
+ return unicast_ips
+
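+    #iptomac applies the usual IPv4 multicast-to-MAC mapping: the low 23 bits of the group
+    #address go under the 01:00:5e prefix, e.g. iptomac('239.1.1.1') is expected to return
+    #'01:00:5e:01:01:01'.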
+ def iptomac(self, mcast_ip):
+ mcast_mac = '01:00:5e:'
+ octets = mcast_ip.split('.')
+ second_oct = int(octets[1]) & 127
+ third_oct = int(octets[2])
+ fourth_oct = int(octets[3])
+ mcast_mac = mcast_mac + format(second_oct,'02x') + ':' + format(third_oct, '02x') + ':' + format(fourth_oct, '02x')
+ return mcast_mac
+
+ def send_igmp_join(self, groups, src_list = ['1.2.3.4'], record_type=IGMP_V3_GR_TYPE_INCLUDE,
+ ip_pkt = None, iface = 'veth0', ssm_load = False, delay = 1):
+ if ssm_load is True:
+ self.onos_ssm_table_load(groups, src_list)
+ igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
+ gaddr=self.IP_DST)
+ for g in groups:
+ gr = IGMPv3gr(rtype= record_type, mcaddr=g)
+ gr.sources = src_list
+ igmp.grps.append(gr)
+ if ip_pkt is None:
+ ip_pkt = self.igmp_eth/self.igmp_ip
+ pkt = ip_pkt/igmp
+ IGMPv3.fixup(pkt)
+        log.info('sending igmp join packet %s'%pkt.summary())
+ sendp(pkt, iface=iface)
+ time.sleep(delay)
+
+ def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
+ dst_mac = self.iptomac(group)
+ eth = Ether(dst= dst_mac)
+ ip = IP(dst=group,src=source)
+ data = repr(monotonic.monotonic())
+ sendp(eth/ip/data,count=20, iface = intf)
+
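+    #verify_igmp_data_traffic starts a sniffer thread filtering on the (group, source) pair,
+    #then transmits multicast data for that group; self.success records whether a matching
+    #packet arrived within the sniff timeout.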
+ def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
+ log_test.info('verifying multicast traffic for group %s from source %s'%(group,source))
+ self.success = False
+ def recv_task():
+ def igmp_recv_cb(pkt):
+ #log_test.info('received multicast data packet is %s'%pkt.show())
+ log_test.info('multicast data received for group %s from source %s'%(group,source))
+ self.success = True
+            sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface=intf)
+ t = threading.Thread(target = recv_task)
+ t.start()
+ self.send_multicast_data_traffic(group,source=source)
+ t.join()
+ return self.success
+
+ @classmethod
+ def vrouter_setup(cls):
+ apps = ('org.onosproject.proxyarp', 'org.onosproject.hostprovider', 'org.onosproject.vrouter', 'org.onosproject.fwd')
+ for app in apps:
+ OnosCtrl(app).activate()
+ cls.port_map, cls.port_list = cls.olt.olt_port_map()
+ cls.vrouter_device_dict = { "devices" : {
+ "{}".format(cls.device_id) : {
+ "basic" : {
+ "driver" : "softrouter"
+ }
+ }
+ },
+ }
+ cls.zebra_conf = '''
+password zebra
+log stdout
+service advanced-vty
+!
+!debug zebra rib
+!debug zebra kernel
+!debug zebra fpm
+!
+!interface eth1
+! ip address 10.10.0.3/16
+line vty
+ exec-timeout 0 0
+'''
+ @classmethod
+ def start_quagga(cls, networks = 4, peer_address = None, router_address = None):
+ log_test.info('Restarting Quagga container with configuration for %d networks' %(networks))
+ config = cls.generate_conf(networks = networks, peer_address = peer_address, router_address = router_address)
+ if networks <= 10000:
+ boot_delay = 25
+ else:
+ delay_map = [60, 100, 150, 200, 300, 450, 600, 800, 1000, 1200]
+ n = min(networks/100000, len(delay_map)-1)
+ boot_delay = delay_map[n]
+ cord_test_quagga_restart(config = config, boot_delay = boot_delay)
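+    #generate_vrouter_conf below builds the device/ports/router payloads used for the vrouter
+    #scale runs: one 'b1-N' interface with a /24 address per peer (starting at 192.168.10.0/24
+    #with the defaults) plus a single bgpSpeakers entry on port peers+1; the addresses change
+    #when peer_address is supplied.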
+ @classmethod
+ def generate_vrouter_conf(cls, networks = 4, peers = 1, peer_address = None, router_address = None):
+ num = 0
+ if peer_address is None:
+ start_peer = ( 192 << 24) | ( 168 << 16) | (10 << 8) | 0
+ end_peer = ( 200 << 24 ) | (168 << 16) | (10 << 8) | 0
+ else:
+ ip = peer_address[0][0]
+ start_ip = ip.split('.')
+ start_peer = ( int(start_ip[0]) << 24) | ( int(start_ip[1]) << 16) | ( int(start_ip[2]) << 8) | 0
+ end_peer = ((int(start_ip[0]) + 8) << 24 ) | (int(start_ip[1]) << 16) | (int(start_ip[2]) << 8) | 0
+ local_network = end_peer + 1
+ ports_dict = { 'ports' : {} }
+ interface_list = []
+ peer_list = []
+ for n in xrange(start_peer, end_peer, 256):
+ port_map = ports_dict['ports']
+ port = num + 1 if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
+ device_port_key = '{0}/{1}'.format(cls.device_id, port)
+ try:
+ interfaces = port_map[device_port_key]['interfaces']
+ except:
+ port_map[device_port_key] = { 'interfaces' : [] }
+ interfaces = port_map[device_port_key]['interfaces']
+ ip = n + 2
+ peer_ip = n + 1
+ ips = '%d.%d.%d.%d/24'%( (ip >> 24) & 0xff, ( (ip >> 16) & 0xff ), ( (ip >> 8 ) & 0xff ), ip & 0xff)
+ peer = '%d.%d.%d.%d' % ( (peer_ip >> 24) & 0xff, ( ( peer_ip >> 16) & 0xff ), ( (peer_ip >> 8 ) & 0xff ), peer_ip & 0xff )
+ mac = RandMAC()._fix()
+ peer_list.append((peer, mac))
+ if num < cls.MAX_PORTS - 1:
+ interface_dict = { 'name' : 'b1-{}'.format(port), 'ips': [ips], 'mac' : mac }
+ interfaces.append(interface_dict)
+ interface_list.append(interface_dict['name'])
+ else:
+ interfaces[0]['ips'].append(ips)
+ num += 1
+ if num == peers:
+ break
+ quagga_dict = { 'apps': { 'org.onosproject.router' : { 'router' : {}, 'bgp' : { 'bgpSpeakers' : [] } } } }
+ quagga_router_dict = quagga_dict['apps']['org.onosproject.router']['router']
+ quagga_router_dict['ospfEnabled'] = True
+ quagga_router_dict['interfaces'] = interface_list
+ quagga_router_dict['controlPlaneConnectPoint'] = '{0}/{1}'.format(cls.device_id, peers + 1)
+
+ #bgp_speaker_dict = { 'apps': { 'org.onosproject.router' : { 'bgp' : { 'bgpSpeakers' : [] } } } }
+ bgp_speakers_list = quagga_dict['apps']['org.onosproject.router']['bgp']['bgpSpeakers']
+ speaker_dict = {}
+ speaker_dict['name'] = 'bgp{}'.format(peers+1)
+ speaker_dict['connectPoint'] = '{0}/{1}'.format(cls.device_id, peers + 1)
+ speaker_dict['peers'] = peer_list
+ bgp_speakers_list.append(speaker_dict)
+ cls.peer_list = peer_list
+ return (cls.vrouter_device_dict, ports_dict, quagga_dict)
+
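+    #generate_conf renders the zebra static-route block appended to cls.zebra_conf; with the
+    #default start addresses the first generated line is expected to look like
+    #'ip route 11.10.10.0/24 192.168.10.1', one route per /24 network with gateways cycled
+    #across the peer list.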
+ @classmethod
+ def generate_conf(cls, networks = 4, peer_address = None, router_address = None):
+ num = 0
+ if router_address is None:
+ start_network = ( 11 << 24) | ( 10 << 16) | ( 10 << 8) | 0
+ end_network = ( 172 << 24 ) | ( 0 << 16) | (0 << 8) | 0
+ network_mask = 24
+ else:
+ ip = router_address
+ start_ip = ip.split('.')
+ network_mask = int(start_ip[3].split('/')[1])
+ start_ip[3] = (start_ip[3].split('/'))[0]
+ start_network = (int(start_ip[0]) << 24) | ( int(start_ip[1]) << 16) | ( int(start_ip[2]) << 8) | 0
+ end_network = (172 << 24 ) | (int(start_ip[1]) << 16) | (int(start_ip[2]) << 8) | 0
+ net_list = []
+ peer_list = peer_address if peer_address is not None else cls.peer_list
+ network_list = []
+ for n in xrange(start_network, end_network, 256):
+ net = '%d.%d.%d.0'%( (n >> 24) & 0xff, ( ( n >> 16) & 0xff ), ( (n >> 8 ) & 0xff ) )
+ network_list.append(net)
+ gateway = peer_list[num % len(peer_list)][0]
+ net_route = 'ip route {0}/{1} {2}'.format(net, network_mask, gateway)
+ net_list.append(net_route)
+ num += 1
+ if num == networks:
+ break
+ cls.network_list = network_list
+ cls.network_mask = network_mask
+ zebra_routes = '\n'.join(net_list)
+ #log_test.info('Zebra routes: \n:%s\n' %cls.zebra_conf + zebra_routes)
+ return cls.zebra_conf + zebra_routes
+
+ @classmethod
+ def vrouter_host_load(cls, peer_address = None):
+ index = 1
+ peer_info = peer_address if peer_address is not None else cls.peer_list
+
+ for host,_ in peer_info:
+ iface = cls.port_map[index]
+ index += 1
+ log_test.info('Assigning ip %s to interface %s' %(host, iface))
+ config_cmds = ( 'ifconfig {} 0'.format(iface),
+ 'ifconfig {0} {1}'.format(iface, host),
+ 'arping -I {0} {1} -c 2'.format(iface, host),
+ )
+ for cmd in config_cmds:
+ os.system(cmd)
+ @classmethod
+ def vrouter_host_unload(cls, peer_address = None):
+ index = 1
+ peer_info = peer_address if peer_address is not None else cls.peer_list
+
+ for host,_ in peer_info:
+ iface = cls.port_map[index]
+ index += 1
+ config_cmds = ('ifconfig {} 0'.format(iface), )
+ for cmd in config_cmds:
+ os.system(cmd)
+
+ @classmethod
+ def vrouter_config_get(cls, networks = 4, peers = 1, peer_address = None,
+ route_update = None, router_address = None):
+ vrouter_configs = cls.generate_vrouter_conf(networks = networks, peers = peers,
+ peer_address = peer_address, router_address = router_address)
+ return vrouter_configs
+
+ @classmethod
+ def vrouter_configure(cls, networks = 4, peers = 1, peer_address = None,
+ route_update = None, router_address = None, time_expire = None, adding_new_routes = None):
+ vrouter_configs = cls.vrouter_config_get(networks = networks, peers = peers,
+ peer_address = peer_address, route_update = route_update)
+ cls.start_onos(network_cfg = vrouter_configs)
+ time.sleep(5)
+ cls.vrouter_host_load()
+ ##Start quagga
+ cls.start_quagga(networks = networks, peer_address = peer_address, router_address = router_address)
+ return vrouter_configs
+ def vrouter_port_send_recv(self, ingress, egress, dst_mac, dst_ip, positive_test = True):
+ src_mac = '00:00:00:00:00:02'
+ src_ip = '1.1.1.1'
+ self.success = False if positive_test else True
+ timeout = 10 if positive_test else 1
+ count = 2 if positive_test else 1
+ self.start_sending = True
+ def recv_task():
+ def recv_cb(pkt):
+ log_test.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
+ self.success = True if positive_test else False
+ sniff(count=count, timeout=timeout,
+ lfilter = lambda p: IP in p and p[IP].dst == dst_ip and p[IP].src == src_ip,
+ prn = recv_cb, iface = self.port_map[ingress])
+ self.start_sending = False
+
+ t = threading.Thread(target = recv_task)
+ t.start()
+ L2 = Ether(src = src_mac, dst = dst_mac)
+ L3 = IP(src = src_ip, dst = dst_ip)
+ pkt = L2/L3
+ log_test.info('Sending a packet with dst ip %s, dst mac %s on port %s to verify if flows are correct' %
+ (dst_ip, dst_mac, self.port_map[egress]))
+ while self.start_sending is True:
+ sendp(pkt, count=50, iface = self.port_map[egress])
+ t.join()
+ assert_equal(self.success, True)
+
+ def vrouter_traffic_verify(self, positive_test = True, peer_address = None):
+ if peer_address is None:
+ peers = len(self.peer_list)
+ peer_list = self.peer_list
+ else:
+ peers = len(peer_address)
+ peer_list = peer_address
+ egress = peers + 1
+ num = 0
+ num_hosts = 5 if positive_test else 1
+ src_mac = '00:00:00:00:00:02'
+ src_ip = '1.1.1.1'
+ if self.network_mask != 24:
+ peers = 1
+ for network in self.network_list:
+ num_ips = num_hosts
+ octets = network.split('.')
+ for i in xrange(num_ips):
+ octets[-1] = str(int(octets[-1]) + 1)
+ dst_ip = '.'.join(octets)
+ dst_mac = peer_list[ num % peers ] [1]
+ port = (num % peers)
+ ingress = port + 1
+ #Since peers are on the same network
+ ##Verify if flows are setup by sending traffic across
+ self.vrouter_port_send_recv(ingress, egress, dst_mac, dst_ip, positive_test = positive_test)
+ num += 1
+ def vrouter_network_verify(self, networks, peers = 1, positive_test = True,
+ start_network = None, start_peer_address = None, route_update = None,
+ invalid_peers = None, time_expire = None, unreachable_route_traffic = None,
+ deactivate_activate_vrouter = None, adding_new_routes = None):
+        log_test.info('Number of networks to verify: %d' %networks)
+ self.vrouter_setup()
+ _, ports_map, egress_map = self.vrouter_configure(networks = networks, peers = peers,
+ peer_address = start_peer_address,
+ route_update = route_update,
+ router_address = start_network,
+ time_expire = time_expire,
+ adding_new_routes = adding_new_routes)
+ self.vrouter_traffic_verify()
+ self.vrouter_host_unload()
+ return True
+
+############### Cord Subscriber utility functions #########################
+
+ @classmethod
+ def flows_setup(cls):
+ cls.olt = OltConfig()
+ cls.port_map, _ = cls.olt.olt_port_map()
+ if not cls.port_map:
+ cls.port_map = cls.default_port_map
+ cls.device_id = OnosCtrl.get_device_id()
+ num_ports = len(cls.port_map['ports'] + cls.port_map['relay_ports'])
+ cls.port_offset = int(os.getenv('TEST_INSTANCE', 0)) * num_ports
+
+ @classmethod
+ def update_apps_version(cls):
+ version = Onos.getVersion()
+ major = int(version.split('.')[0])
+ minor = int(version.split('.')[1])
+ cordigmp_app_version = '2.0-SNAPSHOT'
+ olt_app_version = '1.2-SNAPSHOT'
+ if major > 1:
+ cordigmp_app_version = '3.0-SNAPSHOT'
+ olt_app_version = '2.0-SNAPSHOT'
+ elif major == 1:
+ if minor > 10:
+ cordigmp_app_version = '3.0-SNAPSHOT'
+ olt_app_version = '2.0-SNAPSHOT'
+ elif minor <= 8:
+ olt_app_version = '1.1-SNAPSHOT'
+ cls.app_file = os.path.join(cls.test_path, '..', 'apps/ciena-cordigmp-{}.oar'.format(cordigmp_app_version))
+ cls.table_app_file = os.path.join(cls.test_path, '..', 'apps/ciena-cordigmp-multitable-{}.oar'.format(cordigmp_app_version))
+ cls.olt_app_file = os.path.join(cls.test_path, '..', 'apps/olt-app-{}.oar'.format(olt_app_version))
+
+ @classmethod
+ def subscriber_setup(cls):
+        log.info('Setting up ONOS subscriber apps for the scale tests')
+ cls.subscriber_apps = ('org.opencord.aaa', 'org.onosproject.dhcp')
+ for app in cls.subscriber_apps:
+ OnosCtrl(app).activate()
+ cls.update_apps_version()
+ #dids = OnosCtrl.get_device_ids()
+ #device_map = {}
+ #for did in dids:
+ # device_map[did] = { 'basic' : { 'driver' : 'pmc-olt' } }
+ #network_cfg = {}
+ #network_cfg = { 'devices' : device_map }
+ #Restart ONOS with cpqd driver config for OVS
+ #cls.start_onos(network_cfg = network_cfg)
+ cls.port_map, cls.port_list = cls.olt.olt_port_map()
+ cls.switches = cls.port_map['switches']
+ cls.num_ports = cls.port_map['num_ports']
+ if cls.num_ports > 1:
+ cls.num_ports -= 1 ##account for the tx port
+ #Uninstall the existing app if any
+ #OnosCtrl.uninstall_app(cls.table_app)
+ #log_test.info('Installing the multi table app %s for subscriber test' %(cls.table_app_file))
+ #OnosCtrl.install_app(cls.table_app_file)
+
+ @classmethod
+ def subscriber_teardown(cls):
+        log.info('Tearing down subscriber apps used for the scale tests')
+ apps = cls.olt_apps + cls.subscriber_apps #( cls.table_app,)
+ for app in apps:
+ OnosCtrl(app).deactivate()
+ #cls.start_onos(network_cfg = {})
+ #OnosCtrl.uninstall_app(cls.table_app)
+ #log_test.info('Installing back the cord igmp app %s for subscriber test on exit' %(cls.app_file))
+ #OnosCtrl.install_app(cls.app_file)
+
+ @classmethod
+ def start_cpqd(cls, mac = '00:11:22:33:44:55'):
+ dpid = mac.replace(':', '')
+ cpqd_file = os.sep.join( (cls.cpqd_path, 'cpqd.sh') )
+ cpqd_cmd = '{} {}'.format(cpqd_file, dpid)
+ ret = os.system(cpqd_cmd)
+ assert_equal(ret, 0)
+ time.sleep(10)
+ device_id = 'of:{}{}'.format('0'*4, dpid)
+ return device_id
+
+ @classmethod
+ def start_ovs(cls):
+ ovs_file = os.sep.join( (cls.ovs_path, 'of-bridge.sh') )
+ ret = os.system(ovs_file)
+ assert_equal(ret, 0)
+ time.sleep(30)
+ @classmethod
+ def ovs_cleanup(cls):
+        log.info('Deleting OVS groups on br-int before the subscriber test')
+ ##For every test case, delete all the OVS groups
+ cmd = 'ovs-ofctl del-groups br-int -OOpenFlow11 >/dev/null 2>&1'
+ try:
+ cord_test_shell(cmd)
+ ##Since olt config is used for this test, we just fire a careless local cmd as well
+ os.system(cmd)
+ finally:
+ return
+
+ def generate_port_list(self, subscribers, channels):
+ log.info('port list in generate port list is %s'%self.port_list)
+ return self.port_list[:subscribers]
+ def subscriber_load(self, create = True, num = 10, num_channels = 1, channel_start = 0, port_list = [], services = None):
+ '''Load the subscriber from the database'''
+        log.info('Loading %d subscribers from the subscriber database' %num)
+ test_services = services if services else self.test_services
+ self.subscriber_db = SubscriberDB(create = create, services = test_services)
+ if create is True:
+ self.subscriber_db.generate(num)
+ self.subscriber_info = self.subscriber_db.read(num)
+ self.subscriber_list = []
+ if not port_list:
+ port_list = self.generate_port_list(num, num_channels)
+ log.info('port_list in subscriber load is %s'%port_list)
+ index = 0
+ for info in self.subscriber_info:
+ self.subscriber_list.append(Subscriber(name=info['Name'],
+ service=info['Service'],
+ port_map = self.port_map,
+ num=num_channels,
+ channel_start = channel_start,
+ tx_port = port_list[index][0],
+ rx_port = port_list[index][1]))
+ if num_channels > 1:
+ channel_start += num_channels
+ index += 1
+ #load the ssm list for all subscriber channels
+ igmpChannel = IgmpChannel()
+ ssm_groups = map(lambda sub: sub.channels, self.subscriber_list)
+ ssm_list = reduce(lambda ssm1, ssm2: ssm1+ssm2, ssm_groups)
+ igmpChannel.igmp_load_ssm_config(ssm_list)
+
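+    #subscriber_join_verify drives every loaded subscriber through the supplied cbs tuple on a
+    #thread pool; a typical invocation (sketch only) mirrors the scale tests above, e.g.
+    #subscriber_join_verify(num_subscribers = 16, num_channels = 1,
+    #                       cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
+    #                       negative_subscriber_auth = 'all')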
+ def subscriber_join_verify( self, num_subscribers = 10, num_channels = 1,
+ channel_start = 0, cbs = None, port_list = [],
+ services = None, negative_subscriber_auth = None):
+        log.info('Verifying join for %d subscribers across %d channel(s)' %(num_subscribers, num_channels))
+ self.test_status = False
+ self.ovs_cleanup()
+ subscribers_count = num_subscribers
+ sub_loop_count = num_subscribers
+ self.subscriber_load(create = True, num = num_subscribers,
+ num_channels = num_channels, channel_start = channel_start, port_list = port_list,
+ services = services)
+ self.onos_aaa_config()
+ self.thread_pool = ThreadPool(min(100, subscribers_count), queue_size=1, wait_timeout=1)
+ chan_leave = False #for single channel, multiple subscribers
+ if cbs is None:
+ cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify, self.traffic_verify)
+ chan_leave = True
+ cbs_negative = cbs
+ for subscriber in self.subscriber_list:
+ if services and 'IGMP' in services:
+ subscriber.start()
+            if negative_subscriber_auth == 'half' and sub_loop_count % 2 != 0:
+                cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify, self.traffic_verify)
+            elif negative_subscriber_auth == 'onethird' and sub_loop_count % 3 != 0:
+ cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify, self.traffic_verify)
+ else:
+ cbs = cbs_negative
+ sub_loop_count = sub_loop_count - 1
+ pool_object = subscriber_pool(subscriber, cbs)
+ self.thread_pool.addTask(pool_object.pool_cb)
+ self.thread_pool.cleanUpThreads()
+ for subscriber in self.subscriber_list:
+ if services and 'IGMP' in services:
+ subscriber.stop()
+ if chan_leave is True:
+ subscriber.channel_leave(0)
+ subscribers_count = 0
+ return self.test_status
+ def tls_invalid_cert(self, subscriber):
+        log.info('Verifying TLS authentication with an invalid certificate for subscriber %s' %subscriber.name)
+ if subscriber.has_service('TLS'):
+ time.sleep(2)
+ log_test.info('Running subscriber %s tls auth test' %subscriber.name)
+ tls = TLSAuthTest(client_cert = self.CLIENT_CERT_INVALID)
+ tls.runTest()
+ if tls.failTest == True:
+ self.test_status = False
+ return self.test_status
+ else:
+ self.test_status = True
+ return self.test_status
+
+ def tls_verify(self, subscriber):
+ def tls_fail_cb():
+ log_test.info('TLS verification failed')
+ if subscriber.has_service('TLS'):
+ tls = TLSAuthTest(fail_cb = tls_fail_cb, intf = subscriber.rx_intf)
+ log_test.info('Running subscriber %s tls auth test' %subscriber.name)
+ tls.runTest()
+ assert_equal(tls.failTest, False)
+ self.test_status = True
+ return self.test_status
+ else:
+ self.test_status = True
+ return self.test_status
+
+ def tls_non_ca_authrized_cert(self, subscriber):
+ if subscriber.has_service('TLS'):
+ time.sleep(2)
+ log_test.info('Running subscriber %s tls auth test' %subscriber.name)
+ tls = TLSAuthTest(client_cert = self.CLIENT_CERT_NON_CA_AUTHORIZED)
+ tls.runTest()
+ if tls.failTest == False:
+ self.test_status = True
+ return self.test_status
+ else:
+ self.test_status = True
+ return self.test_status
+
+ def dhcp_verify(self, subscriber):
+        log.info('Verifying DHCP address assignment for subscriber %s' %subscriber.name)
+ if subscriber.has_service('DHCP'):
+ cip, sip = self.dhcp_request(subscriber, update_seed = True)
+ log_test.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
+ subscriber.src_list = [cip]
+ self.test_status = True
+ return self.test_status
+ else:
+ subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
+ self.test_status = True
+ return self.test_status
+ def dhcp_jump_verify(self, subscriber):
+ if subscriber.has_service('DHCP'):
+ cip, sip = self.dhcp_request(subscriber, seed_ip = '10.10.200.1')
+ log_test.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
+ subscriber.src_list = [cip]
+ self.test_status = True
+ return self.test_status
+ else:
+ subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
+ self.test_status = True
+ return self.test_status
+
+ def igmp_verify(self, subscriber):
+        log.info('Verifying IGMP join/leave behaviour for subscriber %s' %subscriber.name)
+ chan = 0
+ if subscriber.has_service('IGMP'):
+ ##We wait for all the subscribers to join before triggering leaves
+ if subscriber.rx_port > 1:
+ time.sleep(5)
+ subscriber.channel_join(chan, delay = 0)
+ self.num_joins += 1
+ while self.num_joins < self.num_subscribers:
+ time.sleep(5)
+ log_test.info('All subscribers have joined the channel')
+ for i in range(10):
+ subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10)
+ log_test.info('Leaving channel %d for subscriber %s' %(chan, subscriber.name))
+ subscriber.channel_leave(chan)
+ time.sleep(5)
+ log_test.info('Interface %s Join RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name,subscriber.join_rx_stats))
+ #Should not receive packets for this subscriber
+ self.recv_timeout = True
+ subscriber.recv_timeout = True
+ subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10)
+ subscriber.recv_timeout = False
+ self.recv_timeout = False
+ log_test.info('Joining channel %d for subscriber %s' %(chan, subscriber.name))
+ subscriber.channel_join(chan, delay = 0)
+ self.test_status = True
+ return self.test_status
+
+ def igmp_jump_verify(self, subscriber):
+ if subscriber.has_service('IGMP'):
+ for i in xrange(subscriber.num):
+ log_test.info('Subscriber %s jumping channel' %subscriber.name)
+ chan = subscriber.channel_jump(delay=0)
+ subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1)
+ log_test.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
+ time.sleep(3)
+ log_test.info('Interface %s Jump RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name, subscriber.join_rx_stats))
+ self.test_status = True
+ return self.test_status
+ def traffic_verify(self, subscriber):
+ if subscriber.has_service('TRAFFIC'):
+ url = 'http://www.google.com'
+ resp = requests.get(url)
+ self.test_status = resp.ok
+ if resp.ok == False:
+ log_test.info('Subscriber %s failed get from url %s with status code %d'
+ %(subscriber.name, url, resp.status_code))
+ else:
+ log_test.info('GET request from %s succeeded for subscriber %s'
+ %(url, subscriber.name))
+ return self.test_status
+################## common utility functions #######################
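+    #get_system_cpu_usage adds the user and system fields ($2 + $4) of the 'Cpu(s)' summary
+    #line reported by top on the head node; the awk field positions assume the classic
+    #'top -b' output layout.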
+ def get_system_cpu_usage(self):
+ """ Getting compute node CPU usage """
+ ssh_agent = SSHTestAgent(host = self.HEAD_NODE, user = self.USER, password = self.PASS)
+ cmd = "top -b -n1 | grep 'Cpu(s)' | awk '{print $2 + $4}'"
+ status, output = ssh_agent.run_cmd(cmd)
+ assert_equal(status, True)
+ return float(output)
+
+ @classmethod
+ def start_onos(cls, network_cfg = None):
+ if type(network_cfg) is tuple:
+ res = []
+ for v in network_cfg:
+ res += v.items()
+ config = dict(res)
+ else:
+ config = network_cfg
+ log_test.info('Restarting ONOS with new network configuration')
+ return cord_test_onos_restart(config = config)
+
+ @classmethod
+ def config_restore(cls):
+ """Restore the vsg test configuration on test case failures"""
+ for restore_method in cls.restore_methods:
+ restore_method()
+
+ def onos_aaa_config(self):
+        log.info('Configuring the ONOS AAA app with the radius server address')
+ aaa_dict = {'apps' : { 'org.opencord.aaa' : { 'AAA' : { 'radiusSecret': 'radius_password',
+ 'radiusIp': '172.17.0.2' } } } }
+ radius_ip = os.getenv('ONOS_AAA_IP') or '172.17.0.2'
+ aaa_dict['apps']['org.opencord.aaa']['AAA']['radiusIp'] = radius_ip
+ self.onos_load_config(aaa_dict)
+
+ def onos_load_config(self, config):
+ status, code = OnosCtrl.config(config)
+ if status is False:
+ log_test.info('Configure request for AAA returned status %d' %code)
+ assert_equal(status, True)
+ time.sleep(3)
+ def cliEnter(self):
+ retries = 0
+ while retries < 3:
+ self.cli = OnosCliDriver(connect = True)
+ if self.cli.handle:
+ break
+ else:
+ retries += 1
+ time.sleep(2)
+
+ def cliExit(self):
+ self.cli.disconnect()
+
+ def incmac(self, mac):
+ tmp = str(hex(int('0x'+mac,16)+1).split('x')[1])
+ mac = '0'+ tmp if len(tmp) < 2 else tmp
+ return mac
+
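+    #next_mac increments the last octet of the MAC with simple wrap handling, e.g.
+    #next_mac('02:00:00:00:00:00') is expected to give '02:00:00:00:00:01'.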
+ def next_mac(self, mac):
+ mac = mac.split(":")
+ mac[5] = self.incmac(mac[5])
+
+ if len(mac[5]) > 2:
+ mac[0] = self.incmac(mac[0])
+ mac[5] = '01'
+
+ if len(mac[0]) > 2:
+ mac[0] = '01'
+ mac[1] = self.incmac(mac[1])
+ mac[5] = '01'
+ return ':'.join(mac)
+
+ def to_egress_mac(cls, mac):
+ mac = mac.split(":")
+ mac[4] = '01'
+ return ':'.join(mac)
+
+ def inc_ip(self, ip, i):
+ ip[i] =str(int(ip[i])+1)
+ return '.'.join(ip)
+
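+    #next_ip walks the address octet-wise to yield the next host address, e.g.
+    #next_ip('1.1.1.1') is expected to give '1.1.1.2'.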
+ def next_ip(self, ip):
+
+ lst = ip.split('.')
+ for i in (3,0,-1):
+ if int(lst[i]) < 255:
+ return self.inc_ip(lst, i)
+ elif int(lst[i]) == 255:
+ lst[i] = '0'
+ if int(lst[i-1]) < 255:
+ return self.inc_ip(lst,i-1)
+ elif int(lst[i-2]) < 255:
+ lst[i-1] = '0'
+ return self.inc_ip(lst,i-2)
+ else:
+ break
+
+ def to_egress_ip(self, ip):
+ lst=ip.split('.')
+ lst[0] = '182'
+ return '.'.join(lst)
+