Test: Cluster-based scenarios with multiple OVS switches
and CORD apps.
Change-Id: I47068d545f7b1f3ff5f4ec08254abe7469f91b99
diff --git a/src/test/cluster/clusterTest.py b/src/test/cluster/clusterTest.py
index 4a494c3..55b47f8 100644
--- a/src/test/cluster/clusterTest.py
+++ b/src/test/cluster/clusterTest.py
@@ -107,8 +107,8 @@
self.cliExit()
return False
except:
- raise
- return False
+ raise Exception('Failed to get cluster members')
+ return False
def get_cluster_current_member_ips(self,controller = None):
tries = 0
@@ -131,13 +131,15 @@
raise Exception('Failed to get cluster members')
return cluster_ips
- def get_cluster_container_names_ips(self):
- onos_names_ips = {}
- onos_ips = self.get_cluster_current_member_ips()
- onos_names_ips[onos_ips[0]] = Onos.NAME
- for i in range(1, len(onos_ips)):
+ def get_cluster_container_names_ips(self,controller=None):
+ onos_names_ips = {}
+ onos_ips = self.get_cluster_current_member_ips(controller=controller)
+ onos_names_ips[onos_ips[0]] = Onos.NAME
+ for i in range(1,len(onos_ips)):
name = '{0}-{1}'.format(Onos.NAME,i+1)
onos_names_ips[onos_ips[i]] = name
return onos_names_ips
@@ -164,16 +166,85 @@
return master, standbys
else:
tries += 1
+ time.sleep(1)
+ self.cliExit()
+ return master,standbys
+ except:
+ raise Exception('Failed to get cluster master and standbys')
+ return master,standbys
+
+ def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
+ '''returns the master and standbys of every device connected to the ONOS cluster instance'''
+ device_dict = {}
+ tries = 0
+ try:
+ cli = self.cliEnter(controller = controller)
+ while tries <= 10:
+ device_dict = {}
+ roles = json.loads(self.cli.roles(jsonFormat = True))
+ log.info("cluster 'roles' command output is %s"%roles)
+ if roles:
+ for device in roles:
+ device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
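+ #convert standby entries to plain strings (json.loads may return unicode) for clean logging/comparison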
+ for i in range(len(device_dict[device['id']]['standbys'])):
+ device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
+ log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
+ self.cliExit()
+ return device_dict
+ else:
+ tries += 1
time.sleep(1)
self.cliExit()
- return master, standbys
+ return device_dict
+ except:
+ raise Exception('Failed to get master and standbys of connected devices')
+ return device_dict
+
+ #returns the devices connected to the ONOS cluster, not tested
+ def get_cluster_connected_devices(self,controller=None):
+ '''returns all the devices connected to the ONOS cluster'''
+ device_list = []
+ tries = 0
+ try:
+ cli = self.cliEnter(controller = controller)
+ while tries <= 10:
+ device_list = []
+ devices = json.loads(self.cli.devices(jsonFormat = True))
+ log.info("cluster 'devices' command output is %s"%devices)
+ if devices:
+ for device in devices:
+ log.info('device id is %s'%device['id'])
+ device_list.append(str(device['id']))
+ self.cliExit()
+ return device_list
+ else:
+ tries += 1
+ time.sleep(1)
+ self.cliExit()
+ return device_list
+ except:
+ raise Exception('Failed to get cluster connected devices')
+ return device_list
+
+ def get_number_of_devices_of_master(self,controller=None):
+ '''returns master-device pairs, i.e. which master owns which devices'''
+ master_count = {}
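+ #maps controller ip -> {'size': number of devices mastered, 'devices': list of device ids}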
+ try:
+ cli = self.cliEnter(controller = controller)
+ masters = json.loads(self.cli.masters(jsonFormat = True))
+ if masters:
+ for master in masters:
+ master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
+ return master_count
+ else:
+ return master_count
except:
- raise Exception('Cannot get cluster master and standbys')
- return master, standbys
+ raise Exception('Failed to get master-device mapping')
+ return master_count
def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
if new_master is None: return False
- self.cliEnter()
+ self.cliEnter(controller=controller)
cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
command = self.cli.command(cmd = cmd, jsonFormat = False)
self.cliExit()
@@ -182,15 +253,27 @@
assert_equal(master,new_master)
log.info('Cluster master changed to %s successfully'%new_master)
-############# Cluster Test cases ###########################
+ def withdraw_cluster_current_mastership(self,master_ip=None,device_id=device_id,controller=None):
+ '''current master loses its mastership and hence a new master will be elected'''
+ self.cliEnter(controller=controller)
+ cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
+ command = self.cli.command(cmd = cmd, jsonFormat = False)
+ self.cliExit()
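+ #wait for the cluster to elect a new master for the device before checking the new role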
+ time.sleep(60)
+ new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
+ assert_not_equal(new_master_ip,master_ip)
+ log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
+ log.info('Cluster new master is %s'%new_master_ip)
+ return True
+
#pass
- def test_onos_cluster_formation_verify(self,onos_instances = ONOS_INSTANCES):
- status = self.verify_cluster_status(onos_instances = onos_instances)
- assert_equal(status, True)
- log.info('Cluster exists with %d ONOS instances'%onos_instances)
+ def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances = onos_instances)
+ assert_equal(status, True)
+ log.info('Cluster exists with %d ONOS instances'%onos_instances)
#nottest cluster not coming up properly if member goes down
- def test_onos_cluster_adding_members(self,add = 2, onos_instances = ONOS_INSTANCES):
+ def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
onos_ips = self.get_cluster_current_member_ips()
@@ -200,7 +283,7 @@
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
- def test_onos_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
+ def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -215,9 +298,9 @@
assert_equal(status, True)
new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
assert_not_equal(master,new_master)
- log.info('Successfully removed cluster master instance')
+ log.info("Successfully removed cluster's master instance")
- def test_onos_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
+ def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -231,7 +314,7 @@
status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
assert_equal(status, True)
- def test_onos_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
+ def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -248,7 +331,7 @@
status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
assert_equal(status, True)
- def test_onos_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
+ def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -264,7 +347,7 @@
assert_equal(status, True)
#nottest test cluster not coming up properly if member goes down
- def test_onos_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES ,add = 2, remove = 2):
+ def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
onos_ips = self.get_cluster_current_member_ips()
@@ -284,7 +367,7 @@
assert_equal(status, True)
#nottest cluster not coming up properly if member goes down
- def test_onos_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
+ def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
onos_ips = self.get_cluster_current_member_ips()
@@ -303,7 +386,7 @@
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
- def test_onos_cluster_restart(self,onos_instances = ONOS_INSTANCES):
+ def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
log.info('Restarting cluster')
@@ -311,7 +394,7 @@
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
- def test_onos_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
+ def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -324,7 +407,7 @@
log.info('Cluster came up after master restart as expected')
#test fail. master changing after restart. Need to check correct behavior.
- def test_onos_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
+ def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master1, standbys = self.get_cluster_current_master_standbys()
@@ -338,7 +421,7 @@
assert_equal(master1,master2)
log.info('Cluster master is same before and after cluster master restart as expected')
- def test_onos_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
+ def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -351,7 +434,7 @@
assert_equal(status, True)
log.info('Cluster came up as expected after restarting one member')
- def test_onos_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
+ def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -366,7 +449,7 @@
assert_equal(status, True)
log.info('Cluster came up as expected after restarting two members')
- def test_onos_cluster_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
+ def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status,True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -381,17 +464,17 @@
assert_equal(status, True)
log.info('Cluster came up as expected after restarting %d members'%members)
- def test_onos_cluster_master_change(self,onos_instances = ONOS_INSTANCES):
+ def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
- log.info('Cluster current master of device is %s'%master)
+ log.info('Cluster current master of devices is %s'%master)
self.change_master_current_cluster(new_master=standbys[0])
log.info('Cluster master changed successfully')
#tested on single onos setup.
- def test_onos_cluster_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
+ def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
onos_ips = self.get_cluster_current_member_ips()
@@ -419,7 +502,7 @@
raise
#tested on single onos setup.
- def test_onos_cluster_vrouter_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
+ def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
onos_ips = self.get_cluster_current_member_ips()
@@ -431,11 +514,11 @@
assert_equal(res,True)
cord_test_onos_shutdown(node = master_onos_name)
time.sleep(60)
- log.info('Verifying vrouter traffic after cluster master down')
+ log.info('Verifying vrouter traffic after cluster master is down')
self.vrouter.vrouter_traffic_verify()
#tested on single onos setup.
- def test_onos_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
+ def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
onos_ips = self.get_cluster_current_member_ips()
@@ -449,7 +532,7 @@
self.vrouter.vrouter_traffic_verify()
#tested on single onos setup.
- def test_onos_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
+ def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
self.vrouter.setUpClass()
@@ -461,7 +544,7 @@
self.vrouter.vrouter_activate(deactivate=False)
#tested on single onos setup.
- def test_onos_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
+ def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -482,7 +565,7 @@
self.vrouter.vrouter_activate(deactivate=False)
#tested on single onos setup.
- def test_onos_cluster_for_vrouter_app_and_making_member_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
+ def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -498,7 +581,7 @@
self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster goes down
#tested on single onos setup.
- def test_onos_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
+ def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -513,7 +596,7 @@
self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
#tested on single onos setup.
- def test_onos_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
+ def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
self.vrouter.setUpClass()
@@ -526,7 +609,7 @@
#test fails because flow state is in pending_add in onos
- def test_onos_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
+ def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -574,7 +657,7 @@
t.join()
assert_equal(self.success, True)
- def test_onos_cluster_making_master_change_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
+ def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -619,8 +702,215 @@
t.join()
assert_equal(self.success, True)
+ #pass
+ def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ master,standbys = self.get_cluster_current_master_standbys()
+ onos_names_ips = self.get_cluster_container_names_ips()
+ master_onos_name = onos_names_ips[master]
+ self.flows.setUpClass()
+ egress = 1
+ ingress = 2
+ egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
+ ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
+ flow = OnosFlowCtrl(deviceId = self.device_id,
+ egressPort = egress,
+ ingressPort = ingress,
+ ipv6_extension = 0,
+ controller=master
+ )
+
+ result = flow.addFlow()
+ assert_equal(result, True)
+ ##wait for flows to be added to ONOS
+ time.sleep(1)
+ self.success = False
+ def mac_recv_task():
+ def recv_cb(pkt):
+ log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
+ self.success = True
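+ #a next-header value of 0 corresponds to the hop-by-hop options extension header requested by the flow above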
+ sniff(timeout=2,count=5,
+ lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
+ for i in [0,1]:
+ if i == 1:
+ log.info('Restart cluster current master %s'%master)
+ Container(master_onos_name,Onos.IMAGE).restart()
+ time.sleep(45)
+ log.info('Verifying flow traffic after master restart')
+ else:
+ log.info('Verifying flow traffic before master restart')
+ t = threading.Thread(target = mac_recv_task)
+ t.start()
+ L2 = self.flows_eth
+ L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
+ pkt = L2/L3
+ log.info('Sending packets to verify if flows are correct')
+ sendp(pkt, count=50, iface = self.flows.port_map[ingress])
+ t.join()
+ assert_equal(self.success, True)
+
+ def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
+ dst_mac = self.igmp.iptomac(group)
+ eth = Ether(dst= dst_mac)
+ ip = IP(dst=group,src=source)
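+ #use a monotonic timestamp as payload so each transmission carries unique data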
+ data = repr(monotonic.monotonic())
+ sendp(eth/ip/data,count=20, iface = intf)
+ pkt = (eth/ip/data)
+ log.info('multicast traffic packet %s'%pkt.show())
+
+ def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
+ log.info('verifying multicast traffic for group %s from source %s'%(group,source))
+ self.success = False
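+ #sniff for the multicast data in a background thread while the main thread transmits it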
+ def recv_task():
+ def igmp_recv_cb(pkt):
+ log.info('multicast data received for group %s from source %s'%(group,source))
+ self.success = True
+ sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface=intf)
+ t = threading.Thread(target = recv_task)
+ t.start()
+ self.send_multicast_data_traffic(group,source=source)
+ t.join()
+ return self.success
+
+ #pass
+ def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ master, standbys = self.get_cluster_current_master_standbys()
+ assert_equal(len(standbys), (onos_instances-1))
+ onos_names_ips = self.get_cluster_container_names_ips()
+ master_onos_name = onos_names_ips[master]
+ self.igmp.setUp(controller=master)
+ groups = ['224.2.3.4','230.5.6.7']
+ src_list = ['2.2.2.2','3.3.3.3']
+ self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
+ self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
+ iface = self.V_INF1, delay = 2)
+ self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
+ iface = self.V_INF1, delay = 2)
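+ #data traffic is expected only for the INCLUDE group; the EXCLUDE join should block traffic from the listed sources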
+ status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
+ assert_equal(status,True)
+ status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
+ assert_equal(status,False)
+ log.info('restarting cluster master %s'%master)
+ Container(master_onos_name,Onos.IMAGE).restart()
+ time.sleep(60)
+ log.info('verifying multicast data traffic after master restart')
+ status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
+ assert_equal(status,True)
+ status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
+ assert_equal(status,False)
+
+ #pass
+ def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ master, standbys = self.get_cluster_current_master_standbys()
+ assert_equal(len(standbys), (onos_instances-1))
+ onos_names_ips = self.get_cluster_container_names_ips()
+ master_onos_name = onos_names_ips[master]
+ self.igmp.setUp(controller=master)
+ groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
+ src_list = [self.igmp.randomsourceip()]
+ self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
+ self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
+ iface = self.V_INF1, delay = 2)
+ self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
+ iface = self.V_INF1, delay = 2)
+ status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
+ assert_equal(status,True)
+ status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
+ assert_equal(status,False)
+ log.info('Killing cluster master %s'%master)
+ Container(master_onos_name,Onos.IMAGE).kill()
+ time.sleep(60)
+ status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
+ assert_equal(status, True)
+ log.info('Verifying multicast data traffic after cluster master down')
+ status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
+ assert_equal(status,True)
+ status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
+ assert_equal(status,False)
+
+ def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ master, standbys = self.get_cluster_current_master_standbys()
+ assert_equal(len(standbys), (onos_instances-1))
+ onos_names_ips = self.get_cluster_container_names_ips()
+ master_onos_name = onos_names_ips[master]
+ self.igmp.setUp(controller=master)
+ groups = [self.igmp.random_mcast_ip()]
+ src_list = [self.igmp.randomsourceip()]
+ self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
+ self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
+ iface = self.V_INF1, delay = 2)
+ status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
+ assert_equal(status,True)
+ log.info("Killing cluster's master %s"%master)
+ Container(master_onos_name,Onos.IMAGE).kill()
+ count = 0
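+ #poll once per second (up to 60s) until traffic resumes, counting the seconds taken to recover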
+ for i in range(60):
+ log.info('Verifying multicast data traffic after cluster master down')
+ status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
+ if status:
+ break
+ else:
+ count += 1
+ time.sleep(1)
+ assert_equal(status, True)
+ log.info('Time taken to recover traffic after the cluster master went down is %d seconds'%count)
+
+
+ #pass
+ def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ master, standbys = self.get_cluster_current_master_standbys()
+ assert_equal(len(standbys), (onos_instances-1))
+ self.igmp.setUp(controller=master)
+ groups = [self.igmp.random_mcast_ip()]
+ src_list = [self.igmp.randomsourceip()]
+ self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
+ self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
+ iface = self.V_INF1, delay = 2)
+ status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
+ assert_equal(status,True)
+ log.info('Changing cluster master %s to %s'%(master,standbys[0]))
+ self.change_cluster_current_master(new_master=standbys[0])
+ log.info('Verifying multicast traffic after cluster master change')
+ status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
+ assert_equal(status,True)
+ log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
+ self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
+ iface = self.V_INF1, delay = 1)
+ time.sleep(10)
+ status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
+ assert_equal(status,False)
+
+ #pass
+ def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ master,standbys = self.get_cluster_current_master_standbys()
+ assert_equal(len(standbys), (onos_instances-1))
+ self.igmp.setUp(controller=master)
+ groups = [self.igmp.random_mcast_ip()]
+ src_list = [self.igmp.randomsourceip()]
+ self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
+ log.info('Changing cluster master %s to %s'%(master,standbys[0]))
+ self.change_cluster_current_master(new_master = standbys[0])
+ self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
+ iface = self.V_INF1, delay = 2)
+ time.sleep(1)
+ self.change_cluster_current_master(new_master = master)
+ status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
+ assert_equal(status,True)
+
+ #pass
@deferred(TLS_TIMEOUT)
- def test_onos_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
+ def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -635,7 +925,7 @@
return df
@deferred(120)
- def test_onos_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
+ def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys), (onos_instances-1))
self.tls.setUp()
@@ -655,7 +945,7 @@
return df
@deferred(TLS_TIMEOUT)
- def test_onos_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
+ def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -681,7 +971,7 @@
return df
@deferred(TLS_TIMEOUT)
- def test_onos_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
+ def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -709,14 +999,204 @@
reactor.callLater(0, eap_tls_no_cert, df)
return df
-###### Dhcp Relay Test cases ######################################
+ #pass
+ def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status,True)
+ master,standbys = self.get_cluster_current_master_standbys()
+ assert_equal(len(standbys),(onos_instances-1))
+ self.proxyarp.setUpClass()
+ ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
+ ingress = hosts+1
+ for hostip, hostmac in hosts_config:
+ self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
+ time.sleep(1)
+ log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
+ self.change_cluster_current_master(new_master=standbys[0])
+ log.info('verifying proxyarp after master change')
+ for hostip, hostmac in hosts_config:
+ self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
+ time.sleep(1)
+ log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
+ self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
+ time.sleep(3)
+ for hostip, hostmac in hosts_config:
+ self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
+ time.sleep(1)
+ log.info('activating proxyarp app and expecting to get arp reply from ONOS')
+ self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
+ time.sleep(3)
+ for hostip, hostmac in hosts_config:
+ self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
+ time.sleep(1)
- def test_onos_cluster_with_dhcpRelay_app_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
+ #pass
+ def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
+ assert_equal(len(standbys), (onos_instances-1))
+ onos_names_ips = self.get_cluster_container_names_ips()
+ member_onos_name = onos_names_ips[standbys[1]]
+ self.proxyarp.setUpClass()
+ ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
+ ingress = hosts+1
+ for hostip, hostmac in hosts_config:
+ self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
+ time.sleep(1)
+ log.info('killing cluster member %s'%standbys[1])
+ Container(member_onos_name,Onos.IMAGE).kill()
+ time.sleep(20)
+ status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
+ assert_equal(status, True)
+ log.info('cluster came up with %d instances after member down'%(onos_instances-1))
+ log.info('verifying proxy arp functionality after cluster member down')
+ for hostip, hostmac in hosts_config:
+ self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
+ time.sleep(1)
+
+ #pass
+ def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ self.proxyarp.setUpClass()
+ master, standbys = self.get_cluster_current_master_standbys()
+ assert_equal(len(standbys), (onos_instances-1))
+ ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
+ self.success = True
+ ingress = hosts+1
+ ports = range(ingress,ingress+10)
+ hostmac = []
+ hostip = []
+ for ip,mac in hosts_config:
+ hostmac.append(mac)
+ hostip.append(ip)
+ success_dir = {}
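+ #each verification thread records its result under its own thread name; all 10 threads must report success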
+ def verify_proxyarp(*r):
+ ingress, hostmac, hostip = r[0],r[1],r[2]
+ def mac_recv_task():
+ def recv_cb(pkt):
+ log.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
+ success_dir[current_thread().name] = True
+ sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
+ prn = recv_cb, iface = self.proxyarp.port_map[ingress])
+ t = threading.Thread(target = mac_recv_task)
+ t.start()
+ pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
+ log.info('Sending arp request for dest ip %s on interface %s' %
+ (hostip,self.proxyarp.port_map[ingress]))
+ sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
+ t.join()
+ t = []
+ for i in range(10):
+ t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
+ for i in range(10):
+ t[i].start()
+ time.sleep(2)
+ for i in range(10):
+ t[i].join()
+ if len(success_dir) != 10:
+ self.success = False
+ assert_equal(self.success, True)
+
+ #pass
+ def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ master,standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
- self.dhcprelay.setUpClass()
+ self.acl.setUp()
+ acl_rule = ACLTest()
+ status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
+ if status is False:
+ log.info('JSON request returned status %d' %code)
+ assert_equal(status, True)
+ result = acl_rule.get_acl_rules(controller=master)
+ aclRules1 = result.json()['aclRules']
+ log.info('Added acl rules is %s'%aclRules1)
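+ #collect the ids of the rules just added so the first one can be removed through the new master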
+ acl_Id = map(lambda d: d['id'], aclRules1)
+ log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
+ self.change_cluster_current_master(new_master=standbys[0])
+ status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
+ if status is False:
+ log.info('JSON request returned status %d' %code)
+ assert_equal(status, True)
+
+ #pass
+ def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ master,standbys = self.get_cluster_current_master_standbys()
+ assert_equal(len(standbys),(onos_instances-1))
+ onos_names_ips = self.get_cluster_container_names_ips()
+ master_onos_name = onos_names_ips[master]
+ self.acl.setUp()
+ acl_rule = ACLTest()
+ status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
+ if status is False:
+ log.info('JSON request returned status %d' %code)
+ assert_equal(status, True)
+ result1 = acl_rule.get_acl_rules(controller=master)
+ aclRules1 = result1.json()['aclRules']
+ log.info('Added acl rules is %s'%aclRules1)
+ acl_Id1 = map(lambda d: d['id'], aclRules1)
+ log.info('Killing cluster current master %s'%master)
+ Container(master_onos_name,Onos.IMAGE).kill()
+ time.sleep(45)
+ status = self.verify_cluster_status(onos_instances=onos_instances,controller=standbys[0])
+ assert_equal(status, True)
+ new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
+ assert_equal(len(standbys),(onos_instances-2))
+ assert_not_equal(new_master,master)
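+ #the rule added before the failure should still be visible on the newly elected master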
+ result2 = acl_rule.get_acl_rules(controller=new_master)
+ aclRules2 = result2.json()['aclRules']
+ acl_Id2 = map(lambda d: d['id'], aclRules2)
+ log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
+ assert_equal(acl_Id2,acl_Id1)
+
+ #acl traffic scenario not working as acl rule is not getting added to onos
+ def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ master,standbys = self.get_cluster_current_master_standbys()
+ assert_equal(len(standbys),(onos_instances-1))
+ onos_names_ips = self.get_cluster_container_names_ips()
+ member1_onos_name = onos_names_ips[standbys[0]]
+ member2_onos_name = onos_names_ips[standbys[1]]
+ ingress = self.acl.ingress_iface
+ egress = self.acl.CURRENT_PORT_NUM
+ acl_rule = ACLTest()
+ status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
+ self.acl.CURRENT_PORT_NUM += 1
+ time.sleep(5)
+ if status is False:
+ log.info('JSON request returned status %d' %code)
+ assert_equal(status, True)
+ srcMac = '00:00:00:00:00:11'
+ dstMac = host_ip_mac[0][1]
+ self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
+ status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
+ time.sleep(10)
+ if status is False:
+ log.info('JSON request returned status %d' %code)
+ assert_equal(status, True)
+ self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
+ log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
+ Container(member1_onos_name, Onos.IMAGE).kill()
+ Container(member2_onos_name, Onos.IMAGE).kill()
+ time.sleep(40)
+ status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
+ assert_equal(status, True)
+ self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
+ self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
+
+ #pass
+ def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ master,standbys = self.get_cluster_current_master_standbys()
+ assert_equal(len(standbys),(onos_instances-1))
+ self.dhcprelay.setUpClass(controller=master)
mac = self.dhcprelay.get_mac(iface)
self.dhcprelay.host_load(iface)
##we use the defaults for this test that serves as an example for others
@@ -728,7 +1208,8 @@
self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
config = config,
options = options,
- subnet = subnet)
+ subnet = subnet,
+ controller=master)
self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
cip, sip = self.dhcprelay.send_recv(mac)
log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
@@ -741,14 +1222,51 @@
assert_equal(cip, cip2)
log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcprelay.dhcp.release(cip2), True)
- self.dhcprelay.tearDownClass()
+ self.dhcprelay.tearDownClass(controller=standbys[0])
- def test_onos_cluster_with_dhcpRelay_app_simulating_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
- #status = self.verify_cluster_status(onos_instances=onos_instances)
- #assert_equal(status, True)
- master, standbys = self.get_cluster_current_master_standbys()
+
+ def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ master,standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
- self.dhcprelay.setUpClass()
+ onos_names_ips = self.get_cluster_container_names_ips()
+ master_onos_name = onos_names_ips[master]
+ self.dhcprelay.setUpClass(controller=master)
+ mac = self.dhcprelay.get_mac(iface)
+ self.dhcprelay.host_load(iface)
+ ##we use the defaults for this test that serves as an example for others
+ ##You don't need to restart dhcpd server if retaining default config
+ config = self.dhcprelay.default_config
+ options = self.dhcprelay.default_options
+ subnet = self.dhcprelay.default_subnet_config
+ dhcpd_interface_list = self.dhcprelay.relay_interfaces
+ self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
+ config = config,
+ options = options,
+ subnet = subnet,
+ controller=master)
+ self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
+ log.info('Initiating dhcp process from client %s'%mac)
+ cip, sip = self.dhcprelay.send_recv(mac)
+ log.info('Killing cluster current master %s'%master)
+ Container(master_onos_name, Onos.IMAGE).kill()
+ time.sleep(60)
+ status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
+ assert_equal(status, True)
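+ #re-request with the same client mac and expect the lease obtained before the master went down to be honoured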
+ mac = self.dhcprelay.dhcp.get_mac(cip)[0]
+ log.info("Verifying dhcp clients gets same IP after cluster master restarts")
+ new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
+ assert_equal(new_cip, cip)
+ self.dhcprelay.tearDownClass(controller=standbys[0])
+
+ #pass
+ def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ master,standbys = self.get_cluster_current_master_standbys()
+ assert_equal(len(standbys),(onos_instances-1))
+ self.dhcprelay.setUpClass(controller=master)
macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
self.dhcprelay.host_load(iface)
##we use the defaults for this test that serves as an example for others
@@ -760,7 +1278,8 @@
self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
config = config,
options = options,
- subnet = subnet)
+ subnet = subnet,
+ controller=master)
self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip1, sip1 = self.dhcprelay.send_recv(macs[0])
assert_not_equal(cip1,None)
@@ -775,16 +1294,12 @@
cip3, sip3 = self.dhcprelay.send_recv(macs[2])
assert_not_equal(cip3,None)
log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[2],master))
- self.dhcprelay.tearDownClass()
+ self.dhcprelay.tearDownClass(controller=standbys[0])
-
-############ Cord Subscriber Test cases ##################
-
- def test_onos_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
+ def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
- """Test subscriber join next for channel surfing"""
- self.subscriber.setUpClass()
+ master, standbys = self.get_cluster_current_master_standbys()
+ self.subscriber.setUpClass(controller=master)
self.subscriber.num_subscribers = 5
self.subscriber.num_channels = 10
for i in [0,1]:
@@ -803,23 +1318,50 @@
port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
self.subscriber.num_channels))
assert_equal(test_status, True)
- self.subscriber.tearDownClass()
+ self.subscriber.tearDownClass(controller=master)
- def test_onos_cluster_with_cord_subscriber_joining_10channels_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
+ #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
+ def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ master,standbys = self.get_cluster_current_master_standbys()
+ assert_equal(len(standbys),(onos_instances-1))
+ self.subscriber.setUpClass(controller=master)
+ self.subscriber.num_subscribers = 5
+ self.subscriber.num_channels = 10
+ for i in [0,1]:
+ if i == 1:
+ status=self.withdraw_cluster_current_mastership(master_ip=master)
+ assert_equal(status, True)
+ master,standbys = self.get_cluster_current_master_standbys()
+ log.info('verifying cord subscriber functionality after the cluster master withdrew mastership')
+ else:
+ log.info('verifying cord subscriber functionality before the cluster master withdraws mastership')
+ test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
+ num_channels = self.subscriber.num_channels,
+ cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
+ self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
+ port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
+ self.subscriber.num_channels),controller=master)
+ assert_equal(test_status, True)
+ self.subscriber.tearDownClass(controller=master)
+
+ #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
+ def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
member_onos_name = onos_names_ips[standbys[0]]
- self.subscriber.setUpClass()
+ self.subscriber.setUpClass(controller=master)
num_subscribers = 1
num_channels = 10
for i in [0,1]:
if i == 1:
cord_test_onos_shutdown(node = member_onos_name)
time.sleep(30)
- status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True)
+ status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
assert_equal(status, True)
log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
else:
@@ -829,11 +1371,11 @@
cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
self.subscriber.igmp_verify, self.subscriber.traffic_verify),
port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
- negative_subscriber_auth = 'all')
+ negative_subscriber_auth = 'all',controller=master)
assert_equal(test_status, True)
- self.subscriber.tearDownClass()
+ self.subscriber.tearDownClass(controller=master)
- def test_onos_cluster_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
+ def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
@@ -841,7 +1383,7 @@
onos_names_ips = self.get_cluster_container_names_ips()
member1_onos_name = onos_names_ips[standbys[0]]
member2_onos_name = onos_names_ips[standbys[1]]
- self.subscriber.setUpClass()
+ self.subscriber.setUpClass(controller=master)
num_subscribers = 1
num_channels = 10
for i in [0,1]:
@@ -861,5 +1403,135 @@
port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
negative_subscriber_auth = 'all')
assert_equal(test_status, True)
- self.subscriber.tearDownClass()
+ self.subscriber.tearDownClass(controller=master)
+
+ #pass
+ def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
+ for device in device_dict.keys():
+ log.info("Device is %s"%device_dict[device])
+ assert_not_equal(device_dict[device]['master'],'none')
+ log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
+ assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
+
+ #pass
+ def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
+ cluster_ips = self.get_cluster_current_member_ips()
+ for ip in cluster_ips:
+ device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
+ assert_equal(len(device_dict.keys()),onos_instances)
+ for device in device_dict.keys():
+ log.info("Device is %s"%device_dict[device])
+ assert_not_equal(device_dict[device]['master'],'none')
+ log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
+ assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
+
+ #pass
+ def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ onos_names_ips = self.get_cluster_container_names_ips()
+ master_count = self.get_number_of_devices_of_master()
+ log.info('Master count information is %s'%master_count)
+ total_devices = 0
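+ #count devices per master and remember one controller that currently masters at least one device to restart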
+ for master in master_count.keys():
+ total_devices += master_count[master]['size']
+ if master_count[master]['size'] != 0:
+ restart_ip = master
+ assert_equal(total_devices,onos_instances)
+ member_onos_name = onos_names_ips[restart_ip]
+ log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
+ Container(member_onos_name, Onos.IMAGE).restart()
+ time.sleep(40)
+ master_count = self.get_number_of_devices_of_master()
+ log.info('Master count information after restart is %s'%master_count)
+ total_devices = 0
+ for master in master_count.keys():
+ total_devices += master_count[master]['size']
+ if master == restart_ip:
+ assert_equal(master_count[master]['size'], 0)
+ assert_equal(total_devices,onos_instances)
+
+ #pass
+ def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ onos_names_ips = self.get_cluster_container_names_ips()
+ master_count = self.get_number_of_devices_of_master()
+ log.info('Master count information is %s'%master_count)
+ total_devices = 0
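+ #count devices per master and pick a controller that currently masters at least one device to bring down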
+ for master in master_count.keys():
+ total_devices += master_count[master]['size']
+ if master_count[master]['size'] != 0:
+ restart_ip = master
+ assert_equal(total_devices,onos_instances)
+ master_onos_name = onos_names_ips[restart_ip]
+ log.info('Killing cluster master %s having ip %s'%(master_onos_name,restart_ip))
+ Container(master_onos_name, Onos.IMAGE).kill()
+ time.sleep(40)
+ for ip in onos_names_ips.keys():
+ if ip != restart_ip:
+ controller_ip = ip
+ status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
+ assert_equal(status, True)
+ master_count = self.get_number_of_devices_of_master(controller=controller_ip)
+ log.info('Master count information after the master went down is %s'%master_count)
+ total_devices = 0
+ for master in master_count.keys():
+ total_devices += master_count[master]['size']
+ if master == restart_ip:
+ assert_equal(master_count[master]['size'], 0)
+ assert_equal(total_devices,onos_instances)
+
+ #pass
+ def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ master_count = self.get_number_of_devices_of_master()
+ log.info('Master count information is %s'%master_count)
+ total_devices = 0
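+ #locate a controller that masters at least one device and note one of its devices for the mastership withdrawal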
+ for master in master_count.keys():
+ total_devices += int(master_count[master]['size'])
+ if master_count[master]['size'] != 0:
+ master_ip = master
+ log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
+ device_id = str(master_count[master]['devices'][0])
+ device_count = master_count[master]['size']
+ assert_equal(total_devices,onos_instances)
+ log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
+ status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
+ assert_equal(status, True)
+ master_count = self.get_number_of_devices_of_master()
+ log.info('Master count information after cluster mastership withdraw is %s'%master_count)
+ total_devices = 0
+ for master in master_count.keys():
+ total_devices += int(master_count[master]['size'])
+ if master == master_ip:
+ assert_equal(master_count[master]['size'], device_count-1)
+ assert_equal(total_devices,onos_instances)
+
+ #pass
+ def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
+ status = self.verify_cluster_status(onos_instances=onos_instances)
+ assert_equal(status, True)
+ master_count = self.get_number_of_devices_of_master()
+ log.info('Master count information is %s'%master_count)
+ total_devices = 0
+ for master in master_count.keys():
+ total_devices += master_count[master]['size']
+ assert_equal(total_devices,onos_instances)
+ log.info('Restarting cluster')
+ cord_test_onos_restart()
+ time.sleep(60)
+ master_count = self.get_number_of_devices_of_master()
+ log.info('Master count information after restart is %s'%master_count)
+ total_devices = 0
+ for master in master_count.keys():
+ total_devices += master_count[master]['size']
+ assert_equal(total_devices,onos_instances)
diff --git a/src/test/utils/ACL.py b/src/test/utils/ACL.py
index 74a9e52..55333c4 100644
--- a/src/test/utils/ACL.py
+++ b/src/test/utils/ACL.py
@@ -19,6 +19,7 @@
from scapy.all import *
from OnosCtrl import OnosCtrl, get_mac, get_controller
from OnosFlowCtrl import OnosFlowCtrl
+log.setLevel('INFO')
conf.verb = 0 # Disable Scapy verbosity
conf.checkIPaddr = 0 # Don't check response packets for matching destination IPs
@@ -49,7 +50,7 @@
self.iface_ip = iface_ip
self.device_id = OnosCtrl.get_device_id()
- def adding_acl_rule(self, ipv4Prefix, srcIp, dstIp, ipProto ='null', dstTpPort='null', action= 'include'):
+ def adding_acl_rule(self, ipv4Prefix, srcIp, dstIp, ipProto ='null', dstTpPort='null', action= 'include',controller=None):
'''This function is generating ACL json file and post to ONOS for creating a ACL rule'''
if ipv4Prefix is 'v4':
acl_dict = {}
@@ -62,21 +63,39 @@
if dstTpPort is not 'null':
acl_dict['dstTpPort'] = '{}'.format(dstTpPort)
json_data = json.dumps(acl_dict)
- resp = requests.post(self.add_acl_rule_url, auth = self.auth, data = json_data)
+ if controller is None:
+ # if a controller ip is not passed, fall back to the default controller ip
+ resp = requests.post(self.add_acl_rule_url, auth = self.auth, data = json_data)
+ else:
+ add_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules' %(controller)
+ log.info('add_acl_rule url is %s'%add_acl_rule_url)
+ resp = requests.post(add_acl_rule_url, auth = self.auth, data = json_data)
return resp.ok, resp.status_code
- def get_acl_rules(self):
+ def get_acl_rules(self,controller=None):
'''This function is getting a ACL rules from ONOS with json formate'''
- resp = requests.get(self.add_acl_rule_url, auth = self.auth)
+ if controller is None:
+ resp = requests.get(self.add_acl_rule_url, auth = self.auth)
+ else:
+ add_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules' %(controller)
+ log.info('get_acl_rule_url is %s'%add_acl_rule_url)
+ resp = requests.get(add_acl_rule_url, auth = self.auth)
return resp
@classmethod
- def remove_acl_rule(cls,id = None):
+ def remove_acl_rule(cls,id = None,controller=None):
'''This function is delete one or all ACL rules in ONOS'''
if id is None:
- remove_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules' %(cls.controller)
+ if controller is None:
+ remove_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules' %(cls.controller)
+ else:
+ remove_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules' %(controller)
else:
- remove_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules/%s' %(cls.controller, id)
+ if controller is None:
+ remove_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules/%s' %(cls.controller, id)
+ else:
+ remove_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules/%s' %(controller, id)
+ log.info('remove_acl_rule_url is %s'%remove_acl_rule_url)
resp = requests.delete(remove_acl_rule_url, auth = cls.auth)
return resp.ok, resp.status_code
diff --git a/src/test/utils/Channels.py b/src/test/utils/Channels.py
index f0f5960..71cda6a 100644
--- a/src/test/utils/Channels.py
+++ b/src/test/utils/Channels.py
@@ -38,12 +38,13 @@
igmp_ip = IP(dst = IP_DST, src = IP_SRC)
ssm_list = []
- def __init__(self, iface = 'veth0', ssm_list = [], src_list = ['1.2.3.4'], delay = 2):
+ def __init__(self, iface = 'veth0', ssm_list = [], src_list = ['1.2.3.4'], delay = 2,controller=None):
+ self.controller=controller
self.iface = iface
self.ssm_list += ssm_list
self.src_list = src_list
self.delay = delay
- self.onos_ctrl = OnosCtrl('org.onosproject.igmp')
+ self.onos_ctrl = OnosCtrl('org.onosproject.igmp',controller=self.controller)
self.onos_ctrl.activate()
def igmp_load_ssm_config(self, ssm_list = []):
@@ -80,7 +81,7 @@
time.sleep(self.delay)
def onos_load_config(self, config):
- status, code = OnosCtrl.config(config)
+ status, code = OnosCtrl.config(config,controller=self.controller)
if status is False:
log.info('JSON config request returned status %d' %code)
time.sleep(2)
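
For context, a sketch of what the new controller argument enables here, assuming the class patched above is the IgmpChannel helper that Cluster.py instantiates with controller=...; the interface, group and IP are illustrative.

    # Hypothetical: point the IGMP channel helper at a non-default cluster member so
    # the app activation and SSM configuration are posted to that instance
    channel = IgmpChannel(iface='veth0', controller='172.17.0.3')
    channel.igmp_load_ssm_config(ssm_list=['229.1.1.1'])
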
diff --git a/src/test/utils/Cluster.py b/src/test/utils/Cluster.py
index bb17d03..f361d4d 100644
--- a/src/test/utils/Cluster.py
+++ b/src/test/utils/Cluster.py
@@ -33,7 +33,6 @@
import collections
import requests
log.setLevel('INFO')
-
class cluster_igmp(object):
V_INF1 = 'veth0'
V_INF2 = 'veth1'
@@ -63,33 +62,33 @@
ROVER_JOIN_TIMEOUT = 60
@classmethod
- def setUpClass(cls):
+ def setUpClass(cls,controller=None):
cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
cls.port_map, _ = cls.olt.olt_port_map()
- OnosCtrl.cord_olt_config(cls.olt.olt_device_data())
+ OnosCtrl.cord_olt_config(cls.olt.olt_device_data(),controller=controller)
@classmethod
def tearDownClass(cls): pass
- def setUp(self):
- self.setUpClass()
+ def setUp(self,controller=None):
+ self.setUpClass(controller=controller)
self.get_igmp_intf()
''' Activate the igmp app'''
- self.onos_ctrl = OnosCtrl(self.app)
+ self.onos_ctrl = OnosCtrl(self.app,controller=controller)
self.onos_ctrl.activate()
- self.igmp_channel = IgmpChannel()
+ self.igmp_channel = IgmpChannel(controller=controller)
def tearDown(self): pass
- def onos_load_config(self, config):
+ def onos_load_config(self, config,controller=None):
log.info('onos load config is %s'%config)
- status, code = OnosCtrl.config(config)
+ status, code = OnosCtrl(self.app).config(config,controller=controller)
if status is False:
log.info('JSON request returned status %d' %code)
assert_equal(status, True)
time.sleep(2)
- def onos_ssm_table_load(self, groups, src_list = ['1.2.3.4'],flag = False):
+ def onos_ssm_table_load(self, groups, src_list = ['1.2.3.4'],controller=None,flag = False):
ssm_dict = {'apps' : { 'org.onosproject.igmp' : { 'ssmTranslate' : [] } } }
ssm_xlate_list = ssm_dict['apps']['org.onosproject.igmp']['ssmTranslate']
if flag: #to maintain separate group-source pairs.
@@ -105,7 +104,7 @@
d['source'] = s or '0.0.0.0'
d['group'] = g
ssm_xlate_list.append(d)
- self.onos_load_config(ssm_dict)
+ self.onos_load_config(ssm_dict,controller=controller)
cord_port_map = {}
for g in groups:
cord_port_map[g] = (self.PORT_TX_DEFAULT, self.PORT_RX_DEFAULT)
@@ -294,22 +293,24 @@
-----END CERTIFICATE-----'''
def __init__(self):
pass
- def setUp(self):
- self.onos_ctrl = OnosCtrl(self.eap_app)
- self.onos_aaa_config()
+ def setUp(self,controller=None):
+ self.onos_ctrl = OnosCtrl(self.eap_app,controller=controller)
+ self.onos_aaa_config(controller=controller)
- def onos_aaa_config(self):
+ def onos_aaa_config(self,controller=None):
+ log.info('controller in onos_aaa_config is %s'%controller)
aaa_dict = {'apps' : { 'org.onosproject.aaa' : { 'AAA' : { 'radiusSecret': 'radius_password',
'radiusIp': '172.17.0.2' } } } }
radius_ip = os.getenv('ONOS_AAA_IP') or '172.17.0.2'
aaa_dict['apps']['org.onosproject.aaa']['AAA']['radiusIp'] = radius_ip
self.onos_ctrl.activate()
time.sleep(2)
- self.onos_load_config(aaa_dict)
+ self.onos_load_config(aaa_dict,controller=controller)
- def onos_load_config(self, config):
+ def onos_load_config(self, config,controller=None):
+ log.info('controller in onos_load_config is %s'%controller)
log.info('onos load config is %s'%config)
- status, code = OnosCtrl.config(config)
+ status, code = OnosCtrl(self.eap_app).config(config,controller=controller)
if status is False:
log.info('JSON request returned status %d' %code)
assert_equal(status, True)
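
For clarity, onos_aaa_config() above ends up pushing a small network-configuration dict through OnosCtrl.config(). A minimal standalone sketch of the equivalent direct REST call, with an illustrative controller IP; the auth tuple matches the one OnosCtrl uses.

    import os, json, requests

    radius_ip = os.getenv('ONOS_AAA_IP') or '172.17.0.2'
    aaa_dict = {'apps': {'org.onosproject.aaa': {'AAA': {'radiusSecret': 'radius_password',
                                                         'radiusIp': radius_ip}}}}
    controller = '172.17.0.3'   # illustrative cluster member
    cfg_url = 'http://%s:8181/onos/v1/network/configuration/' % controller
    resp = requests.post(cfg_url, auth=('karaf', 'karaf'), data=json.dumps(aaa_dict))
    print(resp.ok, resp.status_code)
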
@@ -421,7 +422,8 @@
cls.device_id = OnosCtrl.get_device_id()
class cluster_proxyarp():
- apps = ('org.onosproject.vrouter','org.onosproject.proxyarp')
+ #apps = ('org.onosproject.vrouter','org.onosproject.proxyarp')
+ app = 'org.onosproject.proxyarp'
device_id = 'of:' + get_mac()
device_dict = { "devices" : {
"{}".format(device_id) : {
@@ -489,14 +491,14 @@
else:
config = interface_cfg
cfg = json.dumps(config)
- with open('{}/network-cfg.json'.format(cls.onos_config_path), 'w') as f:
- f.write(cfg)
- return cord_test_onos_restart(config=config)
+ #with open('{}/network-cfg.json'.format(cls.onos_config_path), 'w') as f:
+ # f.write(cfg)
+ #return cord_test_onos_restart(config=config)
@classmethod
- def host_config_load(cls, host_config = None):
+ def host_config_load(cls, host_config = None, controller=None):
for host in host_config:
- status, code = OnosCtrl.host_config(host)
+ status, code = OnosCtrl(cls.app).host_config(host,onos_ip=controller)
if status is False:
log.info('JSON request returned status %d' %code)
assert_equal(status, True)
@@ -547,9 +549,9 @@
return hosts_dict.values()
@classmethod
- def proxyarp_activate(cls, deactivate = False):
+ def proxyarp_activate(cls, deactivate = False,controller=None):
app = 'org.onosproject.proxyarp'
- onos_ctrl = OnosCtrl(app)
+ onos_ctrl = OnosCtrl(app,controller=controller)
if deactivate is True:
onos_ctrl.deactivate()
else:
@@ -557,11 +559,11 @@
time.sleep(3)
@classmethod
- def proxyarp_config(cls, hosts = 1):
+ def proxyarp_config(cls, hosts = 1,controller=None):
proxyarp_configs = cls.generate_interface_config(hosts = hosts)
cls.interface_config_load(interface_cfg = proxyarp_configs)
hostcfg = cls.generate_host_config()
- cls.host_config_load(host_config = hostcfg)
+ cls.host_config_load(host_config = hostcfg,controller=controller)
return proxyarp_configs
def proxyarp_arpreply_verify(self, ingress, hostip, hostmac, PositiveTest=True):
@@ -672,10 +674,10 @@
},
}
- def cliEnter(self):
+ def cliEnter(self,controller=None):
retries = 0
while retries < 3:
- self.cli = OnosCliDriver(connect = True)
+ self.cli = OnosCliDriver(connect = True,controller=controller)
if self.cli.handle:
break
else:
@@ -686,8 +688,8 @@
self.cli.disconnect()
@classmethod
- def onos_load_config(cls, config):
- status, code = OnosCtrl.config(config)
+ def onos_load_config(cls, config,controller=None):
+ status, code = OnosCtrl.config(config,controller=controller)
if status is False:
log.info('JSON request returned status %d' %code)
assert_equal(status, True)
@@ -700,6 +702,25 @@
return vrouter_configs
@classmethod
+ def vrouter_host_load(cls,peer_address = None,controller=None):
+ index = 1
+ hosts_dict = {}
+ peer_info = peer_address if peer_address is not None else cls.peer_list
+ for host,_ in peer_info:
+ #iface = cls.port_map[index]
+ mac = RandMAC()._fix()
+ #port = num + 1 if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
+ log.info('creating host with ip %s and mac %s'%(host,mac))
+ hosts_dict[host] = {'mac':mac, 'vlan':'none', 'ipAddresses':[host], 'location':{ 'elementId' : '{}'.format(cls.device_id), 'port': index}}
+ index += 1
+ for host in hosts_dict.values():
+ status, code = OnosCtrl.host_config(host,onos_ip=controller)
+ if status is False:
+ log.info('JSON request returned status %d' %code)
+ return False
+ return True
+
+ """@classmethod
def vrouter_host_load(cls, peer_address = None):
#cls.setUpClass()
index = 1
@@ -715,7 +736,7 @@
)
for cmd in config_cmds:
os.system(cmd)
-
+ """
@classmethod
def vrouter_host_unload(cls, peer_address = None):
index = 1
@@ -737,9 +758,12 @@
config = dict(res)
else:
config = network_cfg
+ cfg = json.dumps(config)
log.info('Restarting ONOS with new network configuration %s'%config)
- #return CordTestServer().restart_onos(config)
- return cord_test_onos_restart(config = config)
+ #return cord_test_onos_restart(config = config)
+ with open('{}/network-cfg.json'.format(cls.onos_config_path), 'w') as f:
+ f.write(cfg)
+ return cord_test_onos_restart(config=config)
@classmethod
def start_quagga(cls, networks = 4, peer_address = None, router_address = None):
@@ -841,9 +865,9 @@
return cls.zebra_conf + zebra_routes
@classmethod
- def vrouter_activate(cls, deactivate = False):
+ def vrouter_activate(cls, deactivate = False,controller=None):
app = 'org.onosproject.vrouter'
- onos_ctrl = OnosCtrl(app)
+ onos_ctrl = OnosCtrl(app,controller=controller)
if deactivate is True:
onos_ctrl.deactivate()
else:
@@ -852,11 +876,11 @@
@classmethod
def vrouter_configure(cls, networks = 4, peers = 1, peer_address = None,
- route_update = None, router_address = None, time_expire = None, adding_new_routes = None):
+ route_update = None, router_address = None, time_expire = None, adding_new_routes = None,controller=None):
vrouter_configs = cls.vrouter_config_get(networks = networks, peers = peers,
peer_address = peer_address, route_update = route_update)
cls.start_onos(network_cfg = vrouter_configs)
- cls.vrouter_host_load()
+ cls.vrouter_host_load(controller=controller)
##Start quagga
cls.start_quagga(networks = networks, peer_address = peer_address, router_address = router_address)
return vrouter_configs
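
A hedged usage sketch for the controller-aware vrouter flow above. The class name cluster_vrouter, its constructor and the member IP are assumptions; vrouter_network_verify() drives vrouter_configure() internally with the given controller, as the hunk below shows.

    # Hypothetical: verify vrouter behaviour against one specific cluster member
    vrouter = cluster_vrouter()
    vrouter.vrouter_network_verify(networks=5, peers=1, controller='172.17.0.3')
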
@@ -920,15 +944,16 @@
def vrouter_network_verify(self, networks, peers = 1, positive_test = True,
start_network = None, start_peer_address = None, route_update = None,
invalid_peers = None, time_expire = None, unreachable_route_traffic = None,
- deactivate_activate_vrouter = None, adding_new_routes = None):
+ deactivate_activate_vrouter = None, adding_new_routes = None,controller=None):
_, ports_map, egress_map = self.vrouter_configure(networks = networks, peers = peers,
peer_address = start_peer_address,
route_update = route_update,
router_address = start_network,
time_expire = time_expire,
- adding_new_routes = adding_new_routes)
- self.cliEnter()
+ adding_new_routes = adding_new_routes,
+ controller=controller)
+ self.cliEnter(controller=controller)
##Now verify
hosts = json.loads(self.cli.hosts(jsonFormat = True))
log.info('Discovered hosts: %s' %hosts)
@@ -1000,7 +1025,7 @@
return cord_test_quagga_shell(quagga_cmd)
class cluster_acl(object):
- app = ('org.onosproject.acl')
+ app = 'org.onosproject.acl'
device_id = 'of:' + get_mac('ovsbr0')
test_path = os.path.dirname(os.path.realpath(__file__))
onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
@@ -1032,11 +1057,11 @@
@classmethod
def tearDownClass(cls):
'''Deactivate the acl app'''
- def setUp(self):
+ def setUp(self,controller=None):
self.setUpClass()
''' Activate the acl app'''
self.maxDiff = None ##for assert_equal compare outputs on failure
- self.onos_ctrl = OnosCtrl(self.app)
+ self.onos_ctrl = OnosCtrl(self.app,controller=controller)
status, _ = self.onos_ctrl.activate()
assert_equal(status, True)
time.sleep(1)
@@ -1132,8 +1157,8 @@
assert_equal(self.success, True)
@classmethod
- def onos_load_config(cls, config):
- status, code = OnosCtrl.config(config)
+ def onos_load_config(cls, config,controller=None):
+ status, code = OnosCtrl.config(config,controller=controller)
if status is False:
log.info('JSON request returned status %d' %code)
assert_equal(status, True)
@@ -1176,31 +1201,33 @@
onos_restartable = bool(int(os.getenv('ONOS_RESTART', 0)))
@classmethod
- def setUpClass(cls):
+ def setUpClass(cls,controller=None):
+ log.info('controller ip in dhcprelay setUpClass is %s'%controller)
''' Activate the dhcprelay app'''
- OnosCtrl(cls.app_dhcp).deactivate()
+ OnosCtrl(cls.app_dhcp,controller=controller).deactivate()
time.sleep(3)
- cls.onos_ctrl = OnosCtrl(cls.app)
+ cls.onos_ctrl = OnosCtrl(cls.app,controller=controller)
status, _ = cls.onos_ctrl.activate()
assert_equal(status, True)
time.sleep(3)
- cls.dhcp_relay_setup()
+ cls.dhcp_relay_setup(controller=controller)
##start dhcpd initially with default config
- cls.dhcpd_start()
+ cls.dhcpd_start(controller=controller)
@classmethod
- def tearDownClass(cls):
+ def tearDownClass(cls,controller=None):
'''Deactivate the dhcp relay app'''
try:
os.unlink('{}/dhcpd.conf'.format(cls.dhcp_data_dir))
os.unlink('{}/dhcpd.leases'.format(cls.dhcp_data_dir))
except: pass
- cls.onos_ctrl.deactivate()
+ OnosCtrl(cls.app,controller=controller).deactivate()
+ #cls.onos_ctrl.deactivate()
cls.dhcpd_stop()
- cls.dhcp_relay_cleanup()
+ #cls.dhcp_relay_cleanup()
@classmethod
- def dhcp_relay_setup(cls):
+ def dhcp_relay_setup(cls,controller=None):
did = OnosCtrl.get_device_id()
cls.relay_device_id = did
cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
@@ -1232,7 +1259,7 @@
relay_ip = cls.get_host_ip(interface_list[0][0])
relay_mac = cls.get_mac(cls.port_map[cls.relay_interface_port])
interface_list.append((cls.relay_interface_port, relay_ip, relay_mac))
- cls.onos_interface_load(interface_list)
+ cls.onos_interface_load(interface_list,controller=controller)
@classmethod
def dhcp_relay_cleanup(cls):
@@ -1242,15 +1269,16 @@
return cord_test_onos_restart(config = {})
@classmethod
- def onos_load_config(cls, config):
- status, code = OnosCtrl.config(config)
+ def onos_load_config(cls, config,controller=None):
+ log.info('loading onos config on controller %s'%controller)
+ status, code = OnosCtrl.config(config,controller=controller)
if status is False:
log.info('JSON request returned status %d' %code)
assert_equal(status, True)
time.sleep(2)
@classmethod
- def onos_interface_load(cls, interface_list):
+ def onos_interface_load(cls, interface_list,controller=None):
interface_dict = { 'ports': {} }
for port_num, ip, mac in interface_list:
port_map = interface_dict['ports']
@@ -1263,10 +1291,10 @@
}
interface_list.append(interface_map)
- cls.onos_load_config(interface_dict)
+ cls.onos_load_config(interface_dict,controller=controller)
@classmethod
- def onos_dhcp_relay_load(cls, server_ip, server_mac):
+ def onos_dhcp_relay_load(cls, server_ip, server_mac,controller=None):
relay_device_map = '{}/{}'.format(cls.relay_device_id, cls.relay_interface_port)
dhcp_dict = {'apps':{'org.onosproject.dhcp-relay':{'dhcprelay':
{'dhcpserverConnectPoint':relay_device_map,
@@ -1276,7 +1304,7 @@
}
}
}
- cls.onos_load_config(dhcp_dict)
+ cls.onos_load_config(dhcp_dict,controller=controller)
@classmethod
def get_host_ip(cls, port):
@@ -1317,7 +1345,7 @@
@classmethod
def dhcpd_start(cls, intf_list = None,
config = default_config, options = default_options,
- subnet = default_subnet_config):
+ subnet = default_subnet_config,controller=None):
'''Start the dhcpd server by generating the conf file'''
if intf_list is None:
intf_list = cls.relay_interfaces
@@ -1352,7 +1380,7 @@
time.sleep(3)
cls.relay_interfaces_last = cls.relay_interfaces
cls.relay_interfaces = intf_list
- cls.onos_dhcp_relay_load(*intf_info[0])
+ cls.onos_dhcp_relay_load(*intf_info[0],controller=controller)
@classmethod
def dhcpd_stop(cls):
@@ -1658,7 +1686,8 @@
return did
@classmethod
- def setUpClass(cls):
+ def setUpClass(cls,controller=None):
+ log.info('controller ip in cluster.py setupclass is %s'%controller)
'''Load the OLT config and activate relevant apps'''
did = cls.load_device_id()
network_cfg = { "devices" : {
@@ -1677,47 +1706,47 @@
# log.info('JSON config request for app %s returned status %d' %(app, code))
#assert_equal(status, True)
#time.sleep(2)
- cls.install_app_table()
+ cls.install_app_table(controller=controller)
cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
- OnosCtrl.cord_olt_config(cls.olt.olt_device_data())
+ OnosCtrl.cord_olt_config(cls.olt.olt_device_data(),controller=controller)
cls.port_map, cls.port_list = cls.olt.olt_port_map()
- cls.activate_apps(cls.apps + cls.olt_apps)
+ cls.activate_apps(cls.apps + cls.olt_apps,controller=controller)
@classmethod
- def tearDownClass(cls):
+ def tearDownClass(cls,controller=None):
'''Deactivate the olt apps and restart OVS back'''
apps = cls.olt_apps + ( cls.table_app,)
for app in apps:
- onos_ctrl = OnosCtrl(app)
+ onos_ctrl = OnosCtrl(app,controller=controller)
onos_ctrl.deactivate()
cls.uninstall_app_table()
- #cls.start_onos(network_cfg = {})
+ cls.start_onos(network_cfg = {})
@classmethod
- def activate_apps(cls, apps):
+ def activate_apps(cls, apps,controller=None):
for app in apps:
- onos_ctrl = OnosCtrl(app)
+ onos_ctrl = OnosCtrl(app,controller=controller)
status, _ = onos_ctrl.activate()
assert_equal(status, True)
time.sleep(2)
@classmethod
- def install_app_table(cls):
+ def install_app_table(cls,controller=None):
##Uninstall the existing app if any
- OnosCtrl.uninstall_app(cls.table_app)
+ OnosCtrl.uninstall_app(cls.table_app,onos_ip=controller)
time.sleep(2)
log.info('Installing the multi table app %s for subscriber test' %(cls.table_app_file))
- OnosCtrl.install_app(cls.table_app_file)
+ OnosCtrl.install_app(cls.table_app_file,onos_ip=controller)
time.sleep(3)
#onos_ctrl = OnosCtrl(cls.vtn_app)
#onos_ctrl.deactivate()
@classmethod
- def uninstall_app_table(cls):
+ def uninstall_app_table(cls,controller=None):
##Uninstall the table app on class exit
- OnosCtrl.uninstall_app(cls.table_app)
+ OnosCtrl.uninstall_app(cls.table_app,onos_ip=controller)
time.sleep(2)
log.info('Installing back the cord igmp app %s for subscriber test on exit' %(cls.app_file))
- OnosCtrl.install_app(cls.app_file)
+ OnosCtrl.install_app(cls.app_file,onos_ip=controller)
#onos_ctrl = OnosCtrl(cls.vtn_app)
#onos_ctrl.activate()
@@ -1737,7 +1766,7 @@
else:
config = network_cfg
log.info('Restarting ONOS with new network configuration')
- return cord_test_onos_restart(config = config)
+ #return cord_test_onos_restart(config = config)
@classmethod
def remove_onos_config(cls):
@@ -1772,27 +1801,30 @@
os.system(cmd)
except: pass
- def onos_aaa_load(self):
+ def onos_aaa_load(self,controller=None):
+ log.info('controller ip in cluster.py onos_aaa_load is %s'%controller)
if self.aaa_loaded:
return
aaa_dict = {'apps' : { 'org.onosproject.aaa' : { 'AAA' : { 'radiusSecret': 'radius_password',
'radiusIp': '172.17.0.2' } } } }
radius_ip = os.getenv('ONOS_AAA_IP') or '172.17.0.2'
aaa_dict['apps']['org.onosproject.aaa']['AAA']['radiusIp'] = radius_ip
- self.onos_load_config('org.onosproject.aaa', aaa_dict)
+ self.onos_load_config('org.onosproject.aaa', aaa_dict,controller=controller)
self.aaa_loaded = True
- def onos_dhcp_table_load(self, config = None):
+ def onos_dhcp_table_load(self, config = None,controller=None):
+ log.info('controller ip in cluster.py onos_dhcp_table_load is %s'%controller)
dhcp_dict = {'apps' : { 'org.onosproject.dhcp' : { 'dhcp' : copy.copy(self.dhcp_server_config) } } }
dhcp_config = dhcp_dict['apps']['org.onosproject.dhcp']['dhcp']
if config:
for k in config.keys():
if dhcp_config.has_key(k):
dhcp_config[k] = config[k]
- self.onos_load_config('org.onosproject.dhcp', dhcp_dict)
+ self.onos_load_config('org.onosproject.dhcp', dhcp_dict,controller=controller)
- def onos_load_config(self, app, config):
- status, code = OnosCtrl.config(config)
+ def onos_load_config(self, app, config,controller=None):
+ log.info('controller ip in cluster.py onos_load_config is %s'%controller)
+ status, code = OnosCtrl.config(config,controller=controller)
if status is False:
log.info('JSON config request for app %s returned status %d' %(app, code))
assert_equal(status, True)
@@ -1942,7 +1974,7 @@
def generate_port_list(self, subscribers, channels):
return self.port_list[:subscribers]
- def subscriber_load(self, create = True, num = 10, num_channels = 1, channel_start = 0, port_list = []):
+ def subscriber_load(self, create = True, num = 10, num_channels = 1, channel_start = 0, port_list = [],controller=None):
'''Load the subscriber from the database'''
self.subscriber_db = SubscriberDB(create = create, services = self.test_services)
if create is True:
@@ -1966,19 +1998,20 @@
index += 1
#load the ssm list for all subscriber channels
- igmpChannel = IgmpChannel()
+ igmpChannel = IgmpChannel(controller=controller)
ssm_groups = map(lambda sub: sub.channels, self.subscriber_list)
ssm_list = reduce(lambda ssm1, ssm2: ssm1+ssm2, ssm_groups)
igmpChannel.igmp_load_ssm_config(ssm_list)
def subscriber_join_verify( self, num_subscribers = 10, num_channels = 1,
- channel_start = 0, cbs = None, port_list = [], negative_subscriber_auth = None):
+ channel_start = 0, cbs = None, port_list = [], negative_subscriber_auth = None,controller=None):
+ log.info('controller ip in cluster.py subscriber_join_verify is %s'%controller)
self.test_status = False
self.ovs_cleanup()
subscribers_count = num_subscribers
sub_loop_count = num_subscribers
self.subscriber_load(create = True, num = num_subscribers,
- num_channels = num_channels, channel_start = channel_start, port_list = port_list)
- self.onos_aaa_load()
+ num_channels = num_channels, channel_start = channel_start, port_list = port_list,controller=controller)
+ self.onos_aaa_load(controller=controller)
self.thread_pool = ThreadPool(min(100, subscribers_count), queue_size=1, wait_timeout=1)
chan_leave = False #for single channel, multiple subscribers
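
To show how the controller parameter threads through the subscriber flow above, a minimal usage sketch; the class name cluster_subscriber, its constructor and the member IP are assumptions for illustration.

    # Hypothetical: run the subscriber join/verify flow against a specific member
    test = cluster_subscriber()
    test.subscriber_join_verify(num_subscribers=1, num_channels=1,
                                controller='172.17.0.3')
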
@@ -2492,7 +2525,6 @@
subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
self.test_status = True
return self.test_status
-
def subscriber_dhcp_specific_lease_packet(self, iface = INTF_RX_DEFAULT):
''' Client sends DHCP Discover packet for particular lease time.'''
config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
diff --git a/src/test/utils/OnosCtrl.py b/src/test/utils/OnosCtrl.py
index 29f9080..5e6dcdf 100644
--- a/src/test/utils/OnosCtrl.py
+++ b/src/test/utils/OnosCtrl.py
@@ -56,10 +56,16 @@
self.auth = ('karaf', 'karaf')
@classmethod
- def config(cls, config):
+ def config(cls, config,controller=None):
if config is not None:
json_data = json.dumps(config)
- resp = requests.post(cls.cfg_url, auth = cls.auth, data = json_data)
+ if controller is None:
+ print('default Onos config url is %s'%cls.cfg_url)
+ resp = requests.post(cls.cfg_url, auth = cls.auth, data = json_data)
+ else:
+ cfg_url = 'http://%s:8181/onos/v1/network/configuration/' %(controller)
+ print('non-default Onos config url is %s'%cfg_url)
+ resp = requests.post(cfg_url, auth = cls.auth, data = json_data)
return resp.ok, resp.status_code
return False, 400
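
The URL selection in config() above reduces to a small helper pattern. A standalone sketch of the same logic, assuming the default URL is the one OnosCtrl builds from cls.controller; the IPs are illustrative.

    def _config_url(default_url, controller=None):
        """Return the network-configuration endpoint for the chosen ONOS instance."""
        if controller is None:
            return default_url
        return 'http://%s:8181/onos/v1/network/configuration/' % controller

    # Illustrative values only
    print(_config_url('http://172.17.0.2:8181/onos/v1/network/configuration/'))
    print(_config_url('http://172.17.0.2:8181/onos/v1/network/configuration/', controller='172.17.0.4'))
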
@@ -99,15 +105,19 @@
return did
@classmethod
- def get_flows(cls, device_id):
- url = 'http://%s:8181/onos/v1/flows/' %(cls.controller) + device_id
+ def get_flows(cls, device_id,controller=None):
+ if controller is None:
+ url = 'http://%s:8181/onos/v1/flows/' %(cls.controller) + device_id
+ else:
+ url = 'http://%s:8181/onos/v1/flows/' %(controller) + device_id
+ print('get flows url is %s'%url)
result = requests.get(url, auth = cls.auth)
if result.ok:
return result.json()['flows']
return None
@classmethod
- def cord_olt_config(cls, olt_device_data = None):
+ def cord_olt_config(cls, olt_device_data = None,controller=None):
'''Configures OLT data for existing devices/switches'''
if olt_device_data is None:
return
@@ -123,7 +133,7 @@
did_dict[did] = access_device_dict
##configure the device list with access information
- return cls.config(config)
+ return cls.config(config,controller=controller)
@classmethod
def install_app(cls, app_file, onos_ip = None):
@@ -134,6 +144,7 @@
result = requests.post(url, auth = cls.auth,
params = params, headers = headers,
data = payload)
+ print('result.ok, result.status_code are %s and %s'%(result.ok, result.status_code))
return result.ok, result.status_code
@classmethod
@@ -159,9 +170,10 @@
return resp.ok, resp.status_code
@classmethod
- def host_config(cls, config):
+ def host_config(cls, config,onos_ip=None):
if config:
json_data = json.dumps(config)
+ url = cls.host_cfg_url if onos_ip is None else 'http://%s:8181/onos/v1/hosts/' %(onos_ip)
- resp = requests.post(cls.host_cfg_url, auth = cls.auth, data = json_data)
+ resp = requests.post(url, auth = cls.auth, data = json_data)
return resp.ok, resp.status_code
return False, 400
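
With the computed url actually used in the POST (see the corrected lines above), host_config can target any cluster member. A minimal sketch, mirroring the host-entry shape used by vrouter_host_load() in Cluster.py; the MAC, device id, port and member IP are illustrative.

    from OnosCtrl import OnosCtrl

    # Hypothetical host entry in the same shape as vrouter_host_load() builds
    host_entry = {
        'mac': '00:11:22:33:44:55', 'vlan': 'none',
        'ipAddresses': ['10.10.10.1'],
        'location': {'elementId': 'of:0000000000000001', 'port': 1},
    }
    ok, code = OnosCtrl.host_config(host_entry, onos_ip='172.17.0.3')  # posts to that member's hosts endpoint
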