#copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl, get_mac
from OltConfig import OltConfig
from socket import socket
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart
from portmaps import g_subscriber_port_map
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
from OnosLog import OnosLog
from CordLogger import CordLogger
import os
import json
import random
import collections
log.setLevel('INFO')

class cluster_exchange(CordLogger):
    test_path = os.path.dirname(os.path.realpath(__file__))
    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
    mac = RandMAC()._fix()
    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
    igmp_ip = IP(dst = '224.0.0.22')
    ONOS_INSTANCES = 3
    V_INF1 = 'veth0'
    TLS_TIMEOUT = 100
    device_id = 'of:' + get_mac()
    igmp = cluster_igmp()
    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10',end_ip = '224.1.10.49')
    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35',end_ip='38.24.35.56')
    tls = cluster_tls()
    flows = cluster_flows()
    proxyarp = cluster_proxyarp()
    vrouter = cluster_vrouter()
    acl = cluster_acl()
    dhcprelay = cluster_dhcprelay()
    subscriber = cluster_subscriber()
    testcaseLoggers = ('test_cluster_controller_restarts',)

    def setUp(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).setUp()

    def tearDown(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).tearDown()

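    # Note: setUp/tearDown above skip CordLogger's per-test log handling for the
    # tests named in testcaseLoggers; the long-running controller restart test
    # below inspects ONOS logs itself through OnosLog instead.
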
    def get_controller(self):
        controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
        controller = controller.split(',')[0]
        return controller

    @classmethod
    def get_controllers(cls):
        controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
        return controllers.split(',')

    def cliEnter(self, controller = None):
        retries = 0
        while retries < 3:
            self.cli = OnosCliDriver(controller = controller, connect = True)
            if self.cli.handle:
                break
            else:
                retries += 1
                time.sleep(2)

    def cliExit(self):
        self.cli.disconnect()

    def get_leader(self, controller = None):
        self.cliEnter(controller = controller)
        try:
            result = json.loads(self.cli.leaders(jsonFormat = True))
        except:
            result = None

        if result is None:
            log.info('Leaders command failure for controller %s' %controller)
        else:
            log.info('Leaders returned: %s' %result)
        self.cliExit()
        return result

    def get_leaders(self, controller = None):
        result = []
        if type(controller) in [ list, tuple ]:
            for c in controller:
                leaders = self.get_leader(controller = c)
                result.append(leaders)
        else:
            leaders = self.get_leader(controller = controller)
            result.append(leaders)
        return result

    def verify_leaders(self, controller = None):
        leaders = self.get_leaders(controller = controller)
        failed = filter(lambda l: l == None, leaders)
        return failed

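    # Illustrative usage of the leader checks above (controller IPs are assumed
    # values): verify_leaders() returns one entry per controller whose 'leaders'
    # CLI query failed (returned None), so an empty result means every node answered.
    #
    #   failed = self.verify_leaders(controller = ['172.17.0.2', '172.17.0.3', '172.17.0.4'])
    #   assert_equal(len(failed), 0)
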
    def verify_cluster_status(self, controller = None, onos_instances = ONOS_INSTANCES, verify = False):
        tries = 0
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
                if cluster_summary:
                    log.info("cluster 'summary' command output is %s" %cluster_summary)
                    nodes = cluster_summary['nodes']
                    if verify:
                        if nodes == onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                    else:
                        if nodes >= onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return False
        except:
            raise Exception('Failed to get cluster members')
            return False

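    # A trimmed, assumed example of the 'summary' output inspected above; only the
    # 'nodes' count is used to decide whether the cluster is formed:
    #
    #   {"node": "172.17.0.2", "nodes": 3, "devices": 1, ...}
    #
    # With verify=True the count must equal onos_instances exactly; otherwise any
    # count >= onos_instances is accepted.
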
    def get_cluster_current_member_ips(self, controller = None):
        tries = 0
        cluster_ips = []
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
                if cluster_nodes:
                    log.info("cluster 'nodes' output is %s" %cluster_nodes)
                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
                    self.cliExit()
                    cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
                    return cluster_ips
                else:
                    tries += 1
            self.cliExit()
            return cluster_ips
        except:
            raise Exception('Failed to get cluster members')
            return cluster_ips

    def get_cluster_container_names_ips(self, controller = None):
        onos_names_ips = {}
        onos_ips = self.get_cluster_current_member_ips(controller = controller)
        onos_names_ips[onos_ips[0]] = Onos.NAME
        onos_names_ips[Onos.NAME] = onos_ips[0]
        for i in range(1, len(onos_ips)):
            name = '{0}-{1}'.format(Onos.NAME, i+1)
            onos_names_ips[onos_ips[i]] = name
            onos_names_ips[name] = onos_ips[i]

        return onos_names_ips

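    # The map built above is bidirectional, so callers can translate either way
    # between a container name and its cluster IP. A sketch with assumed IPs and
    # Onos.NAME == 'onos':
    #
    #   {'172.17.0.2': 'onos',   'onos':   '172.17.0.2',
    #    '172.17.0.3': 'onos-2', 'onos-2': '172.17.0.3',
    #    '172.17.0.4': 'onos-3', 'onos-3': '172.17.0.4'}
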
    #identifying current master of a connected device, not tested
    def get_cluster_current_master_standbys(self, controller = None, device_id = device_id):
        master = None
        standbys = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s" %roles)
                if roles:
                    for device in roles:
                        log.info('Verifying device info in line %s' %device)
                        if device['id'] == device_id:
                            master = str(device['master'])
                            standbys = map(lambda d: str(d), device['standbys'])
                            log.info('Master and standbys for device %s are %s and %s' %(device_id, master, standbys))
                            self.cliExit()
                            return master, standbys
                    self.cliExit()
                    return master, standbys
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return master, standbys
        except:
            raise Exception('Failed to get cluster members')
            return master, standbys

    def get_cluster_current_master_standbys_of_connected_devices(self, controller = None):
        ''' returns master and standbys of all the connected devices to ONOS cluster instance'''
        device_dict = {}
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_dict = {}
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s" %roles)
                if roles:
                    for device in roles:
                        device_dict[str(device['id'])] = {'master': str(device['master']), 'standbys': device['standbys']}
                        for i in range(len(device_dict[device['id']]['standbys'])):
                            device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
                        log.info('master and standbys for device %s are %s and %s' %(device['id'], device_dict[device['id']]['master'], device_dict[device['id']]['standbys']))
                    self.cliExit()
                    return device_dict
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_dict
        except:
            raise Exception('Failed to get cluster members')
            return device_dict

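    # Assumed shape of one 'roles' entry consumed by the two helpers above, and the
    # device_dict produced from it (device id and node IPs are illustrative only):
    #
    #   roles: [{'id': 'of:0000000000000001', 'master': '172.17.0.2',
    #            'standbys': ['172.17.0.3', '172.17.0.4']}]
    #   device_dict: {'of:0000000000000001': {'master': '172.17.0.2',
    #                                         'standbys': ['172.17.0.3', '172.17.0.4']}}
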
    #identify current master of a connected device, not tested
    def get_cluster_connected_devices(self, controller = None):
        '''returns all the devices connected to ONOS cluster'''
        device_list = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_list = []
                devices = json.loads(self.cli.devices(jsonFormat = True))
                log.info("cluster 'devices' command output is %s" %devices)
                if devices:
                    for device in devices:
                        log.info('device id is %s' %device['id'])
                        device_list.append(str(device['id']))
                    self.cliExit()
                    return device_list
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_list
        except:
            raise Exception('Failed to get cluster members')
            return device_list

    def get_number_of_devices_of_master(self, controller = None):
        '''returns master-device pairs, i.e. which master owns which devices'''
        master_count = {}
        try:
            cli = self.cliEnter(controller = controller)
            masters = json.loads(self.cli.masters(jsonFormat = True))
            if masters:
                for master in masters:
                    master_count[str(master['id'])] = {'size': int(master['size']), 'devices': master['devices']}
                return master_count
            else:
                return master_count
        except:
            raise Exception('Failed to get cluster members')
            return master_count

    def change_master_current_cluster(self, new_master = None, device_id = device_id, controller = None):
        if new_master is None: return False
        self.cliEnter(controller = controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        master, standbys = self.get_cluster_current_master_standbys(controller = controller, device_id = device_id)
        assert_equal(master, new_master)
        log.info('Cluster master changed to %s successfully' %new_master)

    def withdraw_cluster_current_mastership(self, master_ip = None, device_id = device_id, controller = None):
        '''current master loses its mastership and hence a new master will be elected'''
        self.cliEnter(controller = controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        new_master_ip, standbys = self.get_cluster_current_master_standbys(controller = controller, device_id = device_id)
        assert_not_equal(new_master_ip, master_ip)
        log.info('Device-role of device %s successfully changed to none for controller %s' %(device_id, master_ip))
        log.info('Cluster new master is %s' %new_master_ip)
        return True

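    # Both helpers above drive mastership through the ONOS CLI 'device-role' command.
    # A sketch of the command strings they build (device id and node IPs are assumed
    # values):
    #
    #   device-role of:0000000000000001 172.17.0.3 master   # elect a specific master
    #   device-role of:0000000000000001 172.17.0.2 none     # withdraw current mastership
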
    def test_cluster_controller_restarts(self):
        '''Test the cluster by repeatedly killing the controllers'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(controller = None):
            adjacent_controller = None
            adjacent_controllers = None
            if controller:
                adjacent_controllers = set(controllers) - set( [controller] )
                adjacent_controller = next(iter(adjacent_controllers))
            for node in controllers:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    failed = self.verify_leaders(controllers)
                    if failed:
                        log.info('Leaders command failed on node: %s' %node)
                    assert_equal(len(failed), 0)
                    return controller

            try:
                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
                print('ONOS cluster formed with controllers: %s' %ips)
                st = True
            except:
                st = False

            failed = self.verify_leaders(controllers)
            assert_equal(len(failed), 0)
            if st is False:
                log.info('No storage exception and ONOS cluster was not formed successfully')
            else:
                controller = None

            return controller

        next_controller = None
        tries = 10
        for num in range(tries):
            index = num % ctlr_len
            #index = random.randrange(0, ctlr_len)
            controller = onos_map[controllers[index]] if next_controller is None else next_controller
            log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller))
            try:
                cord_test_onos_restart(node = controller)
                time.sleep(60)
            except:
                time.sleep(5)
                continue
            next_controller = check_exception(controller = controller)

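    # Contract of check_exception() above, with an assumed container name: it returns
    # the controller that was just restarted when a 'StorageException$Timeout' shows up
    # in any node's log or when the peer node cannot list the cluster members, so the
    # next iteration restarts the same node again; it returns None once the cluster
    # looks healthy, letting the loop move on to the next controller.
    #
    #   next_controller = check_exception(controller = 'onos-2')
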
ChetanGaonker2099d722016-10-07 15:16:58 -0700379 #pass
ChetanGaonker689b3862016-10-17 16:25:01 -0700380 def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
381 status = self.verify_cluster_status(onos_instances = onos_instances)
382 assert_equal(status, True)
383 log.info('Cluster exists with %d ONOS instances'%onos_instances)
ChetanGaonker2099d722016-10-07 15:16:58 -0700384
385 #nottest cluster not coming up properly if member goes down
ChetanGaonker689b3862016-10-17 16:25:01 -0700386 def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700387 status = self.verify_cluster_status(onos_instances = onos_instances)
388 assert_equal(status, True)
389 onos_ips = self.get_cluster_current_member_ips()
390 onos_instances = len(onos_ips)+add
391 log.info('Adding %d nodes to the ONOS cluster' %add)
392 cord_test_onos_add_cluster(count = add)
393 status = self.verify_cluster_status(onos_instances=onos_instances)
394 assert_equal(status, True)
395
ChetanGaonker689b3862016-10-17 16:25:01 -0700396 def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700397 status = self.verify_cluster_status(onos_instances = onos_instances)
398 assert_equal(status, True)
399 master, standbys = self.get_cluster_current_master_standbys()
400 assert_equal(len(standbys),(onos_instances-1))
401 onos_names_ips = self.get_cluster_container_names_ips()
402 master_onos_name = onos_names_ips[master]
403 log.info('Removing cluster current master %s'%(master))
404 cord_test_onos_shutdown(node = master_onos_name)
405 time.sleep(60)
406 onos_instances -= 1
407 status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
408 assert_equal(status, True)
409 new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
410 assert_not_equal(master,new_master)
        log.info('Successfully removed cluster master instance')
ChetanGaonker2099d722016-10-07 15:16:58 -0700412
ChetanGaonker689b3862016-10-17 16:25:01 -0700413 def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700414 status = self.verify_cluster_status(onos_instances = onos_instances)
415 assert_equal(status, True)
416 master, standbys = self.get_cluster_current_master_standbys()
417 assert_equal(len(standbys),(onos_instances-1))
418 onos_names_ips = self.get_cluster_container_names_ips()
419 member_onos_name = onos_names_ips[standbys[0]]
420 log.info('Removing cluster member %s'%standbys[0])
421 cord_test_onos_shutdown(node = member_onos_name)
422 time.sleep(60)
423 onos_instances -= 1
424 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
425 assert_equal(status, True)
426
ChetanGaonker689b3862016-10-17 16:25:01 -0700427 def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700428 status = self.verify_cluster_status(onos_instances = onos_instances)
429 assert_equal(status, True)
430 master, standbys = self.get_cluster_current_master_standbys()
431 assert_equal(len(standbys),(onos_instances-1))
432 onos_names_ips = self.get_cluster_container_names_ips()
433 member1_onos_name = onos_names_ips[standbys[0]]
434 member2_onos_name = onos_names_ips[standbys[1]]
435 log.info('Removing cluster member %s'%standbys[0])
436 cord_test_onos_shutdown(node = member1_onos_name)
437 log.info('Removing cluster member %s'%standbys[1])
438 cord_test_onos_shutdown(node = member2_onos_name)
439 time.sleep(60)
440 onos_instances = onos_instances - 2
441 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
442 assert_equal(status, True)
443
ChetanGaonker689b3862016-10-17 16:25:01 -0700444 def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700445 status = self.verify_cluster_status(onos_instances = onos_instances)
446 assert_equal(status, True)
447 master, standbys = self.get_cluster_current_master_standbys()
448 assert_equal(len(standbys),(onos_instances-1))
449 onos_names_ips = self.get_cluster_container_names_ips()
450 for i in range(remove):
451 member_onos_name = onos_names_ips[standbys[i]]
452 log.info('Removing onos container with name %s'%standbys[i])
453 cord_test_onos_shutdown(node = member_onos_name)
454 time.sleep(60)
455 onos_instances = onos_instances - remove
456 status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
457 assert_equal(status, True)
458
459 #nottest test cluster not coming up properly if member goes down
ChetanGaonker689b3862016-10-17 16:25:01 -0700460 def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
ChetanGaonker2099d722016-10-07 15:16:58 -0700461 status = self.verify_cluster_status(onos_instances = onos_instances)
462 assert_equal(status, True)
463 onos_ips = self.get_cluster_current_member_ips()
464 onos_instances = len(onos_ips)+add
465 log.info('Adding %d ONOS instances to the cluster'%add)
466 cord_test_onos_add_cluster(count = add)
467 status = self.verify_cluster_status(onos_instances=onos_instances)
468 assert_equal(status, True)
469 log.info('Removing %d ONOS instances from the cluster'%remove)
470 for i in range(remove):
471 name = '{}-{}'.format(Onos.NAME, onos_instances - i)
472 log.info('Removing onos container with name %s'%name)
473 cord_test_onos_shutdown(node = name)
474 time.sleep(60)
475 onos_instances = onos_instances-remove
476 status = self.verify_cluster_status(onos_instances=onos_instances)
477 assert_equal(status, True)
478
479 #nottest cluster not coming up properly if member goes down
ChetanGaonker689b3862016-10-17 16:25:01 -0700480 def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
ChetanGaonker2099d722016-10-07 15:16:58 -0700481 status = self.verify_cluster_status(onos_instances = onos_instances)
482 assert_equal(status, True)
483 onos_ips = self.get_cluster_current_member_ips()
484 onos_instances = onos_instances-remove
485 log.info('Removing %d ONOS instances from the cluster'%remove)
486 for i in range(remove):
487 name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
488 log.info('Removing onos container with name %s'%name)
489 cord_test_onos_shutdown(node = name)
490 time.sleep(60)
491 status = self.verify_cluster_status(onos_instances=onos_instances)
492 assert_equal(status, True)
493 log.info('Adding %d ONOS instances to the cluster'%add)
494 cord_test_onos_add_cluster(count = add)
495 onos_instances = onos_instances+add
496 status = self.verify_cluster_status(onos_instances=onos_instances)
497 assert_equal(status, True)
498
ChetanGaonker689b3862016-10-17 16:25:01 -0700499 def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700500 status = self.verify_cluster_status(onos_instances = onos_instances)
501 assert_equal(status, True)
502 log.info('Restarting cluster')
503 cord_test_onos_restart()
504 status = self.verify_cluster_status(onos_instances = onos_instances)
505 assert_equal(status, True)
506
ChetanGaonker689b3862016-10-17 16:25:01 -0700507 def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700508 status = self.verify_cluster_status(onos_instances = onos_instances)
509 assert_equal(status, True)
510 master, standbys = self.get_cluster_current_master_standbys()
511 onos_names_ips = self.get_cluster_container_names_ips()
512 master_onos_name = onos_names_ips[master]
513 log.info('Restarting cluster master %s'%master)
514 cord_test_onos_restart(node = master_onos_name)
515 status = self.verify_cluster_status(onos_instances = onos_instances)
516 assert_equal(status, True)
517 log.info('Cluster came up after master restart as expected')
518
519 #test fail. master changing after restart. Need to check correct behavior.
    def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master1, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master1]
        log.info('Restarting cluster master %s'%master1)
        cord_test_onos_restart(node = master_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master2, standbys = self.get_cluster_current_master_standbys()
        assert_equal(master1,master2)
        log.info('Cluster master is same before and after cluster master restart as expected')

ChetanGaonker689b3862016-10-17 16:25:01 -0700534 def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700535 status = self.verify_cluster_status(onos_instances = onos_instances)
536 assert_equal(status, True)
537 master, standbys = self.get_cluster_current_master_standbys()
538 assert_equal(len(standbys),(onos_instances-1))
539 onos_names_ips = self.get_cluster_container_names_ips()
540 member_onos_name = onos_names_ips[standbys[0]]
541 log.info('Restarting cluster member %s'%standbys[0])
542 cord_test_onos_restart(node = member_onos_name)
543 status = self.verify_cluster_status(onos_instances = onos_instances)
544 assert_equal(status, True)
545 log.info('Cluster came up as expected after restarting one member')
546
ChetanGaonker689b3862016-10-17 16:25:01 -0700547 def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700548 status = self.verify_cluster_status(onos_instances = onos_instances)
549 assert_equal(status, True)
550 master, standbys = self.get_cluster_current_master_standbys()
551 assert_equal(len(standbys),(onos_instances-1))
552 onos_names_ips = self.get_cluster_container_names_ips()
553 member1_onos_name = onos_names_ips[standbys[0]]
554 member2_onos_name = onos_names_ips[standbys[1]]
555 log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
556 cord_test_onos_restart(node = member1_onos_name)
557 cord_test_onos_restart(node = member2_onos_name)
558 status = self.verify_cluster_status(onos_instances = onos_instances)
559 assert_equal(status, True)
560 log.info('Cluster came up as expected after restarting two members')
561
ChetanGaonker689b3862016-10-17 16:25:01 -0700562 def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700563 status = self.verify_cluster_status(onos_instances = onos_instances)
564 assert_equal(status,True)
565 master, standbys = self.get_cluster_current_master_standbys()
566 assert_equal(len(standbys),(onos_instances-1))
567 onos_names_ips = self.get_cluster_container_names_ips()
568 for i in range(members):
569 member_onos_name = onos_names_ips[standbys[i]]
570 log.info('Restarting cluster member %s'%standbys[i])
571 cord_test_onos_restart(node = member_onos_name)
572
573 status = self.verify_cluster_status(onos_instances = onos_instances)
574 assert_equal(status, True)
575 log.info('Cluster came up as expected after restarting %d members'%members)
576
ChetanGaonker689b3862016-10-17 16:25:01 -0700577 def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700578 status = self.verify_cluster_status(onos_instances=onos_instances)
579 assert_equal(status, True)
580 master, standbys = self.get_cluster_current_master_standbys()
581 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -0700582 log.info('Cluster current master of devices is %s'%master)
ChetanGaonker2099d722016-10-07 15:16:58 -0700583 self.change_master_current_cluster(new_master=standbys[0])
584 log.info('Cluster master changed successfully')
585
586 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700587 def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700588 status = self.verify_cluster_status(onos_instances = onos_instances)
589 assert_equal(status, True)
590 onos_ips = self.get_cluster_current_member_ips()
591 self.vrouter.setUpClass()
592 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
593 assert_equal(res, True)
594 for onos_ip in onos_ips:
595 tries = 0
596 flag = False
597 try:
598 self.cliEnter(controller = onos_ip)
599 while tries <= 5:
600 routes = json.loads(self.cli.routes(jsonFormat = True))
601 if routes:
602 assert_equal(len(routes['routes4']), networks)
603 self.cliExit()
604 flag = True
605 break
606 else:
607 tries += 1
608 time.sleep(1)
609 assert_equal(flag, True)
610 except:
                log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
612 raise
613
614 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700615 def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700616 status = self.verify_cluster_status(onos_instances = onos_instances)
617 assert_equal(status, True)
618 onos_ips = self.get_cluster_current_member_ips()
619 master, standbys = self.get_cluster_current_master_standbys()
620 onos_names_ips = self.get_cluster_container_names_ips()
621 master_onos_name = onos_names_ips[master]
622 self.vrouter.setUpClass()
623 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
624 assert_equal(res,True)
625 cord_test_onos_shutdown(node = master_onos_name)
626 time.sleep(60)
ChetanGaonker689b3862016-10-17 16:25:01 -0700627 log.info('Verifying vrouter traffic after cluster master is down')
ChetanGaonker2099d722016-10-07 15:16:58 -0700628 self.vrouter.vrouter_traffic_verify()
629
630 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700631 def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700632 status = self.verify_cluster_status(onos_instances = onos_instances)
633 assert_equal(status, True)
634 onos_ips = self.get_cluster_current_member_ips()
635 master, standbys = self.get_cluster_current_master_standbys()
636 onos_names_ips = self.get_cluster_container_names_ips()
637 master_onos_name = onos_names_ips[master]
638 self.vrouter.setUpClass()
639 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
640 assert_equal(res, True)
641 cord_test_onos_restart()
642 self.vrouter.vrouter_traffic_verify()
643
644 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700645 def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700646 status = self.verify_cluster_status(onos_instances = onos_instances)
647 assert_equal(status, True)
648 self.vrouter.setUpClass()
649 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
650 assert_equal(res, True)
651 self.vrouter.vrouter_activate(deactivate=True)
652 time.sleep(15)
653 self.vrouter.vrouter_traffic_verify(positive_test=False)
654 self.vrouter.vrouter_activate(deactivate=False)
655
656 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700657 def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700658 status = self.verify_cluster_status(onos_instances = onos_instances)
659 assert_equal(status, True)
660 master, standbys = self.get_cluster_current_master_standbys()
661 onos_names_ips = self.get_cluster_container_names_ips()
662 master_onos_name = onos_names_ips[master]
663 self.vrouter.setUpClass()
664 log.info('Verifying vrouter before master down')
665 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
666 assert_equal(res, True)
667 self.vrouter.vrouter_activate(deactivate=True)
668 log.info('Verifying vrouter traffic after app deactivated')
669 time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
670 self.vrouter.vrouter_traffic_verify(positive_test=False)
671 log.info('Verifying vrouter traffic after master down')
672 cord_test_onos_shutdown(node = master_onos_name)
673 time.sleep(60)
674 self.vrouter.vrouter_traffic_verify(positive_test=False)
675 self.vrouter.vrouter_activate(deactivate=False)
676
677 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700678 def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700679 status = self.verify_cluster_status(onos_instances = onos_instances)
680 assert_equal(status, True)
681 master, standbys = self.get_cluster_current_master_standbys()
682 onos_names_ips = self.get_cluster_container_names_ips()
683 member_onos_name = onos_names_ips[standbys[0]]
684 self.vrouter.setUpClass()
685 log.info('Verifying vrouter before cluster member down')
686 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
687 assert_equal(res, True) # Expecting vrouter should work properly
688 log.info('Verifying vrouter after cluster member down')
689 cord_test_onos_shutdown(node = member_onos_name)
690 time.sleep(60)
691 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster goes down
692
693 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700694 def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700695 status = self.verify_cluster_status(onos_instances = onos_instances)
696 assert_equal(status, True)
697 master, standbys = self.get_cluster_current_master_standbys()
698 onos_names_ips = self.get_cluster_container_names_ips()
699 member_onos_name = onos_names_ips[standbys[1]]
700 self.vrouter.setUpClass()
701 log.info('Verifying vrouter traffic before cluster member restart')
702 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
703 assert_equal(res, True) # Expecting vrouter should work properly
704 cord_test_onos_restart(node = member_onos_name)
705 log.info('Verifying vrouter traffic after cluster member restart')
706 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
707
708 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700709 def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700710 status = self.verify_cluster_status(onos_instances = onos_instances)
711 assert_equal(status, True)
712 self.vrouter.setUpClass()
713 log.info('Verifying vrouter traffic before cluster restart')
714 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
715 assert_equal(res, True) # Expecting vrouter should work properly
716 cord_test_onos_restart()
717 log.info('Verifying vrouter traffic after cluster restart')
718 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
719
720
721 #test fails because flow state is in pending_add in onos
ChetanGaonker689b3862016-10-17 16:25:01 -0700722 def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700723 status = self.verify_cluster_status(onos_instances = onos_instances)
724 assert_equal(status, True)
725 master, standbys = self.get_cluster_current_master_standbys()
726 onos_names_ips = self.get_cluster_container_names_ips()
727 master_onos_name = onos_names_ips[master]
728 self.flows.setUpClass()
729 egress = 1
730 ingress = 2
731 egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
732 ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
733 flow = OnosFlowCtrl(deviceId = self.device_id,
734 egressPort = egress,
735 ingressPort = ingress,
736 udpSrc = ingress_map['udp_port'],
737 udpDst = egress_map['udp_port'],
738 controller=master
739 )
740 result = flow.addFlow()
741 assert_equal(result, True)
742 time.sleep(1)
743 self.success = False
744 def mac_recv_task():
745 def recv_cb(pkt):
746 log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
747 self.success = True
748 sniff(timeout=2,
749 lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
750 and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])
751
752 for i in [0,1]:
753 if i == 1:
754 cord_test_onos_shutdown(node = master_onos_name)
755 log.info('Verifying flows traffic after master killed')
756 time.sleep(45)
757 else:
758 log.info('Verifying flows traffic before master killed')
759 t = threading.Thread(target = mac_recv_task)
760 t.start()
761 L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
762 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
763 L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
764 pkt = L2/L3/L4
765 log.info('Sending packets to verify if flows are correct')
766 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
767 t.join()
768 assert_equal(self.success, True)
769
ChetanGaonker689b3862016-10-17 16:25:01 -0700770 def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700771 status = self.verify_cluster_status(onos_instances=onos_instances)
772 assert_equal(status, True)
773 master, standbys = self.get_cluster_current_master_standbys()
774 self.flows.setUpClass()
775 egress = 1
776 ingress = 2
777 egress_map = { 'ip': '192.168.30.1' }
778 ingress_map = { 'ip': '192.168.40.1' }
779 flow = OnosFlowCtrl(deviceId = self.device_id,
780 egressPort = egress,
781 ingressPort = ingress,
782 ecn = 1,
783 controller=master
784 )
785 result = flow.addFlow()
786 assert_equal(result, True)
787 ##wait for flows to be added to ONOS
788 time.sleep(1)
789 self.success = False
790 def mac_recv_task():
791 def recv_cb(pkt):
792 log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
793 self.success = True
794 sniff(count=2, timeout=5,
795 lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
796 and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
797 iface = self.flows.port_map[egress])
798 for i in [0,1]:
799 if i == 1:
800 log.info('Changing cluster master to %s'%standbys[0])
801 self.change_master_current_cluster(new_master=standbys[0])
                log.info('Verifying flow traffic after cluster master changed')
803 else:
804 log.info('Verifying flow traffic before cluster master changed')
805 t = threading.Thread(target = mac_recv_task)
806 t.start()
807 L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
808 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
809 pkt = L2/L3
810 log.info('Sending a packet to verify if flows are correct')
811 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
812 t.join()
813 assert_equal(self.success, True)
814
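    # The ECN flow above matches on the two least-significant bits of the IP ToS byte;
    # the sniff filter recomputes them with int(bin(p[IP].tos).split('b')[1][-2:], 2)
    # and expects the value 1 that the flow was installed with.
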
ChetanGaonker689b3862016-10-17 16:25:01 -0700815 #pass
816 def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
817 status = self.verify_cluster_status(onos_instances=onos_instances)
818 assert_equal(status, True)
819 master,standbys = self.get_cluster_current_master_standbys()
820 onos_names_ips = self.get_cluster_container_names_ips()
821 master_onos_name = onos_names_ips[master]
822 self.flows.setUpClass()
823 egress = 1
824 ingress = 2
825 egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
826 ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
827 flow = OnosFlowCtrl(deviceId = self.device_id,
828 egressPort = egress,
829 ingressPort = ingress,
830 ipv6_extension = 0,
831 controller=master
832 )
833
834 result = flow.addFlow()
835 assert_equal(result, True)
836 ##wait for flows to be added to ONOS
837 time.sleep(1)
838 self.success = False
839 def mac_recv_task():
840 def recv_cb(pkt):
841 log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
842 self.success = True
843 sniff(timeout=2,count=5,
844 lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
845 for i in [0,1]:
846 if i == 1:
847 log.info('Restart cluster current master %s'%master)
848 Container(master_onos_name,Onos.IMAGE).restart()
849 time.sleep(45)
850 log.info('Verifying flow traffic after master restart')
851 else:
852 log.info('Verifying flow traffic before master restart')
853 t = threading.Thread(target = mac_recv_task)
854 t.start()
855 L2 = self.flows_eth
856 L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
857 pkt = L2/L3
858 log.info('Sending packets to verify if flows are correct')
859 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
860 t.join()
861 assert_equal(self.success, True)
862
863 def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
864 dst_mac = self.igmp.iptomac(group)
865 eth = Ether(dst= dst_mac)
866 ip = IP(dst=group,src=source)
867 data = repr(monotonic.monotonic())
868 sendp(eth/ip/data,count=20, iface = intf)
869 pkt = (eth/ip/data)
870 log.info('multicast traffic packet %s'%pkt.show())
871
872 def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
873 log.info('verifying multicast traffic for group %s from source %s'%(group,source))
874 self.success = False
875 def recv_task():
876 def igmp_recv_cb(pkt):
877 log.info('multicast data received for group %s from source %s'%(group,source))
878 self.success = True
879 sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
880 t = threading.Thread(target = recv_task)
881 t.start()
882 self.send_multicast_data_traffic(group,source=source)
883 t.join()
884 return self.success
885
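    # Minimal usage sketch for the two multicast helpers above (group and source are
    # assumed values): the sender pushes data for the group on veth2 while the sniffer
    # on veth0 sets self.success when traffic for the same group/source pair arrives.
    #
    #   status = self.verify_igmp_data_traffic('224.2.3.4', intf = self.V_INF1, source = '2.2.2.2')
    #   assert_equal(status, True)
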
886 #pass
887 def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
888 status = self.verify_cluster_status(onos_instances=onos_instances)
889 assert_equal(status, True)
890 master, standbys = self.get_cluster_current_master_standbys()
891 assert_equal(len(standbys), (onos_instances-1))
892 onos_names_ips = self.get_cluster_container_names_ips()
893 master_onos_name = onos_names_ips[master]
894 self.igmp.setUp(controller=master)
895 groups = ['224.2.3.4','230.5.6.7']
896 src_list = ['2.2.2.2','3.3.3.3']
897 self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
898 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
899 iface = self.V_INF1, delay = 2)
900 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
901 iface = self.V_INF1, delay = 2)
902 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
903 assert_equal(status,True)
904 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
905 assert_equal(status,False)
906 log.info('restarting cluster master %s'%master)
907 Container(master_onos_name,Onos.IMAGE).restart()
908 time.sleep(60)
909 log.info('verifying multicast data traffic after master restart')
910 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
911 assert_equal(status,True)
912 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
913 assert_equal(status,False)
914
915 #pass
916 def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
917 status = self.verify_cluster_status(onos_instances=onos_instances)
918 assert_equal(status, True)
919 master, standbys = self.get_cluster_current_master_standbys()
920 assert_equal(len(standbys), (onos_instances-1))
921 onos_names_ips = self.get_cluster_container_names_ips()
922 master_onos_name = onos_names_ips[master]
923 self.igmp.setUp(controller=master)
924 groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
925 src_list = [self.igmp.randomsourceip()]
926 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
927 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
928 iface = self.V_INF1, delay = 2)
929 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
930 iface = self.V_INF1, delay = 2)
931 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
932 assert_equal(status,True)
933 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
934 assert_equal(status,False)
935 log.info('Killing cluster master %s'%master)
936 Container(master_onos_name,Onos.IMAGE).kill()
937 time.sleep(60)
938 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
939 assert_equal(status, True)
940 log.info('Verifying multicast data traffic after cluster master down')
941 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
942 assert_equal(status,True)
943 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
944 assert_equal(status,False)
945
946 def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
947 status = self.verify_cluster_status(onos_instances=onos_instances)
948 assert_equal(status, True)
949 master, standbys = self.get_cluster_current_master_standbys()
950 assert_equal(len(standbys), (onos_instances-1))
951 onos_names_ips = self.get_cluster_container_names_ips()
952 master_onos_name = onos_names_ips[master]
953 self.igmp.setUp(controller=master)
954 groups = [self.igmp.random_mcast_ip()]
955 src_list = [self.igmp.randomsourceip()]
956 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
957 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
958 iface = self.V_INF1, delay = 2)
959 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
960 assert_equal(status,True)
961 log.info('Killing clusters master %s'%master)
962 Container(master_onos_name,Onos.IMAGE).kill()
963 count = 0
964 for i in range(60):
965 log.info('Verifying multicast data traffic after cluster master down')
966 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
967 if status:
968 break
969 else:
970 count += 1
971 time.sleep(1)
972 assert_equal(status, True)
973 log.info('Time taken to recover traffic after clusters master down is %d seconds'%count)
974
975
976 #pass
977 def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
978 status = self.verify_cluster_status(onos_instances=onos_instances)
979 assert_equal(status, True)
980 master, standbys = self.get_cluster_current_master_standbys()
981 assert_equal(len(standbys), (onos_instances-1))
982 self.igmp.setUp(controller=master)
983 groups = [self.igmp.random_mcast_ip()]
984 src_list = [self.igmp.randomsourceip()]
985 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
986 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
987 iface = self.V_INF1, delay = 2)
988 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
989 assert_equal(status,True)
990 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
991 self.change_cluster_current_master(new_master=standbys[0])
992 log.info('Verifying multicast traffic after cluster master change')
993 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
994 assert_equal(status,True)
995 log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
996 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
997 iface = self.V_INF1, delay = 1)
998 time.sleep(10)
999 status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
1000 assert_equal(status,False)
1001
1002 #pass
1003 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1004 status = self.verify_cluster_status(onos_instances=onos_instances)
1005 assert_equal(status, True)
1006 master,standbys = self.get_cluster_current_master_standbys()
1007 assert_equal(len(standbys), (onos_instances-1))
1008 self.igmp.setUp(controller=master)
1009 groups = [self.igmp.random_mcast_ip()]
1010 src_list = [self.igmp.randomsourceip()]
1011 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1012 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1013 self.change_cluster_current_master(new_master = standbys[0])
1014 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1015 iface = self.V_INF1, delay = 2)
1016 time.sleep(1)
1017 self.change_cluster_current_master(new_master = master)
1018 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1019 assert_equal(status,True)
1020
1021 #pass
ChetanGaonker2099d722016-10-07 15:16:58 -07001022 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001023 def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001024 status = self.verify_cluster_status(onos_instances=onos_instances)
1025 assert_equal(status, True)
1026 master, standbys = self.get_cluster_current_master_standbys()
1027 assert_equal(len(standbys), (onos_instances-1))
1028 self.tls.setUp(controller=master)
1029 df = defer.Deferred()
1030 def eap_tls_verify(df):
1031 tls = TLSAuthTest()
1032 tls.runTest()
1033 df.callback(0)
1034 reactor.callLater(0, eap_tls_verify, df)
1035 return df
1036
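    # Pattern shared by the EAP-TLS tests in this class: the test body schedules the
    # blocking TLSAuthTest run via reactor.callLater and returns a twisted Deferred;
    # the @deferred(timeout) decorator from nose.twistedtools fails the test if
    # df.callback() has not fired within the timeout.
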
1037 @deferred(120)
ChetanGaonker689b3862016-10-17 16:25:01 -07001038 def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001039 master, standbys = self.get_cluster_current_master_standbys()
1040 assert_equal(len(standbys), (onos_instances-1))
1041 self.tls.setUp()
1042 df = defer.Deferred()
        def eap_tls_verify(df):
1044 tls = TLSAuthTest()
1045 tls.runTest()
1046 df.callback(0)
1047 for i in [0,1]:
1048 if i == 1:
1049 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1050 self.change_master_current_cluster(new_master=standbys[0])
1051 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1052 else:
1053 log.info('Verifying tls authentication before cluster master change')
1054 reactor.callLater(0, eap_tls_verify, df)
1055 return df
1056
1057 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001058 def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001059 status = self.verify_cluster_status(onos_instances=onos_instances)
1060 assert_equal(status, True)
1061 master, standbys = self.get_cluster_current_master_standbys()
1062 assert_equal(len(standbys), (onos_instances-1))
1063 onos_names_ips = self.get_cluster_container_names_ips()
1064 master_onos_name = onos_names_ips[master]
1065 self.tls.setUp()
1066 df = defer.Deferred()
1067 def eap_tls_verify(df):
1068 tls = TLSAuthTest()
1069 tls.runTest()
1070 df.callback(0)
1071 for i in [0,1]:
1072 if i == 1:
1073 log.info('Killing cluster current master %s'%master)
1074 cord_test_onos_shutdown(node = master_onos_name)
1075 time.sleep(20)
1076 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1077 assert_equal(status, True)
1078 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1079 log.info('Verifying tls authentication after killing cluster master')
1080 reactor.callLater(0, eap_tls_verify, df)
1081 return df
1082
1083 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001084 def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001085 status = self.verify_cluster_status(onos_instances=onos_instances)
1086 assert_equal(status, True)
1087 master, standbys = self.get_cluster_current_master_standbys()
1088 assert_equal(len(standbys), (onos_instances-1))
1089 onos_names_ips = self.get_cluster_container_names_ips()
1090 member_onos_name = onos_names_ips[standbys[0]]
1091 self.tls.setUp()
1092 df = defer.Deferred()
1093 def eap_tls_no_cert(df):
1094 def tls_no_cert_cb():
1095 log.info('TLS authentication failed with no certificate')
1096 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1097 tls.runTest()
1098 assert_equal(tls.failTest, True)
1099 df.callback(0)
1100 for i in [0,1]:
1101 if i == 1:
1102 log.info('Restart cluster member %s'%standbys[0])
1103 Container(member_onos_name,Onos.IMAGE).restart()
1104 time.sleep(20)
1105 status = self.verify_cluster_status(onos_instances=onos_instances)
1106 assert_equal(status, True)
1107 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1108 log.info('Verifying tls authentication after member restart')
1109 reactor.callLater(0, eap_tls_no_cert, df)
1110 return df
1111
ChetanGaonker689b3862016-10-17 16:25:01 -07001112 #pass
1113 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1114 status = self.verify_cluster_status(onos_instances=onos_instances)
1115 assert_equal(status,True)
1116 master,standbys = self.get_cluster_current_master_standbys()
1117 assert_equal(len(standbys),(onos_instances-1))
1118 self.proxyarp.setUpClass()
1119 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1120 ingress = hosts+1
1121 for hostip, hostmac in hosts_config:
1122 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1123 time.sleep(1)
1124 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1125 self.change_cluster_current_master(new_master=standbys[0])
1126 log.info('verifying proxyarp after master change')
1127 for hostip, hostmac in hosts_config:
1128 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1129 time.sleep(1)
1130 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1131 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1132 time.sleep(3)
1133 for hostip, hostmac in hosts_config:
1134 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1135 time.sleep(1)
1136 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1137 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1138 time.sleep(3)
1139 for hostip, hostmac in hosts_config:
1140 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1141 time.sleep(1)
ChetanGaonker2099d722016-10-07 15:16:58 -07001142
ChetanGaonker689b3862016-10-17 16:25:01 -07001143 #pass
1144 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001145 status = self.verify_cluster_status(onos_instances=onos_instances)
1146 assert_equal(status, True)
1147 master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001148 assert_equal(len(standbys), (onos_instances-1))
1149 onos_names_ips = self.get_cluster_container_names_ips()
1150 member_onos_name = onos_names_ips[standbys[1]]
1151 self.proxyarp.setUpClass()
1152 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1153 ingress = hosts+1
1154 for hostip, hostmac in hosts_config:
1155 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1156 time.sleep(1)
1157 log.info('killing cluster member %s'%standbys[1])
1158 Container(member_onos_name,Onos.IMAGE).kill()
1159 time.sleep(20)
1160 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1161 assert_equal(status, True)
1162 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1163 log.info('verifying proxy arp functionality after cluster member down')
1164 for hostip, hostmac in hosts_config:
1165 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1166 time.sleep(1)
1167
1168 #pass
1169 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1170 status = self.verify_cluster_status(onos_instances=onos_instances)
1171 assert_equal(status, True)
1172 self.proxyarp.setUpClass()
1173 master, standbys = self.get_cluster_current_master_standbys()
1174 assert_equal(len(standbys), (onos_instances-1))
1175 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1176 self.success = True
1177 ingress = hosts+1
1178 ports = range(ingress,ingress+10)
1179 hostmac = []
1180 hostip = []
1181 for ip,mac in hosts_config:
1182 hostmac.append(mac)
1183 hostip.append(ip)
1184 success_dir = {}
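        #each thread sniffs for the proxied arp reply on its own ingress interface and records success under its thread name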
1185 def verify_proxyarp(*r):
1186 ingress, hostmac, hostip = r[0],r[1],r[2]
1187 def mac_recv_task():
1188 def recv_cb(pkt):
1189 log.info('ARP reply seen with source MAC %s' %(pkt[ARP].hwsrc))
1190 success_dir[current_thread().name] = True
1191 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1192 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1193 t = threading.Thread(target = mac_recv_task)
1194 t.start()
1195 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1196 log.info('Sending arp request for dest ip %s on interface %s' %
1197 (hostip,self.proxyarp.port_map[ingress]))
1198 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1199 t.join()
1200 t = []
1201 for i in range(10):
1202 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1203 for i in range(10):
1204 t[i].start()
1205 time.sleep(2)
1206 for i in range(10):
1207 t[i].join()
1208 if len(success_dir) != 10:
1209 self.success = False
1210 assert_equal(self.success, True)
1211
1212 #pass
1213 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1214 status = self.verify_cluster_status(onos_instances=onos_instances)
1215 assert_equal(status, True)
1216 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001217 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001218 self.acl.setUp()
1219 acl_rule = ACLTest()
1220 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1221 if status is False:
1222 log.info('JSON request returned status %d' %code)
1223 assert_equal(status, True)
1224 result = acl_rule.get_acl_rules(controller=master)
1225 aclRules1 = result.json()['aclRules']
1226 log.info('Added acl rules are %s'%aclRules1)
1227 acl_Id = map(lambda d: d['id'], aclRules1)
1228 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1229 self.change_cluster_current_master(new_master=standbys[0])
1230 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1231 if status is False:
1232 log.info('JSON request returned status %d' %code)
1233 assert_equal(status, True)
1234
1235 #pass
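    #adds an acl rule on the current master, kills the master and verifies the same rule ids are seen from the newly elected master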
1236 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1237 status = self.verify_cluster_status(onos_instances=onos_instances)
1238 assert_equal(status, True)
1239 master,standbys = self.get_cluster_current_master_standbys()
1240 assert_equal(len(standbys),(onos_instances-1))
1241 onos_names_ips = self.get_cluster_container_names_ips()
1242 master_onos_name = onos_names_ips[master]
1243 self.acl.setUp()
1244 acl_rule = ACLTest()
1245 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1246 if status is False:
1247 log.info('JSON request returned status %d' %code)
1248 assert_equal(status, True)
1249 result1 = acl_rule.get_acl_rules(controller=master)
1250 aclRules1 = result1.json()['aclRules']
1251 log.info('Added acl rules are %s'%aclRules1)
1252 acl_Id1 = map(lambda d: d['id'], aclRules1)
1253 log.info('Killing cluster current master %s'%master)
1254 Container(master_onos_name,Onos.IMAGE).kill()
1255 time.sleep(45)
1256 status = self.verify_cluster_status(onos_instances=onos_instances,controller=standbys[0])
1257 assert_equal(status, True)
1258 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1259 assert_equal(len(standbys),(onos_instances-2))
1260 assert_not_equal(new_master,master)
1261 result2 = acl_rule.get_acl_rules(controller=new_master)
1262 aclRules2 = result2.json()['aclRules']
1263 acl_Id2 = map(lambda d: d['id'], aclRules2)
1264 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1265 assert_equal(acl_Id2,acl_Id1)
1266
1267 #acl traffic scenario not working as acl rule is not getting added to onos
1268 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1269 status = self.verify_cluster_status(onos_instances=onos_instances)
1270 assert_equal(status, True)
1271 master,standbys = self.get_cluster_current_master_standbys()
1272 assert_equal(len(standbys),(onos_instances-1))
1273 onos_names_ips = self.get_cluster_container_names_ips()
1274 member1_onos_name = onos_names_ips[standbys[0]]
1275 member2_onos_name = onos_names_ips[standbys[1]]
1276 ingress = self.acl.ingress_iface
1277 egress = self.acl.CURRENT_PORT_NUM
1278 acl_rule = ACLTest()
1279 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1280 self.acl.CURRENT_PORT_NUM += 1
1281 time.sleep(5)
1282 if status is False:
1283 log.info('JSON request returned status %d' %code)
1284 assert_equal(status, True)
1285 srcMac = '00:00:00:00:00:11'
1286 dstMac = host_ip_mac[0][1]
1287 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1288 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1289 time.sleep(10)
1290 if status is False:
1291 log.info('JSON request returned status %d' %code)
1292 assert_equal(status, True)
1293 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1294 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1295 Container(member1_onos_name, Onos.IMAGE).kill()
1296 Container(member2_onos_name, Onos.IMAGE).kill()
1297 time.sleep(40)
1298 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1299 assert_equal(status, True)
1300 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1301 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1302
1303 #pass
1304 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1305 status = self.verify_cluster_status(onos_instances=onos_instances)
1306 assert_equal(status, True)
1307 master,standbys = self.get_cluster_current_master_standbys()
1308 assert_equal(len(standbys),(onos_instances-1))
1309 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001310 mac = self.dhcprelay.get_mac(iface)
1311 self.dhcprelay.host_load(iface)
1312 ##We use the defaults for this test, which serves as an example for others
1313 ##No need to restart the dhcpd server when retaining the default config
1314 config = self.dhcprelay.default_config
1315 options = self.dhcprelay.default_options
1316 subnet = self.dhcprelay.default_subnet_config
1317 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1318 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1319 config = config,
1320 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001321 subnet = subnet,
1322 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001323 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1324 cip, sip = self.dhcprelay.send_recv(mac)
1325 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1326 self.change_master_current_cluster(new_master=standbys[0])
1327 log.info('Releasing ip %s to server %s' %(cip, sip))
1328 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1329 log.info('Triggering DHCP discover again after release')
1330 cip2, sip2 = self.dhcprelay.send_recv(mac)
1331 log.info('Verifying released IP was given back on rediscover')
1332 assert_equal(cip, cip2)
1333 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1334 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001335 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001336
ChetanGaonker689b3862016-10-17 16:25:01 -07001337
1338 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1339 status = self.verify_cluster_status(onos_instances=onos_instances)
1340 assert_equal(status, True)
1341 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001342 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001343 onos_names_ips = self.get_cluster_container_names_ips()
1344 master_onos_name = onos_names_ips[master]
1345 self.dhcprelay.setUpClass(controller=master)
1346 mac = self.dhcprelay.get_mac(iface)
1347 self.dhcprelay.host_load(iface)
1348 ##We use the defaults for this test, which serves as an example for others
1349 ##No need to restart the dhcpd server when retaining the default config
1350 config = self.dhcprelay.default_config
1351 options = self.dhcprelay.default_options
1352 subnet = self.dhcprelay.default_subnet_config
1353 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1354 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1355 config = config,
1356 options = options,
1357 subnet = subnet,
1358 controller=master)
1359 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1360 log.info('Initiating dhcp process from client %s'%mac)
1361 cip, sip = self.dhcprelay.send_recv(mac)
1362 log.info('Killing cluster current master %s'%master)
1363 Container(master_onos_name, Onos.IMAGE).kill()
1364 time.sleep(60)
1365 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1366 assert_equal(status, True)
1367 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1368 log.info("Verifying dhcp clients gets same IP after cluster master restarts")
1369 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1370 assert_equal(new_cip, cip)
1371 self.dhcprelay.tearDownClass(controller=standbys[0])
1372
1373 #pass
1374 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1375 status = self.verify_cluster_status(onos_instances=onos_instances)
1376 assert_equal(status, True)
1377 master,standbys = self.get_cluster_current_master_standbys()
1378 assert_equal(len(standbys),(onos_instances-1))
1379 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001380 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1381 self.dhcprelay.host_load(iface)
1382 ##We use the defaults for this test, which serves as an example for others
1383 ##No need to restart the dhcpd server when retaining the default config
1384 config = self.dhcprelay.default_config
1385 options = self.dhcprelay.default_options
1386 subnet = self.dhcprelay.default_subnet_config
1387 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1388 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1389 config = config,
1390 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001391 subnet = subnet,
1392 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001393 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1394 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1395 assert_not_equal(cip1,None)
1396 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1397 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1398 self.change_master_current_cluster(new_master=standbys[0])
1399 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1400 assert_not_equal(cip2,None)
1401 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1402 log.info('Changing cluster master from %s to %s'%(standbys[0],master))
1403 self.change_master_current_cluster(new_master=master)
1404 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1405 assert_not_equal(cip3,None)
1406 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
ChetanGaonker689b3862016-10-17 16:25:01 -07001407 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001408
ChetanGaonker689b3862016-10-17 16:25:01 -07001409 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001410 status = self.verify_cluster_status(onos_instances=onos_instances)
1411 assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001412 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001413 self.subscriber.num_subscribers = 5
1414 self.subscriber.num_channels = 10
1415 for i in [0,1]:
1416 if i == 1:
1417 cord_test_onos_restart()
1418 time.sleep(45)
1419 status = self.verify_cluster_status(onos_instances=onos_instances)
1420 assert_equal(status, True)
1421 log.info('Verifying cord subscriber functionality after cluster restart')
1422 else:
1423 log.info('Verifying cord subscriber functionality before cluster restart')
1424 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1425 num_channels = self.subscriber.num_channels,
1426 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1427 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1428 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1429 self.subscriber.num_channels))
1430 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001431 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001432
ChetanGaonker689b3862016-10-17 16:25:01 -07001433 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1434 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1435 status = self.verify_cluster_status(onos_instances=onos_instances)
1436 assert_equal(status, True)
1437 master,standbys = self.get_cluster_current_master_standbys()
1438 assert_equal(len(standbys),(onos_instances-1))
1439 self.subscriber.setUpClass(controller=master)
1440 self.subscriber.num_subscribers = 5
1441 self.subscriber.num_channels = 10
1442 for i in [0,1]:
1443 if i == 1:
1444 status=self.withdraw_cluster_current_mastership(master_ip=master)
1445 assert_equal(status, True)
1446 master,standbys = self.get_cluster_current_master_standbys()
1447 log.info('verifying cord subscriber functionality after the current cluster master withdraws mastership')
1448 else:
1449 log.info('verifying cord subscriber functionality before the cluster master withdraws mastership')
1450 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1451 num_channels = self.subscriber.num_channels,
1452 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1453 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1454 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1455 self.subscriber.num_channels),controller=master)
1456 assert_equal(test_status, True)
1457 self.subscriber.tearDownClass(controller=master)
1458
1459 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1460 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001461 status = self.verify_cluster_status(onos_instances=onos_instances)
1462 assert_equal(status, True)
1463 master, standbys = self.get_cluster_current_master_standbys()
1464 assert_equal(len(standbys),(onos_instances-1))
1465 onos_names_ips = self.get_cluster_container_names_ips()
1466 member_onos_name = onos_names_ips[standbys[0]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001467 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001468 num_subscribers = 1
1469 num_channels = 10
1470 for i in [0,1]:
1471 if i == 1:
1472 cord_test_onos_shutdown(node = member_onos_name)
1473 time.sleep(30)
ChetanGaonker689b3862016-10-17 16:25:01 -07001474 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001475 assert_equal(status, True)
1476 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1477 else:
1478 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1479 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1480 num_channels = num_channels,
1481 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1482 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1483 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
ChetanGaonker689b3862016-10-17 16:25:01 -07001484 negative_subscriber_auth = 'all',controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001485 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001486 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001487
ChetanGaonker689b3862016-10-17 16:25:01 -07001488 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001489 status = self.verify_cluster_status(onos_instances=onos_instances)
1490 assert_equal(status, True)
1491 master, standbys = self.get_cluster_current_master_standbys()
1492 assert_equal(len(standbys),(onos_instances-1))
1493 onos_names_ips = self.get_cluster_container_names_ips()
1494 member1_onos_name = onos_names_ips[standbys[0]]
1495 member2_onos_name = onos_names_ips[standbys[1]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001496 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001497 num_subscribers = 1
1498 num_channels = 10
1499 for i in [0,1]:
1500 if i == 1:
1501 cord_test_onos_shutdown(node = member1_onos_name)
1502 cord_test_onos_shutdown(node = member2_onos_name)
1503 time.sleep(60)
1504 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1505 assert_equal(status, True)
1506 log.info('Verifying cord subscriber functionality after two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1507 else:
1508 log.info('Verifying cord subscriber functionality before two cluster members %s and %s go down'%(standbys[0],standbys[1]))
1509 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1510 num_channels = num_channels,
1511 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1512 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1513 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1514 negative_subscriber_auth = 'all')
1515 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001516 self.subscriber.tearDownClass(controller=master)
1517
1518 #pass
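    #verifies every connected device has a master and onos_instances-1 standby controllers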
1519 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1520 status = self.verify_cluster_status(onos_instances=onos_instances)
1521 assert_equal(status, True)
1522 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1523 for device in device_dict.keys():
1524 log.info("Device is %s"%device_dict[device])
1525 assert_not_equal(device_dict[device]['master'],'none')
1526 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1527 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1528
1529 #pass
1530 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1531 status = self.verify_cluster_status(onos_instances=onos_instances)
1532 assert_equal(status, True)
1533 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1534 cluster_ips = self.get_cluster_current_member_ips()
1535 for ip in cluster_ips:
1536 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1537 assert_equal(len(device_dict.keys()),onos_instances)
1538 for device in device_dict.keys():
1539 log.info("Device is %s"%device_dict[device])
1540 assert_not_equal(device_dict[device]['master'],'none')
1541 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1542 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1543
1544 #pass
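    #restarts the instance currently mastering devices and verifies it holds no device mastership afterwards while the total device count is unchanged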
1545 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1546 status = self.verify_cluster_status(onos_instances=onos_instances)
1547 assert_equal(status, True)
1548 onos_names_ips = self.get_cluster_container_names_ips()
1549 master_count = self.get_number_of_devices_of_master()
1550 log.info('Master count information is %s'%master_count)
1551 total_devices = 0
1552 for master in master_count.keys():
1553 total_devices += master_count[master]['size']
1554 if master_count[master]['size'] != 0:
1555 restart_ip = master
1556 assert_equal(total_devices,onos_instances)
1557 member_onos_name = onos_names_ips[restart_ip]
1558 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1559 Container(member_onos_name, Onos.IMAGE).restart()
1560 time.sleep(40)
1561 master_count = self.get_number_of_devices_of_master()
1562 log.info('Master count information after restart is %s'%master_count)
1563 total_devices = 0
1564 for master in master_count.keys():
1565 total_devices += master_count[master]['size']
1566 if master == restart_ip:
1567 assert_equal(master_count[master]['size'], 0)
1568 assert_equal(total_devices,onos_instances)
1569
1570 #pass
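    #kills the instance currently mastering devices and verifies its devices are re-mastered by the remaining instances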
1571 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1572 status = self.verify_cluster_status(onos_instances=onos_instances)
1573 assert_equal(status, True)
1574 onos_names_ips = self.get_cluster_container_names_ips()
1575 master_count = self.get_number_of_devices_of_master()
1576 log.info('Master count information is %s'%master_count)
1577 total_devices = 0
1578 for master in master_count.keys():
1579 total_devices += master_count[master]['size']
1580 if master_count[master]['size'] != 0:
1581 restart_ip = master
1582 assert_equal(total_devices,onos_instances)
1583 master_onos_name = onos_names_ips[restart_ip]
1584 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1585 Container(master_onos_name, Onos.IMAGE).kill()
1586 time.sleep(40)
1587 for ip in onos_names_ips.keys():
1588 if ip != restart_ip:
1589 controller_ip = ip
1590 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1591 assert_equal(status, True)
1592 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1593 log.info('Master count information after master down is %s'%master_count)
1594 total_devices = 0
1595 for master in master_count.keys():
1596 total_devices += master_count[master]['size']
1597 if master == restart_ip:
1598 assert_equal(master_count[master]['size'], 0)
1599 assert_equal(total_devices,onos_instances)
1600
1601 #pass
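    #withdraws mastership of one device from its current master and verifies that master's device count drops by one while the total stays the same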
1602 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1603 status = self.verify_cluster_status(onos_instances=onos_instances)
1604 assert_equal(status, True)
1605 master_count = self.get_number_of_devices_of_master()
1606 log.info('Master count information is %s'%master_count)
1607 total_devices = 0
1608 for master in master_count.keys():
1609 total_devices += int(master_count[master]['size'])
1610 if master_count[master]['size'] != 0:
1611 master_ip = master
1612 log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
1613 device_id = str(master_count[master]['devices'][0])
1614 device_count = master_count[master]['size']
1615 assert_equal(total_devices,onos_instances)
1616 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1617 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1618 assert_equal(status, True)
1619 master_count = self.get_number_of_devices_of_master()
1620 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1621 total_devices = 0
1622 for master in master_count.keys():
1623 total_devices += int(master_count[master]['size'])
1624 if master == master_ip:
1625 assert_equal(master_count[master]['size'], device_count-1)
1626 assert_equal(total_devices,onos_instances)
1627
1628 #pass
1629 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1630 status = self.verify_cluster_status(onos_instances=onos_instances)
1631 assert_equal(status, True)
1632 master_count = self.get_number_of_devices_of_master()
1633 log.info('Master count information is %s'%master_count)
1634 total_devices = 0
1635 for master in master_count.keys():
1636 total_devices += master_count[master]['size']
1637 assert_equal(total_devices,onos_instances)
1638 log.info('Restarting cluster')
1639 cord_test_onos_restart()
1640 time.sleep(60)
1641 master_count = self.get_number_of_devices_of_master()
1642 log.info('Master count information after restart is %s'%master_count)
1643 total_devices = 0
1644 for master in master_count.keys():
1645 total_devices += master_count[master]['size']
1646 assert_equal(total_devices,onos_instances)
ChetanGaonker2099d722016-10-07 15:16:58 -07001647