# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl, get_mac
from OltConfig import OltConfig
from socket import socket
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart, cord_test_restart_cluster
from portmaps import g_subscriber_port_map
from scapy.all import *
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
from OnosLog import OnosLog
from CordLogger import CordLogger
import os
import json
import random
import collections
log.setLevel('INFO')

class cluster_exchange(CordLogger):
    test_path = os.path.dirname(os.path.realpath(__file__))
    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
    mac = RandMAC()._fix()
    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
    igmp_ip = IP(dst = '224.0.0.22')
    ONOS_INSTANCES = 3
    V_INF1 = 'veth0'
    TLS_TIMEOUT = 100
    device_id = 'of:' + get_mac()
    igmp = cluster_igmp()
    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10',end_ip = '224.1.10.49')
    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35',end_ip='38.24.35.56')
    tls = cluster_tls()
    flows = cluster_flows()
    proxyarp = cluster_proxyarp()
    vrouter = cluster_vrouter()
    acl = cluster_acl()
    dhcprelay = cluster_dhcprelay()
    subscriber = cluster_subscriber()
    testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_single_controller_restarts', 'test_cluster_restarts')
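    #NOTE: the tests listed in testcaseLoggers handle ONOS log collection themselves,
    #so setUp/tearDown below intentionally skip the default CordLogger handling for them.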

    def setUp(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).setUp()

    def tearDown(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).tearDown()

    def get_controller(self):
        controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
        controller = controller.split(',')[0]
        return controller

    @classmethod
    def get_controllers(cls):
        controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
        return controllers.split(',')

    def cliEnter(self, controller = None):
        retries = 0
        while retries < 3:
            self.cli = OnosCliDriver(controller = controller, connect = True)
            if self.cli.handle:
                break
            else:
                retries += 1
                time.sleep(2)

    def cliExit(self):
        self.cli.disconnect()

    def get_leader(self, controller = None):
        self.cliEnter(controller = controller)
        try:
            result = json.loads(self.cli.leaders(jsonFormat = True))
        except:
            result = None

        if result is None:
            log.info('Leaders command failure for controller %s' %controller)
        else:
            log.info('Leaders returned: %s' %result)
        self.cliExit()
        return result

    def log_set(self, level = None, app = 'org.onosproject', controllers = None):
        CordLogger.logSet(level = level, app = app, controllers = controllers, forced = True)
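    #Usage sketch: the controller restart tests below call this helper as
    #  self.log_set(controllers = adjacent_controllers)
    #  self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
    #to adjust logging on the surviving nodes before a controller is restarted.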

    def get_leaders(self, controller = None):
        result_map = {}
        if controller is None:
            controller = self.get_controller()
        if type(controller) in [ list, tuple ]:
            for c in controller:
                leaders = self.get_leader(controller = c)
                result_map[c] = leaders
        else:
            leaders = self.get_leader(controller = controller)
            result_map[controller] = leaders
        return result_map

    def verify_leaders(self, controller = None):
        leaders_map = self.get_leaders(controller = controller)
        failed = [ k for k,v in leaders_map.items() if v == None ]
        return failed
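    #verify_leaders returns the list of controllers whose 'leaders' CLI query failed;
    #an empty list therefore means every queried node answered the leaders command.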

    def verify_cluster_status(self, controller = None, onos_instances = ONOS_INSTANCES, verify = False):
        tries = 0
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
                if cluster_summary:
                    log.info("cluster 'summary' command output is %s"%cluster_summary)
                    nodes = cluster_summary['nodes']
                    if verify:
                        if nodes == onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                    else:
                        if nodes >= onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return False
        except:
            raise Exception('Failed to get cluster members')
            return False

    def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
        tries = 0
        cluster_ips = []
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
                if cluster_nodes:
                    log.info("cluster 'nodes' output is %s"%cluster_nodes)
                    if nodes_filter:
                        cluster_nodes = nodes_filter(cluster_nodes)
                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
                    self.cliExit()
                    cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
                    return cluster_ips
                else:
                    tries += 1
            self.cliExit()
            return cluster_ips
        except:
            raise Exception('Failed to get cluster members')
            return cluster_ips
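    #nodes_filter, when given, prunes the 'nodes' output before the member ids are collected.
    #For example (sketch), the restart tests below pass:
    #  nodes_filter = lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY' ] ]
    #so that only members reported as ACTIVE/READY are counted.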

    def get_cluster_container_names_ips(self, controller = None):
        onos_names_ips = {}
        onos_ips = self.get_cluster_current_member_ips(controller = controller)
        onos_names_ips[onos_ips[0]] = Onos.NAME
        onos_names_ips[Onos.NAME] = onos_ips[0]
        for i in range(1,len(onos_ips)):
            name = '{0}-{1}'.format(Onos.NAME,i+1)
            onos_names_ips[onos_ips[i]] = name
            onos_names_ips[name] = onos_ips[i]

        return onos_names_ips
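    #The map above is intentionally bidirectional (ip -> container name and name -> ip),
    #so callers can look up either direction; assuming Onos.NAME is, say, 'onos', the
    #members map to 'onos', 'onos-2', 'onos-3', and so on.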

    #identifying current master of a connected device, not tested
    def get_cluster_current_master_standbys(self,controller=None,device_id=device_id):
        master = None
        standbys = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        log.info('Verifying device info in line %s'%device)
                        if device['id'] == device_id:
                            master = str(device['master'])
                            standbys = map(lambda d: str(d), device['standbys'])
                            log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
                            self.cliExit()
                            return master, standbys
                    self.cliExit()
                    return master, standbys
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return master, standbys
        except:
            raise Exception('Failed to get cluster members')
            return master, standbys

    def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
        '''returns master and standbys of all the devices connected to the ONOS cluster instance'''
        device_dict = {}
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_dict = {}
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        device_dict[str(device['id'])] = {'master':str(device['master']),'standbys':device['standbys']}
                        for i in range(len(device_dict[device['id']]['standbys'])):
                            device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
                        log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
                    self.cliExit()
                    return device_dict
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_dict
        except:
            raise Exception('Failed to get cluster members')
            return device_dict

    #identify current master of a connected device, not tested
    def get_cluster_connected_devices(self,controller=None):
        '''returns all the devices connected to the ONOS cluster'''
        device_list = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_list = []
                devices = json.loads(self.cli.devices(jsonFormat = True))
                log.info("cluster 'devices' command output is %s"%devices)
                if devices:
                    for device in devices:
                        log.info('device id is %s'%device['id'])
                        device_list.append(str(device['id']))
                    self.cliExit()
                    return device_list
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_list
        except:
            raise Exception('Failed to get cluster members')
            return device_list

    def get_number_of_devices_of_master(self,controller=None):
        '''returns a master-to-devices map, i.e. which master owns which devices'''
        master_count = {}
        try:
            cli = self.cliEnter(controller = controller)
            masters = json.loads(self.cli.masters(jsonFormat = True))
            if masters:
                for master in masters:
                    master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
                return master_count
            else:
                return master_count
        except:
            raise Exception('Failed to get cluster members')
            return master_count

    def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
        if new_master is None: return False
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_equal(master,new_master)
        log.info('Cluster master changed to %s successfully'%new_master)

    def withdraw_cluster_current_mastership(self,master_ip=None,device_id=device_id,controller=None):
        '''the current master loses its mastership and hence a new master will be elected'''
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_not_equal(new_master_ip,master_ip)
        log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
        log.info('Cluster new master is %s'%new_master_ip)
        return True
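    #Typical flow (sketch): fetch the current roles, then force a re-election:
    #  master, standbys = self.get_cluster_current_master_standbys()
    #  self.withdraw_cluster_current_mastership(master_ip = master)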

    def test_cluster_controller_restarts(self):
        '''Test the cluster by repeatedly killing the controllers'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call verifies the cluster once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(controller = None):
            adjacent_controller = None
            adjacent_controllers = None
            if controller:
                adjacent_controllers = list(set(controllers) - set([controller]))
                adjacent_controller = adjacent_controllers[0]
            for node in controllers:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    failed = self.verify_leaders(controllers)
                    if failed:
                        log.info('Leaders command failed on nodes: %s' %failed)
                    assert_equal(len(failed), 0)
                    return controller

            try:
                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
                log.info('ONOS cluster formed with controllers: %s' %ips)
                st = True
            except:
                st = False

            failed = self.verify_leaders(controllers)
            assert_equal(len(failed), 0)
            if st is False:
                log.info('No storage exception and ONOS cluster was not formed successfully')
            else:
                controller = None

            return controller

        next_controller = None
        tries = 10
        for num in range(tries):
            index = num % ctlr_len
            #index = random.randrange(0, ctlr_len)
            controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
            controller = onos_map[controller_name]
            log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
            try:
                #enable debug log for the other controllers before restarting this controller
                adjacent_controllers = list( set(controllers) - set([controller]) )
                self.log_set(controllers = adjacent_controllers)
                self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
                cord_test_onos_restart(node = controller_name, timeout = 0)
                self.log_set(controllers = controller)
                self.log_set(app = 'io.atomix', controllers = controller)
                time.sleep(60)
            except:
                time.sleep(5)
                continue
            next_controller = check_exception(controller = controller)

    def test_cluster_single_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the same controller'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call verifies the cluster once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(controller, inclusive = False):
            adjacent_controllers = list(set(controllers) - set([controller]))
            adjacent_controller = adjacent_controllers[0]
            controller_list = adjacent_controllers if inclusive == False else controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                assert_equal(len(failed), 0)
                return controller

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if controller in ips and inclusive is False:
                    log.info('Controller %s still ACTIVE on Node %s after it was shut down' %(controller, ctlr))
                if controller not in ips and inclusive is True:
                    log.info('Controller %s still INACTIVE on Node %s after it was restarted' %(controller, ctlr))

            return controller

        tries = 10
        #choose a random controller for shutdown/restarts
        controller = controllers[random.randrange(0, ctlr_len)]
        controller_name = onos_map[controller]
        for num in range(tries):
            index = num % ctlr_len
            log.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
            try:
                cord_test_onos_shutdown(node = controller_name)
                time.sleep(20)
            except:
                time.sleep(5)
                continue
            #check for exceptions on the adjacent nodes
            check_exception(controller)
            #Now restart the controller back
            log.info('Restarting back the controller %s' %controller_name)
            cord_test_onos_restart(node = controller_name)
            time.sleep(60)
            check_exception(controller, inclusive = True)

    def test_cluster_restarts(self):
        '''Test the cluster by repeatedly restarting the entire cluster'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call verifies the cluster once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception():
            controller_list = controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                assert_equal(len(failed), 0)
                return

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                assert_equal(len(ips), len(controllers))

        tries = 10
        for num in range(tries):
            log.info('ITERATION: %d. Restarting cluster with controllers at %s' %(num+1, controllers))
            try:
                cord_test_restart_cluster()
                log.info('Delaying before verifying cluster status')
                time.sleep(60)
            except:
                time.sleep(10)
                continue
            #check for exceptions on all the nodes
            check_exception()

    #pass
    def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster exists with %d ONOS instances'%onos_instances)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d nodes to the ONOS cluster' %add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Removing cluster current master %s'%(master))
        cord_test_onos_shutdown(node = master_onos_name)
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
        assert_equal(status, True)
        new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
        assert_not_equal(master,new_master)
        log.info("Successfully removed the cluster's master instance")

    def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = member_onos_name)
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
        assert_equal(status, True)

    def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = member1_onos_name)
        log.info('Removing cluster member %s'%standbys[1])
        cord_test_onos_shutdown(node = member2_onos_name)
        time.sleep(60)
        onos_instances = onos_instances - 2
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
        assert_equal(status, True)

    def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(remove):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Removing onos container with name %s'%standbys[i])
            cord_test_onos_shutdown(node = member_onos_name)
        time.sleep(60)
        onos_instances = onos_instances - remove
        status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
        assert_equal(status, True)

    #nottest test cluster not coming up properly if member goes down
    def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, onos_instances - i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        onos_instances = onos_instances-remove
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = onos_instances-remove
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        onos_instances = onos_instances+add
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Restarting cluster')
        cord_test_onos_restart()
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Restarting cluster master %s'%master)
        cord_test_onos_restart(node = master_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up after master restart as expected')

    #test fail. master changing after restart. Need to check correct behavior.
    def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master1, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master1]
        log.info('Restarting cluster master %s'%master1)
        cord_test_onos_restart(node = master_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master2, standbys = self.get_cluster_current_master_standbys()
        assert_equal(master1,master2)
        log.info('Cluster master is same before and after cluster master restart as expected')

    def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Restarting cluster member %s'%standbys[0])
        cord_test_onos_restart(node = member_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting one member')

    def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
        cord_test_onos_restart(node = member1_onos_name)
        cord_test_onos_restart(node = member2_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting two members')

    def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status,True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(members):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Restarting cluster member %s'%standbys[i])
            cord_test_onos_restart(node = member_onos_name)

        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting %d members'%members)

    def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        log.info('Cluster current master of devices is %s'%master)
        self.change_master_current_cluster(new_master=standbys[0])
        log.info('Cluster master changed successfully')

    #tested on single onos setup.
    def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        for onos_ip in onos_ips:
            tries = 0
            flag = False
            try:
                self.cliEnter(controller = onos_ip)
                while tries <= 5:
                    routes = json.loads(self.cli.routes(jsonFormat = True))
                    if routes:
                        assert_equal(len(routes['routes4']), networks)
                        self.cliExit()
                        flag = True
                        break
                    else:
                        tries += 1
                        time.sleep(1)
                assert_equal(flag, True)
            except:
                log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
                raise

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res,True)
        cord_test_onos_shutdown(node = master_onos_name)
        time.sleep(60)
        log.info('Verifying vrouter traffic after cluster master is down')
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        cord_test_onos_restart()
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        time.sleep(15)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before master down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        log.info('Verifying vrouter traffic after app deactivated')
        time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        log.info('Verifying vrouter traffic after master down')
        cord_test_onos_shutdown(node = master_onos_name)
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before cluster member down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        log.info('Verifying vrouter after cluster member down')
        cord_test_onos_shutdown(node = member_onos_name)
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter should work properly if member of cluster goes down

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[1]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster member restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart(node = member_onos_name)
        log.info('Verifying vrouter traffic after cluster member restart')
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter should work properly if member of cluster restarts

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart()
        log.info('Verifying vrouter traffic after cluster restart')
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter should work properly if cluster restarts

    #test fails because flow state is in pending_add in onos
    def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
        ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            udpSrc = ingress_map['udp_port'],
                            udpDst = egress_map['udp_port'],
                            controller=master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
                self.success = True
            sniff(timeout=2,
                  lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
                  and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])

        for i in [0,1]:
            if i == 1:
                cord_test_onos_shutdown(node = master_onos_name)
                log.info('Verifying flows traffic after master killed')
                time.sleep(45)
            else:
                log.info('Verifying flows traffic before master killed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
            L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
            pkt = L2/L3/L4
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1' }
        ingress_map = { 'ip': '192.168.40.1' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ecn = 1,
                            controller=master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
                self.success = True
            sniff(count=2, timeout=5,
                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
                  and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1, prn = recv_cb,
                  iface = self.flows.port_map[egress])
        for i in [0,1]:
            if i == 1:
                log.info('Changing cluster master to %s'%standbys[0])
                self.change_master_current_cluster(new_master=standbys[0])
                log.info('Verifying flow traffic after cluster master changed')
            else:
                log.info('Verifying flow traffic before cluster master changed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
            pkt = L2/L3
            log.info('Sending a packet to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    #pass
    def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master,standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
        ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ipv6_extension = 0,
                            controller=master
                            )

        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
                self.success = True
            sniff(timeout=2,count=5,
                  lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
        for i in [0,1]:
            if i == 1:
                log.info('Restart cluster current master %s'%master)
                Container(master_onos_name,Onos.IMAGE).restart()
                time.sleep(45)
                log.info('Verifying flow traffic after master restart')
            else:
                log.info('Verifying flow traffic before master restart')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth
            L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
            pkt = L2/L3
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    def send_multicast_data_traffic(self, group, intf= 'veth2', source = '1.2.3.4'):
        dst_mac = self.igmp.iptomac(group)
        eth = Ether(dst= dst_mac)
        ip = IP(dst=group,src=source)
        data = repr(monotonic.monotonic())
        sendp(eth/ip/data,count=20, iface = intf)
        pkt = (eth/ip/data)
        log.info('multicast traffic packet %s'%pkt.show())

    def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
        log.info('verifying multicast traffic for group %s from source %s'%(group,source))
        self.success = False
        def recv_task():
            def igmp_recv_cb(pkt):
                log.info('multicast data received for group %s from source %s'%(group,source))
                self.success = True
            sniff(prn = igmp_recv_cb, lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1, timeout = 2, iface='veth0')
        t = threading.Thread(target = recv_task)
        t.start()
        self.send_multicast_data_traffic(group,source=source)
        t.join()
        return self.success
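    #The helper above sniffs on the receiver interface in a separate thread while
    #send_multicast_data_traffic() injects packets for the group, and reports success
    #only if a packet for the (group, source) pair is actually seen within the sniff timeout.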

    #pass
    def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.igmp.setUp(controller=master)
        groups = ['224.2.3.4','230.5.6.7']
        src_list = ['2.2.2.2','3.3.3.3']
        self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_EXCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
        assert_equal(status,True)
        status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
        assert_equal(status,False)
        log.info('restarting cluster master %s'%master)
        Container(master_onos_name,Onos.IMAGE).restart()
        time.sleep(60)
        log.info('verifying multicast data traffic after master restart')
        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
        assert_equal(status,True)
        status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
        assert_equal(status,False)

    #pass
    def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.igmp.setUp(controller=master)
        groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
        src_list = [self.igmp.randomsourceip()]
        self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_EXCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
        assert_equal(status,True)
        status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
        assert_equal(status,False)
        log.info('Killing cluster master %s'%master)
        Container(master_onos_name,Onos.IMAGE).kill()
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
        assert_equal(status, True)
        log.info('Verifying multicast data traffic after cluster master down')
        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
        assert_equal(status,True)
        status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
        assert_equal(status,False)

    def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.igmp.setUp(controller=master)
        groups = [self.igmp.random_mcast_ip()]
        src_list = [self.igmp.randomsourceip()]
        self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
        assert_equal(status,True)
        log.info("Killing the cluster's master %s"%master)
        Container(master_onos_name,Onos.IMAGE).kill()
        count = 0
        for i in range(60):
            log.info('Verifying multicast data traffic after cluster master down')
            status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
            if status:
                break
            else:
                count += 1
                time.sleep(1)
        assert_equal(status, True)
        log.info("Time taken to recover traffic after the cluster's master went down is %d seconds"%count)


    #pass
    def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        self.igmp.setUp(controller=master)
        groups = [self.igmp.random_mcast_ip()]
        src_list = [self.igmp.randomsourceip()]
        self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
        assert_equal(status,True)
        log.info('Changing cluster master %s to %s'%(master,standbys[0]))
        self.change_master_current_cluster(new_master=standbys[0])
        log.info('Verifying multicast traffic after cluster master change')
        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
        assert_equal(status,True)
        log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
                                 iface = self.V_INF1, delay = 1)
        time.sleep(10)
        status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
        assert_equal(status,False)

1139 #pass
1140 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1141 status = self.verify_cluster_status(onos_instances=onos_instances)
1142 assert_equal(status, True)
1143 master,standbys = self.get_cluster_current_master_standbys()
1144 assert_equal(len(standbys), (onos_instances-1))
1145 self.igmp.setUp(controller=master)
1146 groups = [self.igmp.random_mcast_ip()]
1147 src_list = [self.igmp.randomsourceip()]
1148 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1149 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1150 self.change_cluster_current_master(new_master = standbys[0])
1151 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1152 iface = self.V_INF1, delay = 2)
1153 time.sleep(1)
1154 self.change_cluster_current_master(new_master = master)
1155 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1156 assert_equal(status,True)
1157
1158 #pass
ChetanGaonker2099d722016-10-07 15:16:58 -07001159 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001160 def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001161 status = self.verify_cluster_status(onos_instances=onos_instances)
1162 assert_equal(status, True)
1163 master, standbys = self.get_cluster_current_master_standbys()
1164 assert_equal(len(standbys), (onos_instances-1))
1165 self.tls.setUp(controller=master)
1166 df = defer.Deferred()
1167 def eap_tls_verify(df):
1168 tls = TLSAuthTest()
1169 tls.runTest()
1170 df.callback(0)
1171 reactor.callLater(0, eap_tls_verify, df)
1172 return df
1173
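    # Scenario: run EAP-TLS authentication once before and once after moving cluster
    # mastership to the first standby.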
1174 @deferred(120)
ChetanGaonker689b3862016-10-17 16:25:01 -07001175 def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001176 master, standbys = self.get_cluster_current_master_standbys()
1177 assert_equal(len(standbys), (onos_instances-1))
1178 self.tls.setUp()
1179 df = defer.Deferred()
1180 def eap_tls_verify(df):
1181 tls = TLSAuthTest()
1182 tls.runTest()
1183 df.callback(0)
1184 for i in [0,1]:
1185 if i == 1:
1186 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1187 self.change_master_current_cluster(new_master=standbys[0])
1188 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1189 else:
1190 log.info('Verifying tls authentication before cluster master change')
1191 reactor.callLater(0, eap_tls_verify, df)
1192 return df
1193
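    # Scenario: run EAP-TLS authentication, kill the cluster master container, wait for
    # the remaining instances to re-form the cluster, then authenticate again.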
1194 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001195 def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001196 status = self.verify_cluster_status(onos_instances=onos_instances)
1197 assert_equal(status, True)
1198 master, standbys = self.get_cluster_current_master_standbys()
1199 assert_equal(len(standbys), (onos_instances-1))
1200 onos_names_ips = self.get_cluster_container_names_ips()
1201 master_onos_name = onos_names_ips[master]
1202 self.tls.setUp()
1203 df = defer.Deferred()
1204 def eap_tls_verify(df):
1205 tls = TLSAuthTest()
1206 tls.runTest()
1207 df.callback(0)
1208 for i in [0,1]:
1209 if i == 1:
1210 log.info('Killing cluster current master %s'%master)
1211 cord_test_onos_shutdown(node = master_onos_name)
1212 time.sleep(20)
1213 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1214 assert_equal(status, True)
1215 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1216 log.info('Verifying tls authentication after killing cluster master')
1217 reactor.callLater(0, eap_tls_verify, df)
1218 return df
1219
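    # Scenario: EAP-TLS with an empty client certificate must fail, both before and
    # after one cluster member is restarted.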
1220 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001221 def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001222 status = self.verify_cluster_status(onos_instances=onos_instances)
1223 assert_equal(status, True)
1224 master, standbys = self.get_cluster_current_master_standbys()
1225 assert_equal(len(standbys), (onos_instances-1))
1226 onos_names_ips = self.get_cluster_container_names_ips()
1227 member_onos_name = onos_names_ips[standbys[0]]
1228 self.tls.setUp()
1229 df = defer.Deferred()
1230 def eap_tls_no_cert(df):
1231 def tls_no_cert_cb():
1232 log.info('TLS authentication failed with no certificate')
1233 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1234 tls.runTest()
1235 assert_equal(tls.failTest, True)
1236 df.callback(0)
1237 for i in [0,1]:
1238 if i == 1:
1239 log.info('Restart cluster member %s'%standbys[0])
1240 Container(member_onos_name,Onos.IMAGE).restart()
1241 time.sleep(20)
1242 status = self.verify_cluster_status(onos_instances=onos_instances)
1243 assert_equal(status, True)
1244 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1245 log.info('Verifying tls authentication after member restart')
1246 reactor.callLater(0, eap_tls_no_cert, df)
1247 return df
1248
ChetanGaonker689b3862016-10-17 16:25:01 -07001249 #pass
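    # Scenario: verify proxy ARP replies, change the cluster master, verify again, then
    # deactivate/reactivate the proxyarp app and check replies stop/resume accordingly.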
1250 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1251 status = self.verify_cluster_status(onos_instances=onos_instances)
1252 assert_equal(status,True)
1253 master,standbys = self.get_cluster_current_master_standbys()
1254 assert_equal(len(standbys),(onos_instances-1))
1255 self.proxyarp.setUpClass()
1256 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1257 ingress = hosts+1
1258 for hostip, hostmac in hosts_config:
1259 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1260 time.sleep(1)
1261 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1262 self.change_cluster_current_master(new_master=standbys[0])
1263 log.info('verifying proxyarp after master change')
1264 for hostip, hostmac in hosts_config:
1265 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1266 time.sleep(1)
1267 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1268 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1269 time.sleep(3)
1270 for hostip, hostmac in hosts_config:
1271 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1272 time.sleep(1)
1273 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1274 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1275 time.sleep(3)
1276 for hostip, hostmac in hosts_config:
1277 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1278 time.sleep(1)
ChetanGaonker2099d722016-10-07 15:16:58 -07001279
ChetanGaonker689b3862016-10-17 16:25:01 -07001280 #pass
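    # Scenario: verify proxy ARP replies, kill one standby member and confirm proxy ARP
    # still works on the reduced cluster.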
1281 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001282 status = self.verify_cluster_status(onos_instances=onos_instances)
1283 assert_equal(status, True)
1284 master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001285 assert_equal(len(standbys), (onos_instances-1))
1286 onos_names_ips = self.get_cluster_container_names_ips()
1287 member_onos_name = onos_names_ips[standbys[1]]
1288 self.proxyarp.setUpClass()
1289 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1290 ingress = hosts+1
1291 for hostip, hostmac in hosts_config:
1292 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1293 time.sleep(1)
1294 log.info('killing cluster member %s'%standbys[1])
1295 Container(member_onos_name,Onos.IMAGE).kill()
1296 time.sleep(20)
1297 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1298 assert_equal(status, True)
1299 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1300 log.info('verifying proxy arp functionality after cluster member down')
1301 for hostip, hostmac in hosts_config:
1302 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1303 time.sleep(1)
1304
1305 #pass
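    # Scenario: fire 10 concurrent ARP requests for different hosts on different ingress
    # interfaces and expect a reply for each (success tracked per sniffing thread).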
1306 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1307 status = self.verify_cluster_status(onos_instances=onos_instances)
1308 assert_equal(status, True)
1309 self.proxyarp.setUpClass()
1310 master, standbys = self.get_cluster_current_master_standbys()
1311 assert_equal(len(standbys), (onos_instances-1))
1312 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1313 self.success = True
1314 ingress = hosts+1
1315 ports = range(ingress,ingress+10)
1316 hostmac = []
1317 hostip = []
1318 for ip,mac in hosts_config:
1319 hostmac.append(mac)
1320 hostip.append(ip)
1321 success_dir = {}
1322 def verify_proxyarp(*r):
1323 ingress, hostmac, hostip = r[0],r[1],r[2]
1324 def mac_recv_task():
1325 def recv_cb(pkt):
1326 log.info('ARP reply seen with source MAC %s' %(pkt[ARP].hwsrc))
1327 success_dir[current_thread().name] = True
1328 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1329 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1330 t = threading.Thread(target = mac_recv_task)
1331 t.start()
1332 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1333 log.info('Sending arp request for dest ip %s on interface %s' %
1334 (hostip,self.proxyarp.port_map[ingress]))
1335 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1336 t.join()
1337 t = []
1338 for i in range(10):
1339 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1340 for i in range(10):
1341 t[i].start()
1342 time.sleep(2)
1343 for i in range(10):
1344 t[i].join()
1345 if len(success_dir) != 10:
1346 self.success = False
1347 assert_equal(self.success, True)
1348
1349 #pass
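    # Scenario: add an ACL rule via the current master, change mastership and remove the
    # same rule through the new master.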
1350 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1351 status = self.verify_cluster_status(onos_instances=onos_instances)
1352 assert_equal(status, True)
1353 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001354 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001355 self.acl.setUp()
1356 acl_rule = ACLTest()
1357 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1358 if status is False:
1359 log.info('JSON request returned status %d' %code)
1360 assert_equal(status, True)
1361 result = acl_rule.get_acl_rules(controller=master)
1362 aclRules1 = result.json()['aclRules']
1363 log.info('Added acl rules is %s'%aclRules1)
1364 acl_Id = map(lambda d: d['id'], aclRules1)
1365 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1366 self.change_cluster_current_master(new_master=standbys[0])
1367 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1368 if status is False:
1369 log.info('JSON request returned status %d' %code)
1370 assert_equal(status, True)
1371
1372 #pass
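    # Scenario: add an ACL rule, kill the master and verify the rule ids are unchanged
    # when queried from the newly elected master.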
1373 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1374 status = self.verify_cluster_status(onos_instances=onos_instances)
1375 assert_equal(status, True)
1376 master,standbys = self.get_cluster_current_master_standbys()
1377 assert_equal(len(standbys),(onos_instances-1))
1378 onos_names_ips = self.get_cluster_container_names_ips()
1379 master_onos_name = onos_names_ips[master]
1380 self.acl.setUp()
1381 acl_rule = ACLTest()
1382 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1383 if status is False:
1384 log.info('JSON request returned status %d' %code)
1385 assert_equal(status, True)
1386 result1 = acl_rule.get_acl_rules(controller=master)
1387 aclRules1 = result1.json()['aclRules']
1388 log.info('Added acl rules is %s'%aclRules1)
1389 acl_Id1 = map(lambda d: d['id'], aclRules1)
1390 log.info('Killing cluster current master %s'%master)
1391 Container(master_onos_name,Onos.IMAGE).kill()
1392 time.sleep(45)
1393 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1394 assert_equal(status, True)
1395 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1396 assert_equal(len(standbys),(onos_instances-2))
1397 assert_not_equal(new_master,master)
1398 result2 = acl_rule.get_acl_rules(controller=new_master)
1399 aclRules2 = result2.json()['aclRules']
1400 acl_Id2 = map(lambda d: d['id'], aclRules2)
1401 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1402 assert_equal(acl_Id2,acl_Id1)
1403
1404 #acl traffic scenario not working as the acl rule is not getting added to onos
1405 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1406 status = self.verify_cluster_status(onos_instances=onos_instances)
1407 assert_equal(status, True)
1408 master,standbys = self.get_cluster_current_master_standbys()
1409 assert_equal(len(standbys),(onos_instances-1))
1410 onos_names_ips = self.get_cluster_container_names_ips()
1411 member1_onos_name = onos_names_ips[standbys[0]]
1412 member2_onos_name = onos_names_ips[standbys[1]]
1413 ingress = self.acl.ingress_iface
1414 egress = self.acl.CURRENT_PORT_NUM
1415 acl_rule = ACLTest()
1416 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1417 self.acl.CURRENT_PORT_NUM += 1
1418 time.sleep(5)
1419 if status is False:
1420 log.info('JSON request returned status %d' %code)
1421 assert_equal(status, True)
1422 srcMac = '00:00:00:00:00:11'
1423 dstMac = host_ip_mac[0][1]
1424 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1425 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1426 time.sleep(10)
1427 if status is False:
1428 log.info('JSON request returned status %d' %code)
1429 assert_equal(status, True)
1430 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1431 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1432 Container(member1_onos_name, Onos.IMAGE).kill()
1433 Container(member2_onos_name, Onos.IMAGE).kill()
1434 time.sleep(40)
1435 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1436 assert_equal(status, True)
1437 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1438 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1439
1440 #pass
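    # Scenario: obtain a DHCP lease via the relay, change the cluster master, release the
    # address and confirm the same IP is offered again on rediscovery.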
1441 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1442 status = self.verify_cluster_status(onos_instances=onos_instances)
1443 assert_equal(status, True)
1444 master,standbys = self.get_cluster_current_master_standbys()
1445 assert_equal(len(standbys),(onos_instances-1))
1446 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001447 mac = self.dhcprelay.get_mac(iface)
1448 self.dhcprelay.host_load(iface)
1449 ##We use the defaults for this test, which serves as an example for others
1450 ##You don't need to restart the dhcpd server if retaining the default config
1451 config = self.dhcprelay.default_config
1452 options = self.dhcprelay.default_options
1453 subnet = self.dhcprelay.default_subnet_config
1454 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1455 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1456 config = config,
1457 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001458 subnet = subnet,
1459 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001460 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1461 cip, sip = self.dhcprelay.send_recv(mac)
1462 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1463 self.change_master_current_cluster(new_master=standbys[0])
1464 log.info('Releasing ip %s to server %s' %(cip, sip))
1465 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1466 log.info('Triggering DHCP discover again after release')
1467 cip2, sip2 = self.dhcprelay.send_recv(mac)
1468 log.info('Verifying released IP was given back on rediscover')
1469 assert_equal(cip, cip2)
1470 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1471 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001472 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001473
ChetanGaonker689b3862016-10-17 16:25:01 -07001474
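    # Scenario: obtain a DHCP lease, kill the cluster master and verify the client is
    # re-offered the same address through the surviving instances.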
1475 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1476 status = self.verify_cluster_status(onos_instances=onos_instances)
1477 assert_equal(status, True)
1478 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001479 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001480 onos_names_ips = self.get_cluster_container_names_ips()
1481 master_onos_name = onos_names_ips[master]
1482 self.dhcprelay.setUpClass(controller=master)
1483 mac = self.dhcprelay.get_mac(iface)
1484 self.dhcprelay.host_load(iface)
1485 ##We use the defaults for this test, which serves as an example for others
1486 ##You don't need to restart the dhcpd server if retaining the default config
1487 config = self.dhcprelay.default_config
1488 options = self.dhcprelay.default_options
1489 subnet = self.dhcprelay.default_subnet_config
1490 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1491 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1492 config = config,
1493 options = options,
1494 subnet = subnet,
1495 controller=master)
1496 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1497 log.info('Initiating dhcp process from client %s'%mac)
1498 cip, sip = self.dhcprelay.send_recv(mac)
1499 log.info('Killing cluster current master %s'%master)
1500 Container(master_onos_name, Onos.IMAGE).kill()
1501 time.sleep(60)
1502 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1503 assert_equal(status, True)
1504 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1505 log.info("Verifying dhcp clients gets same IP after cluster master restarts")
1506 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1507 assert_equal(new_cip, cip)
1508 self.dhcprelay.tearDownClass(controller=standbys[0])
1509
1510 #pass
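    # Scenario: request leases for three different client MACs while toggling cluster
    # mastership between requests; each client must still get an address.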
1511 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1512 status = self.verify_cluster_status(onos_instances=onos_instances)
1513 assert_equal(status, True)
1514 master,standbys = self.get_cluster_current_master_standbys()
1515 assert_equal(len(standbys),(onos_instances-1))
1516 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001517 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1518 self.dhcprelay.host_load(iface)
1519 ##We use the defaults for this test, which serves as an example for others
1520 ##You don't need to restart the dhcpd server if retaining the default config
1521 config = self.dhcprelay.default_config
1522 options = self.dhcprelay.default_options
1523 subnet = self.dhcprelay.default_subnet_config
1524 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1525 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1526 config = config,
1527 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001528 subnet = subnet,
1529 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001530 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1531 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1532 assert_not_equal(cip1,None)
1533 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1534 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1535 self.change_master_current_cluster(new_master=standbys[0])
1536 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1537 assert_not_equal(cip2,None)
1538 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1539 self.change_master_current_cluster(new_master=master)
1540 log.info('Changing cluster master from %s to %s'%(standbys[0],master))
1541 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1542 assert_not_equal(cip3,None)
1543 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
ChetanGaonker689b3862016-10-17 16:25:01 -07001544 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001545
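    # Scenario: run the cord subscriber channel-surfing verification before and after
    # restarting the whole ONOS cluster.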
ChetanGaonker689b3862016-10-17 16:25:01 -07001546 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001547 status = self.verify_cluster_status(onos_instances=onos_instances)
1548 assert_equal(status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001549 master, standbys = self.get_cluster_current_master_standbys()
        self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001550 self.subscriber.num_subscribers = 5
1551 self.subscriber.num_channels = 10
1552 for i in [0,1]:
1553 if i == 1:
1554 cord_test_onos_restart()
1555 time.sleep(45)
1556 status = self.verify_cluster_status(onos_instances=onos_instances)
1557 assert_equal(status, True)
1558 log.info('Verifying cord subscriber functionality after cluster restart')
1559 else:
1560 log.info('Verifying cord subscriber functionality before cluster restart')
1561 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1562 num_channels = self.subscriber.num_channels,
1563 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1564 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1565 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1566 self.subscriber.num_channels))
1567 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001568 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001569
ChetanGaonker689b3862016-10-17 16:25:01 -07001570 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1571 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1572 status = self.verify_cluster_status(onos_instances=onos_instances)
1573 assert_equal(status, True)
1574 master,standbys = self.get_cluster_current_master_standbys()
1575 assert_equal(len(standbys),(onos_instances-1))
1576 self.subscriber.setUpClass(controller=master)
1577 self.subscriber.num_subscribers = 5
1578 self.subscriber.num_channels = 10
1579 for i in [0,1]:
1580 if i == 1:
1581 status=self.withdraw_cluster_current_mastership(master_ip=master)
1582 assert_equal(status, True)
1583 master,standbys = self.get_cluster_current_master_standbys()
1584 log.info('verifying cord subscriber functionality after cluster current master withdraw mastership')
1585 else:
1586 log.info('verifying cord subscriber functionality before cluster master withdraw mastership')
1587 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1588 num_channels = self.subscriber.num_channels,
1589 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1590 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1591 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1592 self.subscriber.num_channels),controller=master)
1593 assert_equal(test_status, True)
1594 self.subscriber.tearDownClass(controller=master)
1595
1596 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1597 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001598 status = self.verify_cluster_status(onos_instances=onos_instances)
1599 assert_equal(status, True)
1600 master, standbys = self.get_cluster_current_master_standbys()
1601 assert_equal(len(standbys),(onos_instances-1))
1602 onos_names_ips = self.get_cluster_container_names_ips()
1603 member_onos_name = onos_names_ips[standbys[0]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001604 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001605 num_subscribers = 1
1606 num_channels = 10
1607 for i in [0,1]:
1608 if i == 1:
1609 cord_test_onos_shutdown(node = member_onos_name)
1610 time.sleep(30)
ChetanGaonker689b3862016-10-17 16:25:01 -07001611 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001612 assert_equal(status, True)
1613 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1614 else:
1615 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1616 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1617 num_channels = num_channels,
1618 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1619 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1620 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
ChetanGaonker689b3862016-10-17 16:25:01 -07001621 negative_subscriber_auth = 'all',controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001622 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001623 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001624
ChetanGaonker689b3862016-10-17 16:25:01 -07001625 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001626 status = self.verify_cluster_status(onos_instances=onos_instances)
1627 assert_equal(status, True)
1628 master, standbys = self.get_cluster_current_master_standbys()
1629 assert_equal(len(standbys),(onos_instances-1))
1630 onos_names_ips = self.get_cluster_container_names_ips()
1631 member1_onos_name = onos_names_ips[standbys[0]]
1632 member2_onos_name = onos_names_ips[standbys[1]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001633 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001634 num_subscribers = 1
1635 num_channels = 10
1636 for i in [0,1]:
1637 if i == 1:
1638 cord_test_onos_shutdown(node = member1_onos_name)
1639 cord_test_onos_shutdown(node = member2_onos_name)
1640 time.sleep(60)
1641 status = self.verify_cluster_status(onos_instances=onos_instances-2)
1642 assert_equal(status, True)
1643 log.info('Verifying cord subscriber functionality after two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1644 else:
1645 log.info('Verifying cord subscriber functionality before two cluster members %s and %s go down'%(standbys[0],standbys[1]))
1646 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1647 num_channels = num_channels,
1648 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1649 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1650 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1651 negative_subscriber_auth = 'all')
1652 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001653 self.subscriber.tearDownClass(controller=master)
1654
1655 #pass
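    # Scenario: with multiple OVS devices connected, every device must report a master
    # and the expected number of standby controllers.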
1656 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1657 status = self.verify_cluster_status(onos_instances=onos_instances)
1658 assert_equal(status, True)
1659 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1660 for device in device_dict.keys():
1661 log.info("Device is %s"%device_dict[device])
1662 assert_not_equal(device_dict[device]['master'],'none')
1663 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1664 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1665
1666 #pass
1667 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1668 status = self.verify_cluster_status(onos_instances=onos_instances)
1669 assert_equal(status, True)
1670 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1671 cluster_ips = self.get_cluster_current_member_ips()
1672 for ip in cluster_ips:
1673 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1674 assert_equal(len(device_dict.keys()),onos_instances)
1675 for device in device_dict.keys():
1676 log.info("Device is %s"%device_dict[device])
1677 assert_not_equal(device_dict[device]['master'],'none')
1678 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1679 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1680
1681 #pass
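    # Scenario: restart a controller that currently masters devices and verify its device
    # count drops to zero while total mastership across the cluster is preserved.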
1682 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1683 status = self.verify_cluster_status(onos_instances=onos_instances)
1684 assert_equal(status, True)
1685 onos_names_ips = self.get_cluster_container_names_ips()
1686 master_count = self.get_number_of_devices_of_master()
1687 log.info('Master count information is %s'%master_count)
1688 total_devices = 0
1689 for master in master_count.keys():
1690 total_devices += master_count[master]['size']
1691 if master_count[master]['size'] != 0:
1692 restart_ip = master
1693 assert_equal(total_devices,onos_instances)
1694 member_onos_name = onos_names_ips[restart_ip]
1695 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1696 Container(member_onos_name, Onos.IMAGE).restart()
1697 time.sleep(40)
1698 master_count = self.get_number_of_devices_of_master()
1699 log.info('Master count information after restart is %s'%master_count)
1700 total_devices = 0
1701 for master in master_count.keys():
1702 total_devices += master_count[master]['size']
1703 if master == restart_ip:
1704 assert_equal(master_count[master]['size'], 0)
1705 assert_equal(total_devices,onos_instances)
1706
1707 #pass
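    # Scenario: kill a controller that masters devices and verify the surviving members
    # re-balance mastership over all connected devices.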
1708 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1709 status = self.verify_cluster_status(onos_instances=onos_instances)
1710 assert_equal(status, True)
1711 onos_names_ips = self.get_cluster_container_names_ips()
1712 master_count = self.get_number_of_devices_of_master()
1713 log.info('Master count information is %s'%master_count)
1714 total_devices = 0
1715 for master in master_count.keys():
1716 total_devices += master_count[master]['size']
1717 if master_count[master]['size'] != 0:
1718 restart_ip = master
1719 assert_equal(total_devices,onos_instances)
1720 master_onos_name = onos_names_ips[restart_ip]
1721 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1722 Container(master_onos_name, Onos.IMAGE).kill()
1723 time.sleep(40)
1724 for ip in onos_names_ips.keys():
1725 if ip != restart_ip:
1726 controller_ip = ip
1727 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1728 assert_equal(status, True)
1729 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1730 log.info('Master count information after restart is %s'%master_count)
1731 total_devices = 0
1732 for master in master_count.keys():
1733 total_devices += master_count[master]['size']
1734 if master == restart_ip:
1735 assert_equal(master_count[master]['size'], 0)
1736 assert_equal(total_devices,onos_instances)
1737
1738 #pass
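    # Scenario: withdraw mastership of one device from its current master and verify that
    # master's device count decreases by one.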
1739 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1740 status = self.verify_cluster_status(onos_instances=onos_instances)
1741 assert_equal(status, True)
1742 master_count = self.get_number_of_devices_of_master()
1743 log.info('Master count information is %s'%master_count)
1744 total_devices = 0
1745 for master in master_count.keys():
1746 total_devices += int(master_count[master]['size'])
1747 if master_count[master]['size'] != 0:
1748 master_ip = master
1749 log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
1750 device_id = str(master_count[master]['devices'][0])
1751 device_count = master_count[master]['size']
1752 assert_equal(total_devices,onos_instances)
1753 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1754 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1755 assert_equal(status, True)
1756 master_count = self.get_number_of_devices_of_master()
1757 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1758 total_devices = 0
1759 for master in master_count.keys():
1760 total_devices += int(master_count[master]['size'])
1761 if master == master_ip:
1762 assert_equal(master_count[master]['size'], device_count-1)
1763 assert_equal(total_devices,onos_instances)
1764
1765 #pass
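    # Scenario: restart the whole cluster and verify the device mastership totals are
    # restored afterwards.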
1766 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1767 status = self.verify_cluster_status(onos_instances=onos_instances)
1768 assert_equal(status, True)
1769 master_count = self.get_number_of_devices_of_master()
1770 log.info('Master count information is %s'%master_count)
1771 total_devices = 0
1772 for master in master_count.keys():
1773 total_devices += master_count[master]['size']
1774 assert_equal(total_devices,onos_instances)
1775 log.info('Restarting cluster')
1776 cord_test_onos_restart()
1777 time.sleep(60)
1778 master_count = self.get_number_of_devices_of_master()
1779 log.info('Master count information after restart is %s'%master_count)
1780 total_devices = 0
1781 for master in master_count.keys():
1782 total_devices += master_count[master]['size']
1783 assert_equal(total_devices,onos_instances)