# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl, get_mac
from OltConfig import OltConfig
from socket import socket
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart, cord_test_restart_cluster
from portmaps import g_subscriber_port_map
from scapy.all import *
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
from OnosLog import OnosLog
from CordLogger import CordLogger
import os
import json
import random
import collections
log.setLevel('INFO')

class cluster_exchange(CordLogger):
    test_path = os.path.dirname(os.path.realpath(__file__))
    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
    mac = RandMAC()._fix()
    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
    igmp_ip = IP(dst = '224.0.0.22')
    ONOS_INSTANCES = 3
    V_INF1 = 'veth0'
    TLS_TIMEOUT = 100
    device_id = 'of:' + get_mac()
    igmp = cluster_igmp()
    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10', end_ip = '224.1.10.49')
    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35', end_ip = '38.24.35.56')
    tls = cluster_tls()
    flows = cluster_flows()
    proxyarp = cluster_proxyarp()
    vrouter = cluster_vrouter()
    acl = cluster_acl()
    dhcprelay = cluster_dhcprelay()
    subscriber = cluster_subscriber()
    testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_single_controller_restarts', 'test_cluster_restarts')

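    # Tests named in testcaseLoggers skip the per-test CordLogger setUp/tearDown below;
    # they presumably manage their own log collection across their restart iterations.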
    def setUp(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).setUp()

    def tearDown(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).tearDown()

    def get_controller(self):
        controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
        controller = controller.split(',')[0]
        return controller

    @classmethod
    def get_controllers(cls):
        controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
        return controllers.split(',')

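    # Open an ONOS CLI session with up to 3 connection retries and cache the handle on
    # self.cli; cliExit() tears the session down again.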
    def cliEnter(self, controller = None):
        retries = 0
        while retries < 3:
            self.cli = OnosCliDriver(controller = controller, connect = True)
            if self.cli.handle:
                break
            else:
                retries += 1
                time.sleep(2)

    def cliExit(self):
        self.cli.disconnect()

    def get_leader(self, controller = None):
        self.cliEnter(controller = controller)
        try:
            result = json.loads(self.cli.leaders(jsonFormat = True))
        except:
            result = None

        if result is None:
            log.info('Leaders command failure for controller %s' %controller)
        else:
            log.info('Leaders returned: %s' %result)
        self.cliExit()
        return result

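    # get_leaders() accepts either a single controller IP or a list/tuple of IPs and maps
    # each controller to its 'leaders' command output (None when the command fails).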
    def get_leaders(self, controller = None):
        result_map = {}
        if controller is None:
            controller = self.get_controller()
        if type(controller) in [ list, tuple ]:
            for c in controller:
                leaders = self.get_leader(controller = c)
                result_map[c] = leaders
        else:
            leaders = self.get_leader(controller = controller)
            result_map[controller] = leaders
        return result_map

    def verify_leaders(self, controller = None):
        leaders_map = self.get_leaders(controller = controller)
        failed = [ k for k, v in leaders_map.items() if v is None ]
        return failed

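    # verify_cluster_status polls the ONOS 'summary' output (up to ~10 tries) until the
    # reported node count reaches onos_instances; with verify=True the count must match exactly.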
    def verify_cluster_status(self, controller = None, onos_instances = ONOS_INSTANCES, verify = False):
        tries = 0
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
                if cluster_summary:
                    log.info("cluster 'summary' command output is %s"%cluster_summary)
                    nodes = cluster_summary['nodes']
                    if verify:
                        if nodes == onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                    else:
                        if nodes >= onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return False
        except:
            raise Exception('Failed to get cluster members')
            return False

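    # Member IPs come from the ONOS 'nodes' output and are sorted by the last octet; the
    # optional nodes_filter callable can drop entries (e.g. by state) before the ids are collected.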
    def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
        tries = 0
        cluster_ips = []
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
                if cluster_nodes:
                    log.info("cluster 'nodes' output is %s"%cluster_nodes)
                    if nodes_filter:
                        cluster_nodes = nodes_filter(cluster_nodes)
                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
                    self.cliExit()
                    cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
                    return cluster_ips
                else:
                    tries += 1
            self.cliExit()
            return cluster_ips
        except:
            raise Exception('Failed to get cluster members')
            return cluster_ips

    def get_cluster_container_names_ips(self, controller = None):
        onos_names_ips = {}
        onos_ips = self.get_cluster_current_member_ips(controller = controller)
        onos_names_ips[onos_ips[0]] = Onos.NAME
        onos_names_ips[Onos.NAME] = onos_ips[0]
        for i in range(1, len(onos_ips)):
            name = '{0}-{1}'.format(Onos.NAME, i+1)
            onos_names_ips[onos_ips[i]] = name
            onos_names_ips[name] = onos_ips[i]

        return onos_names_ips

    #identifying current master of a connected device, not tested
    def get_cluster_current_master_standbys(self, controller = None, device_id = device_id):
        master = None
        standbys = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        log.info('Verifying device info in line %s'%device)
                        if device['id'] == device_id:
                            master = str(device['master'])
                            standbys = map(lambda d: str(d), device['standbys'])
                            log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
                            self.cliExit()
                            return master, standbys
                    self.cliExit()
                    return master, standbys
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return master, standbys
        except:
            raise Exception('Failed to get cluster members')
            return master, standbys

    def get_cluster_current_master_standbys_of_connected_devices(self, controller = None):
        '''Returns the master and standbys for every device connected to the ONOS cluster'''
        device_dict = {}
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_dict = {}
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        device_dict[str(device['id'])] = {'master':str(device['master']),'standbys':device['standbys']}
                        for i in range(len(device_dict[device['id']]['standbys'])):
                            device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
                        log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
                    self.cliExit()
                    return device_dict
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_dict
        except:
            raise Exception('Failed to get cluster members')
            return device_dict

    #identify current master of a connected device, not tested
    def get_cluster_connected_devices(self, controller = None):
        '''Returns all the devices connected to the ONOS cluster'''
        device_list = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_list = []
                devices = json.loads(self.cli.devices(jsonFormat = True))
                log.info("cluster 'devices' command output is %s"%devices)
                if devices:
                    for device in devices:
                        log.info('device id is %s'%device['id'])
                        device_list.append(str(device['id']))
                    self.cliExit()
                    return device_list
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_list
        except:
            raise Exception('Failed to get cluster members')
            return device_list

    def get_number_of_devices_of_master(self, controller = None):
        '''Returns master-device pairs, i.e. which master controls which devices'''
        master_count = {}
        try:
            cli = self.cliEnter(controller = controller)
            masters = json.loads(self.cli.masters(jsonFormat = True))
            if masters:
                for master in masters:
                    master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
                return master_count
            else:
                return master_count
        except:
            raise Exception('Failed to get cluster members')
            return master_count

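    # Mastership manipulation below goes through the ONOS 'device-role' CLI command and is
    # then re-checked with get_cluster_current_master_standbys().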
    def change_master_current_cluster(self, new_master = None, device_id = device_id, controller = None):
        if new_master is None: return False
        self.cliEnter(controller = controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        master, standbys = self.get_cluster_current_master_standbys(controller = controller, device_id = device_id)
        assert_equal(master, new_master)
        log.info('Cluster master changed to %s successfully'%new_master)

    def withdraw_cluster_current_mastership(self, master_ip = None, device_id = device_id, controller = None):
        '''The current master loses its mastership and hence a new master will be elected'''
        self.cliEnter(controller = controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        new_master_ip, standbys = self.get_cluster_current_master_standbys(controller = controller, device_id = device_id)
        assert_not_equal(new_master_ip, master_ip)
        log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id, master_ip))
        log.info('Cluster new master is %s'%new_master_ip)
        return True

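    # Iteratively restart controllers (round-robin, or re-target the node returned by the
    # previous check); after each restart, every node's log is scanned for StorageException
    # timeouts and the 'leaders' command must succeed cluster-wide.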
    def test_cluster_controller_restarts(self):
        '''Test the cluster by repeatedly killing the controllers'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(controller = None):
            adjacent_controller = None
            adjacent_controllers = None
            if controller:
                adjacent_controllers = list(set(controllers) - set([controller]))
                adjacent_controller = adjacent_controllers[0]
            for node in controllers:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    failed = self.verify_leaders(controllers)
                    if failed:
                        log.info('Leaders command failed on nodes: %s' %failed)
                    assert_equal(len(failed), 0)
                    return controller

            try:
                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
                log.info('ONOS cluster formed with controllers: %s' %ips)
                st = True
            except:
                st = False

            failed = self.verify_leaders(controllers)
            assert_equal(len(failed), 0)
            if st is False:
                log.info('No storage exception and ONOS cluster was not formed successfully')
            else:
                controller = None

            return controller

        next_controller = None
        tries = 10
        for num in range(tries):
            index = num % ctlr_len
            #index = random.randrange(0, ctlr_len)
            controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
            controller = onos_map[controller_name]
            log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
            try:
                cord_test_onos_restart(node = controller_name, timeout = 0)
                time.sleep(60)
            except:
                time.sleep(5)
                continue
            next_controller = check_exception(controller = controller)

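    # Variant of the test above: a single randomly chosen controller is repeatedly shut down
    # and restarted; check_exception() inspects the adjacent nodes after the shutdown and the
    # whole cluster (inclusive=True) after the restart.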
    def test_cluster_single_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the same controller'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(controller, inclusive = False):
            adjacent_controllers = list(set(controllers) - set([controller]))
            adjacent_controller = adjacent_controllers[0]
            controller_list = adjacent_controllers if inclusive == False else controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                assert_equal(len(failed), 0)
                return controller

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if controller in ips and inclusive is False:
                    log.info('Controller %s still ACTIVE on Node %s after it was shutdown' %(controller, ctlr))
                if controller not in ips and inclusive is True:
                    log.info('Controller %s still INACTIVE on Node %s after it was shutdown' %(controller, ctlr))

            return controller

        tries = 10
        #choose a random controller for shutdown/restarts
        controller = controllers[random.randrange(0, ctlr_len)]
        controller_name = onos_map[controller]
        for num in range(tries):
            index = num % ctlr_len
            log.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
            try:
                cord_test_onos_shutdown(node = controller_name)
                time.sleep(20)
            except:
                time.sleep(5)
                continue
            #check for exceptions on the adjacent nodes
            check_exception(controller)
            #Now restart the controller back
            log.info('Restarting back the controller %s' %controller_name)
            cord_test_onos_restart(node = controller_name)
            time.sleep(60)
            check_exception(controller, inclusive = True)

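    # Variant that restarts the whole cluster on each iteration via cord_test_restart_cluster()
    # and then expects every node to report all members ACTIVE/READY again.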
    def test_cluster_restarts(self):
        '''Test the cluster by repeatedly restarting the entire cluster'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception():
            controller_list = controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                assert_equal(len(failed), 0)
                return

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                assert_equal(len(ips), len(controllers))

        tries = 10
        for num in range(tries):
            log.info('ITERATION: %d. Restarting cluster with controllers at %s' %(num+1, controllers))
            try:
                cord_test_restart_cluster()
                log.info('Delaying before verifying cluster status')
                time.sleep(60)
            except:
                time.sleep(10)
                continue
            #check for exceptions on the adjacent nodes
            check_exception()

    #pass
    def test_cluster_formation_and_verification(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster exists with %d ONOS instances'%onos_instances)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d nodes to the ONOS cluster' %add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Removing cluster current master %s'%(master))
        cord_test_onos_shutdown(node = master_onos_name)
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances, controller=standbys[0])
        assert_equal(status, True)
        new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
        assert_not_equal(master, new_master)
        log.info("Successfully removed the cluster's master instance")

    def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = member_onos_name)
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
        assert_equal(status, True)

    def test_cluster_removing_two_members(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = member1_onos_name)
        log.info('Removing cluster member %s'%standbys[1])
        cord_test_onos_shutdown(node = member2_onos_name)
        time.sleep(60)
        onos_instances = onos_instances - 2
        status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
        assert_equal(status, True)

    def test_cluster_removing_N_members(self, remove = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(remove):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Removing onos container with name %s'%standbys[i])
            cord_test_onos_shutdown(node = member_onos_name)
        time.sleep(60)
        onos_instances = onos_instances - remove
        status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
        assert_equal(status, True)

    #nottest test cluster not coming up properly if member goes down
    def test_cluster_adding_and_removing_members(self, onos_instances = ONOS_INSTANCES, add = 2, remove = 2):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, onos_instances - i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        onos_instances = onos_instances-remove
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_removing_and_adding_member(self, onos_instances = ONOS_INSTANCES, add = 1, remove = 1):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = onos_instances-remove
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        onos_instances = onos_instances+add
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Restarting cluster')
        cord_test_onos_restart()
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    def test_cluster_master_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Restarting cluster master %s'%master)
        cord_test_onos_restart(node = master_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up after master restart as expected')

    #test fails. master changes after restart. Need to check correct behavior.
    def test_cluster_master_ip_after_master_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master1, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master1]
        log.info('Restarting cluster master %s'%master1)
        cord_test_onos_restart(node = master_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master2, standbys = self.get_cluster_current_master_standbys()
        assert_equal(master1, master2)
        log.info('Cluster master is same before and after cluster master restart as expected')

    def test_cluster_one_member_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Restarting cluster member %s'%standbys[0])
        cord_test_onos_restart(node = member_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting one member')

    def test_cluster_two_members_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Restarting cluster members %s and %s'%(standbys[0], standbys[1]))
        cord_test_onos_restart(node = member1_onos_name)
        cord_test_onos_restart(node = member2_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting two members')

    def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(members):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Restarting cluster member %s'%standbys[i])
            cord_test_onos_restart(node = member_onos_name)

        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting %d members'%members)

    def test_cluster_state_with_master_change(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        log.info('Cluster current master of devices is %s'%master)
        self.change_master_current_cluster(new_master=standbys[0])
        log.info('Cluster master changed successfully')

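    # The vrouter tests below rely on the cluster_vrouter helper; judging from its use here,
    # vrouter_network_verify(networks, peers) provisions and advertises the given number of
    # networks and vrouter_traffic_verify() replays traffic to check the routes still forward.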
    #tested on single onos setup.
    def test_cluster_with_vrouter_routes_in_cluster_members(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        for onos_ip in onos_ips:
            tries = 0
            flag = False
            try:
                self.cliEnter(controller = onos_ip)
                while tries <= 5:
                    routes = json.loads(self.cli.routes(jsonFormat = True))
                    if routes:
                        assert_equal(len(routes['routes4']), networks)
                        self.cliExit()
                        flag = True
                        break
                    else:
                        tries += 1
                        time.sleep(1)
                assert_equal(flag, True)
            except:
                log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
                raise

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_master_down(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        cord_test_onos_shutdown(node = master_onos_name)
        time.sleep(60)
        log.info('Verifying vrouter traffic after cluster master is down')
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_restarting_master(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        cord_test_onos_restart()
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        time.sleep(15)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app_and_making_master_down(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before master down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        log.info('Verifying vrouter traffic after app deactivated')
        time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        log.info('Verifying vrouter traffic after master down')
        cord_test_onos_shutdown(node = master_onos_name)
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before cluster member down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        log.info('Verifying vrouter after cluster member down')
        cord_test_onos_shutdown(node = member_onos_name)
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter should work properly if member of cluster goes down

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_restarting_member(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[1]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster member restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart(node = member_onos_name)
        log.info('Verifying vrouter traffic after cluster member restart')
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter should work properly if member of cluster restarts

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_restarting_cluster(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart()
        log.info('Verifying vrouter traffic after cluster restart')
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter should work properly if member of cluster restarts

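    # The flow tests below program flows through OnosFlowCtrl against the cluster master, then
    # send crafted scapy packets on the ingress port and sniff the egress port in a background
    # thread to confirm the flow still forwards before and after the cluster event.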
    #test fails because flow state is in pending_add in onos
    def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
        ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            udpSrc = ingress_map['udp_port'],
                            udpDst = egress_map['udp_port'],
                            controller = master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
                self.success = True
            sniff(timeout=2,
                  lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
                  and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])

        for i in [0,1]:
            if i == 1:
                cord_test_onos_shutdown(node = master_onos_name)
                log.info('Verifying flows traffic after master killed')
                time.sleep(45)
            else:
                log.info('Verifying flows traffic before master killed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
            L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
            pkt = L2/L3/L4
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    def test_cluster_state_changing_master_and_flows_of_ecn(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1' }
        ingress_map = { 'ip': '192.168.40.1' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ecn = 1,
                            controller = master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
                self.success = True
            sniff(count=2, timeout=5,
                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
                  and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1, prn = recv_cb,
                  iface = self.flows.port_map[egress])
        for i in [0,1]:
            if i == 1:
                log.info('Changing cluster master to %s'%standbys[0])
                self.change_master_current_cluster(new_master=standbys[0])
                log.info('Verifying flow traffic after cluster master changed')
            else:
                log.info('Verifying flow traffic before cluster master changed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
            pkt = L2/L3
            log.info('Sending a packet to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    #pass
    def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
        ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ipv6_extension = 0,
                            controller = master
                            )

        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
                self.success = True
            sniff(timeout=2, count=5,
                  lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
        for i in [0,1]:
            if i == 1:
                log.info('Restart cluster current master %s'%master)
                Container(master_onos_name, Onos.IMAGE).restart()
                time.sleep(45)
                log.info('Verifying flow traffic after master restart')
            else:
                log.info('Verifying flow traffic before master restart')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth
            L3 = IPv6(src = ingress_map['ipv6'], dst = egress_map['ipv6'], nh = 0)
            pkt = L2/L3
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

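    # Multicast helpers: send_multicast_data_traffic() transmits 20 data packets for the group
    # on veth2 (by default), while verify_igmp_data_traffic() sniffs veth0 in a background
    # thread for that group/source pair while the traffic is sent.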
    def send_multicast_data_traffic(self, group, intf = 'veth2', source = '1.2.3.4'):
        dst_mac = self.igmp.iptomac(group)
        eth = Ether(dst = dst_mac)
        ip = IP(dst = group, src = source)
        data = repr(monotonic.monotonic())
        sendp(eth/ip/data, count=20, iface = intf)
        pkt = (eth/ip/data)
        log.info('multicast traffic packet %s'%pkt.show())

    def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4'):
        log.info('verifying multicast traffic for group %s from source %s'%(group, source))
        self.success = False
        def recv_task():
            def igmp_recv_cb(pkt):
                log.info('multicast data received for group %s from source %s'%(group, source))
                self.success = True
            sniff(prn = igmp_recv_cb, lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1, timeout = 2, iface='veth0')
        t = threading.Thread(target = recv_task)
        t.start()
        self.send_multicast_data_traffic(group, source=source)
        t.join()
        return self.success

    #pass
    def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.igmp.setUp(controller=master)
        groups = ['224.2.3.4','230.5.6.7']
        src_list = ['2.2.2.2','3.3.3.3']
        self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_EXCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0], intf=self.V_INF1, source=src_list[0])
        assert_equal(status, True)
        status = self.verify_igmp_data_traffic(groups[1], intf = self.V_INF1, source= src_list[1])
        assert_equal(status, False)
        log.info('restarting cluster master %s'%master)
        Container(master_onos_name, Onos.IMAGE).restart()
        time.sleep(60)
        log.info('verifying multicast data traffic after master restart')
        status = self.verify_igmp_data_traffic(groups[0], intf=self.V_INF1, source=src_list[0])
        assert_equal(status, True)
        status = self.verify_igmp_data_traffic(groups[1], intf = self.V_INF1, source= src_list[1])
        assert_equal(status, False)

    #pass
    def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.igmp.setUp(controller=master)
        groups = [self.igmp.random_mcast_ip(), self.igmp.random_mcast_ip()]
        src_list = [self.igmp.randomsourceip()]
        self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_EXCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0], intf=self.V_INF1, source=src_list[0])
        assert_equal(status, True)
        status = self.verify_igmp_data_traffic(groups[1], intf = self.V_INF1, source= src_list[0])
        assert_equal(status, False)
        log.info('Killing cluster master %s'%master)
        Container(master_onos_name, Onos.IMAGE).kill()
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances=onos_instances-1, controller=standbys[0])
        assert_equal(status, True)
        log.info('Verifying multicast data traffic after cluster master down')
        status = self.verify_igmp_data_traffic(groups[0], intf=self.V_INF1, source=src_list[0])
        assert_equal(status, True)
        status = self.verify_igmp_data_traffic(groups[1], intf = self.V_INF1, source= src_list[0])
        assert_equal(status, False)

    def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.igmp.setUp(controller=master)
        groups = [self.igmp.random_mcast_ip()]
        src_list = [self.igmp.randomsourceip()]
        self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0], intf=self.V_INF1, source=src_list[0])
        assert_equal(status, True)
        log.info("Killing the cluster's master %s"%master)
        Container(master_onos_name, Onos.IMAGE).kill()
        count = 0
        for i in range(60):
            log.info('Verifying multicast data traffic after cluster master down')
            status = self.verify_igmp_data_traffic(groups[0], intf=self.V_INF1, source=src_list[0])
            if status:
                break
            else:
                count += 1
                time.sleep(1)
        assert_equal(status, True)
        log.info("Time taken to recover traffic after the cluster's master went down is %d seconds"%count)


    #pass
    def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        self.igmp.setUp(controller=master)
        groups = [self.igmp.random_mcast_ip()]
        src_list = [self.igmp.randomsourceip()]
        self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0], intf=self.V_INF1, source=src_list[0])
        assert_equal(status, True)
        log.info('Changing cluster master %s to %s'%(master, standbys[0]))
        self.change_master_current_cluster(new_master=standbys[0])
        log.info('Verifying multicast traffic after cluster master change')
        status = self.verify_igmp_data_traffic(groups[0], intf=self.V_INF1, source=src_list[0])
        assert_equal(status, True)
        log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
                                 iface = self.V_INF1, delay = 1)
        time.sleep(10)
        status = self.verify_igmp_data_traffic(groups[0], intf = self.V_INF1, source= src_list[0])
        assert_equal(status, False)

    #pass
    def test_cluster_state_with_igmp_join_before_and_after_master_change(self, onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        self.igmp.setUp(controller=master)
        groups = [self.igmp.random_mcast_ip()]
        src_list = [self.igmp.randomsourceip()]
        self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
        log.info('Changing cluster master %s to %s'%(master, standbys[0]))
        self.change_master_current_cluster(new_master = standbys[0])
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        time.sleep(1)
        self.change_master_current_cluster(new_master = master)
        status = self.verify_igmp_data_traffic(groups[0], intf=self.V_INF1, source=src_list[0])
        assert_equal(status, True)

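    # The EAP-TLS tests run TLSAuthTest() from a nose.twistedtools @deferred context: the
    # nested verify function fires the authentication and completes the Deferred, before and
    # after the cluster event being exercised.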
    #pass
    @deferred(TLS_TIMEOUT)
    def test_cluster_with_eap_tls_traffic(self, onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        self.tls.setUp(controller=master)
        df = defer.Deferred()
        def eap_tls_verify(df):
            tls = TLSAuthTest()
            tls.runTest()
            df.callback(0)
        reactor.callLater(0, eap_tls_verify, df)
        return df

1165 @deferred(120)
ChetanGaonker689b3862016-10-17 16:25:01 -07001166 def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001167 master, standbys = self.get_cluster_current_master_standbys()
1168 assert_equal(len(standbys), (onos_instances-1))
1169 self.tls.setUp()
1170 df = defer.Deferred()
1171 def eap_tls_verify2(df2):
1172 tls = TLSAuthTest()
1173 tls.runTest()
1174 df.callback(0)
1175 for i in [0,1]:
1176 if i == 1:
1177 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1178 self.change_master_current_cluster(new_master=standbys[0])
1179 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1180 else:
1181 log.info('Verifying tls authentication before cluster master change')
1182 reactor.callLater(0, eap_tls_verify, df)
1183 return df
1184
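#scenario: authenticate via EAP-TLS, kill the current cluster master, verify the remaining
#instances re-form the cluster, then repeat the authentication once the master is gone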
1185 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001186 def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001187 status = self.verify_cluster_status(onos_instances=onos_instances)
1188 assert_equal(status, True)
1189 master, standbys = self.get_cluster_current_master_standbys()
1190 assert_equal(len(standbys), (onos_instances-1))
1191 onos_names_ips = self.get_cluster_container_names_ips()
1192 master_onos_name = onos_names_ips[master]
1193 self.tls.setUp()
1194 df = defer.Deferred()
1195 def eap_tls_verify(df):
1196 tls = TLSAuthTest()
1197 tls.runTest()
1198 df.callback(0)
1199 for i in [0,1]:
1200 if i == 1:
1201 log.info('Killing cluster current master %s'%master)
1202 cord_test_onos_shutdown(node = master_onos_name)
1203 time.sleep(20)
1204 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1205 assert_equal(status, True)
1206 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1207 log.info('Verifying tls authentication after killing cluster master')
1208 reactor.callLater(0, eap_tls_verify, df)
1209 return df
1210
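#scenario: EAP-TLS with an empty client certificate is expected to fail both before and
#after one cluster member (a standby) is restarted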
1211 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001212 def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001213 status = self.verify_cluster_status(onos_instances=onos_instances)
1214 assert_equal(status, True)
1215 master, standbys = self.get_cluster_current_master_standbys()
1216 assert_equal(len(standbys), (onos_instances-1))
1217 onos_names_ips = self.get_cluster_container_names_ips()
1218 member_onos_name = onos_names_ips[standbys[0]]
1219 self.tls.setUp()
1220 df = defer.Deferred()
1221 def eap_tls_no_cert(df):
1222 def tls_no_cert_cb():
1223 log.info('TLS authentication failed with no certificate')
1224 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1225 tls.runTest()
1226 assert_equal(tls.failTest, True)
1227 df.callback(0)
1228 for i in [0,1]:
1229 if i == 1:
1230 log.info('Restart cluster member %s'%standbys[0])
1231 Container(member_onos_name,Onos.IMAGE).restart()
1232 time.sleep(20)
1233 status = self.verify_cluster_status(onos_instances=onos_instances)
1234 assert_equal(status, True)
1235 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1236 log.info('Verifying tls authentication after member restart')
1237 reactor.callLater(0, eap_tls_no_cert, df)
1238 return df
1239
ChetanGaonker689b3862016-10-17 16:25:01 -07001240 #pass
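#scenario: verify proxyarp replies before and after a master change, then deactivate the
#proxyarp app on the new master (replies should stop) and re-activate it (replies resume)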
1241 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1242 status = self.verify_cluster_status(onos_instances=onos_instances)
1243 assert_equal(status,True)
1244 master,standbys = self.get_cluster_current_master_standbys()
1245 assert_equal(len(standbys),(onos_instances-1))
1246 self.proxyarp.setUpClass()
1247 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1248 ingress = hosts+1
1249 for hostip, hostmac in hosts_config:
1250 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1251 time.sleep(1)
1252 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1253 self.change_cluster_current_master(new_master=standbys[0])
1254 log.info('verifying proxyarp after master change')
1255 for hostip, hostmac in hosts_config:
1256 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1257 time.sleep(1)
1258 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1259 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1260 time.sleep(3)
1261 for hostip, hostmac in hosts_config:
1262 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1263 time.sleep(1)
1264 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1265 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1266 time.sleep(3)
1267 for hostip, hostmac in hosts_config:
1268 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1269 time.sleep(1)
ChetanGaonker2099d722016-10-07 15:16:58 -07001270
ChetanGaonker689b3862016-10-17 16:25:01 -07001271 #pass
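#scenario: verify proxyarp replies, kill one standby member, confirm the cluster comes up
#with one less instance and proxyarp still answers for all configured hosts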
1272 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001273 status = self.verify_cluster_status(onos_instances=onos_instances)
1274 assert_equal(status, True)
1275 master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001276 assert_equal(len(standbys), (onos_instances-1))
1277 onos_names_ips = self.get_cluster_container_names_ips()
1278 member_onos_name = onos_names_ips[standbys[1]]
1279 self.proxyarp.setUpClass()
1280 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1281 ingress = hosts+1
1282 for hostip, hostmac in hosts_config:
1283 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1284 time.sleep(1)
1285 log.info('killing cluster member %s'%standbys[1])
1286 Container(member_onos_name,Onos.IMAGE).kill()
1287 time.sleep(20)
1288 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1289 assert_equal(status, True)
1290 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1291 log.info('verifying proxy arp functionality after cluster member down')
1292 for hostip, hostmac in hosts_config:
1293 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1294 time.sleep(1)
1295
1296 #pass
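#scenario: fire 10 concurrent ARP requests for different hosts on different interfaces and
#expect a proxyarp reply on every one; success is tracked per sniffer thread in success_dir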
1297 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1298 status = self.verify_cluster_status(onos_instances=onos_instances)
1299 assert_equal(status, True)
1300 self.proxyarp.setUpClass()
1301 master, standbys = self.get_cluster_current_master_standbys()
1302 assert_equal(len(standbys), (onos_instances-1))
1303 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1304 self.success = True
1305 ingress = hosts+1
1306 ports = range(ingress,ingress+10)
1307 hostmac = []
1308 hostip = []
1309 for ip,mac in hosts_config:
1310 hostmac.append(mac)
1311 hostip.append(ip)
1312 success_dir = {}
1313 def verify_proxyarp(*r):
1314 ingress, hostmac, hostip = r[0],r[1],r[2]
1315 def mac_recv_task():
1316 def recv_cb(pkt):
1317 log.info('ARP reply seen with source MAC %s' %(pkt[ARP].hwsrc))
1318 success_dir[current_thread().name] = True
1319 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1320 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1321 t = threading.Thread(target = mac_recv_task)
1322 t.start()
1323 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1324 log.info('Sending arp request for dest ip %s on interface %s' %
1325 (hostip,self.proxyarp.port_map[ingress]))
1326 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1327 t.join()
1328 t = []
1329 for i in range(10):
1330 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1331 for i in range(10):
1332 t[i].start()
1333 time.sleep(2)
1334 for i in range(10):
1335 t[i].join()
1336 if len(success_dir) != 10:
1337 self.success = False
1338 assert_equal(self.success, True)
1339
1340 #pass
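#scenario: add an allow ACL rule via the current master, change the cluster master, then
#remove the same rule through the new master to confirm the ACL store is shared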
1341 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1342 status = self.verify_cluster_status(onos_instances=onos_instances)
1343 assert_equal(status, True)
1344 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001345 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001346 self.acl.setUp()
1347 acl_rule = ACLTest()
1348 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1349 if status is False:
1350 log.info('JSON request returned status %d' %code)
1351 assert_equal(status, True)
1352 result = acl_rule.get_acl_rules(controller=master)
1353 aclRules1 = result.json()['aclRules']
1354 log.info('Added acl rules is %s'%aclRules1)
1355 acl_Id = map(lambda d: d['id'], aclRules1)
1356 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1357 self.change_cluster_current_master(new_master=standbys[0])
1358 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1359 if status is False:
1360 log.info('JSON request returned status %d' %code)
1361 assert_equal(status, True)
1362
1363 #pass
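#scenario: add an ACL rule, kill the current master and verify the rule ids read back from
#the newly elected master match the ones created before the failure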
1364 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1365 status = self.verify_cluster_status(onos_instances=onos_instances)
1366 assert_equal(status, True)
1367 master,standbys = self.get_cluster_current_master_standbys()
1368 assert_equal(len(standbys),(onos_instances-1))
1369 onos_names_ips = self.get_cluster_container_names_ips()
1370 master_onos_name = onos_names_ips[master]
1371 self.acl.setUp()
1372 acl_rule = ACLTest()
1373 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1374 if status is False:
1375 log.info('JSON request returned status %d' %code)
1376 assert_equal(status, True)
1377 result1 = acl_rule.get_acl_rules(controller=master)
1378 aclRules1 = result1.json()['aclRules']
1379 log.info('Added acl rules is %s'%aclRules1)
1380 acl_Id1 = map(lambda d: d['id'], aclRules1)
1381 log.info('Killing cluster current master %s'%master)
1382 Container(master_onos_name,Onos.IMAGE).kill()
1383 time.sleep(45)
1384 status = self.verify_cluster_status(onos_instances=onos_instances,controller=standbys[0])
1385 assert_equal(status, True)
1386 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1387 assert_equal(len(standbys),(onos_instances-2))
1388 assert_not_equal(new_master,master)
1389 result2 = acl_rule.get_acl_rules(controller=new_master)
1390 aclRules2 = result2.json()['aclRules']
1391 acl_Id2 = map(lambda d: d['id'], aclRules2)
1392 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1393 assert_equal(acl_Id2,acl_Id1)
1394
1395 #acl traffic scenario not working as acl rule is not getting added to onos
1396 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1397 status = self.verify_cluster_status(onos_instances=onos_instances)
1398 assert_equal(status, True)
1399 master,standbys = self.get_cluster_current_master_standbys()
1400 assert_equal(len(standbys),(onos_instances-1))
1401 onos_names_ips = self.get_cluster_container_names_ips()
1402 member1_onos_name = onos_names_ips[standbys[0]]
1403 member2_onos_name = onos_names_ips[standbys[1]]
1404 ingress = self.acl.ingress_iface
1405 egress = self.acl.CURRENT_PORT_NUM
1406 acl_rule = ACLTest()
1407 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1408 self.acl.CURRENT_PORT_NUM += 1
1409 time.sleep(5)
1410 if status is False:
1411 log.info('JSON request returned status %d' %code)
1412 assert_equal(status, True)
1413 srcMac = '00:00:00:00:00:11'
1414 dstMac = host_ip_mac[0][1]
1415 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1416 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1417 time.sleep(10)
1418 if status is False:
1419 log.info('JSON request returned status %d' %code)
1420 assert_equal(status, True)
1421 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1422 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1423 Container(member1_onos_name, Onos.IMAGE).kill()
1424 Container(member2_onos_name, Onos.IMAGE).kill()
1425 time.sleep(40)
1426 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1427 assert_equal(status, True)
1428 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1429 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1430
1431 #pass
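#scenario: obtain a lease through the dhcp relay, change the cluster master, release the
#address via the new master and confirm the same address is handed out on rediscover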
1432 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1433 status = self.verify_cluster_status(onos_instances=onos_instances)
1434 assert_equal(status, True)
1435 master,standbys = self.get_cluster_current_master_standbys()
1436 assert_equal(len(standbys),(onos_instances-1))
1437 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001438 mac = self.dhcprelay.get_mac(iface)
1439 self.dhcprelay.host_load(iface)
1440 ##we use the defaults for this test that serves as an example for others
1441 ##You don't need to restart dhcpd server if retaining default config
1442 config = self.dhcprelay.default_config
1443 options = self.dhcprelay.default_options
1444 subnet = self.dhcprelay.default_subnet_config
1445 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1446 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1447 config = config,
1448 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001449 subnet = subnet,
1450 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001451 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1452 cip, sip = self.dhcprelay.send_recv(mac)
1453 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1454 self.change_master_current_cluster(new_master=standbys[0])
1455 log.info('Releasing ip %s to server %s' %(cip, sip))
1456 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1457 log.info('Triggering DHCP discover again after release')
1458 cip2, sip2 = self.dhcprelay.send_recv(mac)
1459 log.info('Verifying released IP was given back on rediscover')
1460 assert_equal(cip, cip2)
1461 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1462 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001463 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001464
ChetanGaonker689b3862016-10-17 16:25:01 -07001465
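#scenario: obtain a lease, kill the cluster master, then re-request from a standby and
#expect the client to be given the same address it held before the failure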
1466 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1467 status = self.verify_cluster_status(onos_instances=onos_instances)
1468 assert_equal(status, True)
1469 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001470 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001471 onos_names_ips = self.get_cluster_container_names_ips()
1472 master_onos_name = onos_names_ips[master]
1473 self.dhcprelay.setUpClass(controller=master)
1474 mac = self.dhcprelay.get_mac(iface)
1475 self.dhcprelay.host_load(iface)
1476 ##we use the defaults for this test that serves as an example for others
1477 ##You don't need to restart dhcpd server if retaining default config
1478 config = self.dhcprelay.default_config
1479 options = self.dhcprelay.default_options
1480 subnet = self.dhcprelay.default_subnet_config
1481 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1482 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1483 config = config,
1484 options = options,
1485 subnet = subnet,
1486 controller=master)
1487 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1488 log.info('Initiating dhcp process from client %s'%mac)
1489 cip, sip = self.dhcprelay.send_recv(mac)
1490 log.info('Killing cluster current master %s'%master)
1491 Container(master_onos_name, Onos.IMAGE).kill()
1492 time.sleep(60)
1493 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1494 assert_equal(status, True)
1495 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1496 log.info("Verifying dhcp clients gets same IP after cluster master restarts")
1497 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1498 assert_equal(new_cip, cip)
1499 self.dhcprelay.tearDownClass(controller=standbys[0])
1500
1501 #pass
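#scenario: serve three different dhcp clients while toggling the cluster master back and
#forth, expecting each client to receive an address regardless of which node is master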
1502 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1503 status = self.verify_cluster_status(onos_instances=onos_instances)
1504 assert_equal(status, True)
1505 master,standbys = self.get_cluster_current_master_standbys()
1506 assert_equal(len(standbys),(onos_instances-1))
1507 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001508 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1509 self.dhcprelay.host_load(iface)
1510 ##we use the defaults for this test that serves as an example for others
1511 ##You don't need to restart dhcpd server if retaining default config
1512 config = self.dhcprelay.default_config
1513 options = self.dhcprelay.default_options
1514 subnet = self.dhcprelay.default_subnet_config
1515 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1516 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1517 config = config,
1518 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001519 subnet = subnet,
1520 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001521 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1522 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1523 assert_not_equal(cip1,None)
1524 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1525 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1526 self.change_master_current_cluster(new_master=standbys[0])
1527 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1528 assert_not_equal(cip2,None)
1529 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1530 log.info('Changing cluster master from %s to %s'%(standbys[0],master))
1531 self.change_master_current_cluster(new_master=master)
1532 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1533 assert_not_equal(cip3,None)
1534 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
ChetanGaonker689b3862016-10-17 16:25:01 -07001535 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001536
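#scenario: run the cord subscriber channel-join verification before and after a full
#cluster restart (all ONOS instances restarted via cord_test_onos_restart)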
ChetanGaonker689b3862016-10-17 16:25:01 -07001537 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001538 status = self.verify_cluster_status(onos_instances=onos_instances)
1539 assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001540 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001541 self.subscriber.num_subscribers = 5
1542 self.subscriber.num_channels = 10
1543 for i in [0,1]:
1544 if i == 1:
1545 cord_test_onos_restart()
1546 time.sleep(45)
1547 status = self.verify_cluster_status(onos_instances=onos_instances)
1548 assert_equal(status, True)
1549 log.info('Verifying cord subscriber functionality after cluster restart')
1550 else:
1551 log.info('Verifying cord subscriber functionality before cluster restart')
1552 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1553 num_channels = self.subscriber.num_channels,
1554 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1555 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1556 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1557 self.subscriber.num_channels))
1558 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001559 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001560
ChetanGaonker689b3862016-10-17 16:25:01 -07001561 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1562 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1563 status = self.verify_cluster_status(onos_instances=onos_instances)
1564 assert_equal(status, True)
1565 master,standbys = self.get_cluster_current_master_standbys()
1566 assert_equal(len(standbys),(onos_instances-1))
1567 self.subscriber.setUpClass(controller=master)
1568 self.subscriber.num_subscribers = 5
1569 self.subscriber.num_channels = 10
1570 for i in [0,1]:
1571 if i == 1:
1572 status=self.withdraw_cluster_current_mastership(master_ip=master)
1573 assert_equal(status, True)
1574 master,standbys = self.get_cluster_current_master_standbys()
1575 log.info('verifying cord subscriber functionality after the current cluster master withdraws mastership')
1576 else:
1577 log.info('verifying cord subscriber functionality before the cluster master withdraws mastership')
1578 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1579 num_channels = self.subscriber.num_channels,
1580 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1581 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1582 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1583 self.subscriber.num_channels),controller=master)
1584 assert_equal(test_status, True)
1585 self.subscriber.tearDownClass(controller=master)
1586
1587 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1588 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001589 status = self.verify_cluster_status(onos_instances=onos_instances)
1590 assert_equal(status, True)
1591 master, standbys = self.get_cluster_current_master_standbys()
1592 assert_equal(len(standbys),(onos_instances-1))
1593 onos_names_ips = self.get_cluster_container_names_ips()
1594 member_onos_name = onos_names_ips[standbys[0]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001595 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001596 num_subscribers = 1
1597 num_channels = 10
1598 for i in [0,1]:
1599 if i == 1:
1600 cord_test_onos_shutdown(node = member_onos_name)
1601 time.sleep(30)
ChetanGaonker689b3862016-10-17 16:25:01 -07001602 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001603 assert_equal(status, True)
1604 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1605 else:
1606 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1607 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1608 num_channels = num_channels,
1609 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1610 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1611 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
ChetanGaonker689b3862016-10-17 16:25:01 -07001612 negative_subscriber_auth = 'all',controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001613 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001614 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001615
ChetanGaonker689b3862016-10-17 16:25:01 -07001616 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001617 status = self.verify_cluster_status(onos_instances=onos_instances)
1618 assert_equal(status, True)
1619 master, standbys = self.get_cluster_current_master_standbys()
1620 assert_equal(len(standbys),(onos_instances-1))
1621 onos_names_ips = self.get_cluster_container_names_ips()
1622 member1_onos_name = onos_names_ips[standbys[0]]
1623 member2_onos_name = onos_names_ips[standbys[1]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001624 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001625 num_subscribers = 1
1626 num_channels = 10
1627 for i in [0,1]:
1628 if i == 1:
1629 cord_test_onos_shutdown(node = member1_onos_name)
1630 cord_test_onos_shutdown(node = member2_onos_name)
1631 time.sleep(60)
1632 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1633 assert_equal(status, True)
1634 log.info('Verifying cord subscriber functionality after two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1635 else:
1636 log.info('Verifying cord subscriber functionality before two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1637 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1638 num_channels = num_channels,
1639 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1640 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1641 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1642 negative_subscriber_auth = 'all')
1643 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001644 self.subscriber.tearDownClass(controller=master)
1645
1646 #pass
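#scenario: with multiple OVS switches connected, every device should have a master and
#onos_instances-1 standbys recorded in the mastership store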
1647 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1648 status = self.verify_cluster_status(onos_instances=onos_instances)
1649 assert_equal(status, True)
1650 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1651 for device in device_dict.keys():
1652 log.info("Device is %s"%device_dict[device])
1653 assert_not_equal(device_dict[device]['master'],'none')
1654 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1655 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1656
1657 #pass
1658 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1659 status = self.verify_cluster_status(onos_instances=onos_instances)
1660 assert_equal(status, True)
1661 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1662 cluster_ips = self.get_cluster_current_member_ips()
1663 for ip in cluster_ips:
1664 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1665 assert_equal(len(device_dict.keys()),onos_instances)
1666 for device in device_dict.keys():
1667 log.info("Device is %s"%device_dict[device])
1668 assert_not_equal(device_dict[device]['master'],'none')
1669 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1670 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1671
1672 #pass
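#scenario: restart the ONOS instance that currently masters at least one device and verify
#its devices are re-balanced to the other instances (its own device count drops to zero)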
1673 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1674 status = self.verify_cluster_status(onos_instances=onos_instances)
1675 assert_equal(status, True)
1676 onos_names_ips = self.get_cluster_container_names_ips()
1677 master_count = self.get_number_of_devices_of_master()
1678 log.info('Master count information is %s'%master_count)
1679 total_devices = 0
1680 for master in master_count.keys():
1681 total_devices += master_count[master]['size']
1682 if master_count[master]['size'] != 0:
1683 restart_ip = master
1684 assert_equal(total_devices,onos_instances)
1685 member_onos_name = onos_names_ips[restart_ip]
1686 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1687 Container(member_onos_name, Onos.IMAGE).restart()
1688 time.sleep(40)
1689 master_count = self.get_number_of_devices_of_master()
1690 log.info('Master count information after restart is %s'%master_count)
1691 total_devices = 0
1692 for master in master_count.keys():
1693 total_devices += master_count[master]['size']
1694 if master == restart_ip:
1695 assert_equal(master_count[master]['size'], 0)
1696 assert_equal(total_devices,onos_instances)
1697
1698 #pass
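#scenario: kill an instance that masters at least one device and verify, via a surviving
#controller, that all devices are still mastered by the remaining instances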
1699 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1700 status = self.verify_cluster_status(onos_instances=onos_instances)
1701 assert_equal(status, True)
1702 onos_names_ips = self.get_cluster_container_names_ips()
1703 master_count = self.get_number_of_devices_of_master()
1704 log.info('Master count information is %s'%master_count)
1705 total_devices = 0
1706 for master in master_count.keys():
1707 total_devices += master_count[master]['size']
1708 if master_count[master]['size'] != 0:
1709 restart_ip = master
1710 assert_equal(total_devices,onos_instances)
1711 master_onos_name = onos_names_ips[restart_ip]
1712 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1713 Container(master_onos_name, Onos.IMAGE).kill()
1714 time.sleep(40)
1715 for ip in onos_names_ips.keys():
1716 if ip != restart_ip:
1717 controller_ip = ip
1718 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1719 assert_equal(status, True)
1720 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1721 log.info('Master count information after restart is %s'%master_count)
1722 total_devices = 0
1723 for master in master_count.keys():
1724 total_devices += master_count[master]['size']
1725 if master == restart_ip:
1726 assert_equal(master_count[master]['size'], 0)
1727 assert_equal(total_devices,onos_instances)
1728
1729 #pass
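#scenario: withdraw mastership of one device from its current master and verify that
#master's device count drops by one while the overall device count is unchanged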
1730 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1731 status = self.verify_cluster_status(onos_instances=onos_instances)
1732 assert_equal(status, True)
1733 master_count = self.get_number_of_devices_of_master()
1734 log.info('Master count information is %s'%master_count)
1735 total_devices = 0
1736 for master in master_count.keys():
1737 total_devices += int(master_count[master]['size'])
1738 if master_count[master]['size'] != 0:
1739 master_ip = master
1740 log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
1741 device_id = str(master_count[master]['devices'][0])
1742 device_count = master_count[master]['size']
1743 assert_equal(total_devices,onos_instances)
1744 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1745 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1746 assert_equal(status, True)
1747 master_count = self.get_number_of_devices_of_master()
1748 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1749 total_devices = 0
1750 for master in master_count.keys():
1751 total_devices += int(master_count[master]['size'])
1752 if master == master_ip:
1753 assert_equal(master_count[master]['size'], device_count-1)
1754 assert_equal(total_devices,onos_instances)
1755
1756 #pass
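#scenario: restart the whole cluster and verify the total number of mastered devices is
#the same before and after the restart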
1757 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1758 status = self.verify_cluster_status(onos_instances=onos_instances)
1759 assert_equal(status, True)
1760 master_count = self.get_number_of_devices_of_master()
1761 log.info('Master count information is %s'%master_count)
1762 total_devices = 0
1763 for master in master_count.keys():
1764 total_devices += master_count[master]['size']
1765 assert_equal(total_devices,onos_instances)
1766 log.info('Restarting cluster')
1767 cord_test_onos_restart()
1768 time.sleep(60)
1769 master_count = self.get_number_of_devices_of_master()
1770 log.info('Master count information after restart is %s'%master_count)
1771 total_devices = 0
1772 for master in master_count.keys():
1773 total_devices += master_count[master]['size']
1774 assert_equal(total_devices,onos_instances)