# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl, get_mac
from OltConfig import OltConfig
from socket import socket
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart, cord_test_restart_cluster
from portmaps import g_subscriber_port_map
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
from OnosLog import OnosLog
from CordLogger import CordLogger
import os
import json
import random
import collections
log.setLevel('INFO')

class cluster_exchange(CordLogger):
    test_path = os.path.dirname(os.path.realpath(__file__))
    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
    mac = RandMAC()._fix()
    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
    igmp_ip = IP(dst = '224.0.0.22')
    ONOS_INSTANCES = 3
    V_INF1 = 'veth0'
    TLS_TIMEOUT = 100
    device_id = 'of:' + get_mac()
    igmp = cluster_igmp()
    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10', end_ip = '224.1.10.49')
    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35', end_ip = '38.24.35.56')
    tls = cluster_tls()
    flows = cluster_flows()
    proxyarp = cluster_proxyarp()
    vrouter = cluster_vrouter()
    acl = cluster_acl()
    dhcprelay = cluster_dhcprelay()
    subscriber = cluster_subscriber()
    testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_single_controller_restarts', 'test_cluster_restarts')

    def setUp(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).setUp()

    def tearDown(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).tearDown()

    def get_controller(self):
        controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
        controller = controller.split(',')[0]
        return controller

    @classmethod
    def get_controllers(cls):
        controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
        return controllers.split(',')

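    # Controller discovery (sketch): the two helpers above read the ONOS_CONTROLLER_IP
    # environment variable, which the test framework is assumed to set to a comma
    # separated list of controller IPs. With illustrative addresses:
    #   ONOS_CONTROLLER_IP='172.17.0.2,172.17.0.3,172.17.0.4'
    #   self.get_controller()              -> '172.17.0.2' (first entry only)
    #   cluster_exchange.get_controllers() -> ['172.17.0.2', '172.17.0.3', '172.17.0.4']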
    def cliEnter(self, controller = None):
        retries = 0
        while retries < 3:
            self.cli = OnosCliDriver(controller = controller, connect = True)
            if self.cli.handle:
                break
            else:
                retries += 1
                time.sleep(2)

    def cliExit(self):
        self.cli.disconnect()

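    # CLI access pattern (sketch): the helpers below wrap their ONOS CLI calls in a
    # cliEnter()/cliExit() pair; cliEnter retries the connection up to three times
    # and breaks out as soon as OnosCliDriver returns a usable handle.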
    def get_leader(self, controller = None):
        self.cliEnter(controller = controller)
        try:
            result = json.loads(self.cli.leaders(jsonFormat = True))
        except:
            result = None

        if result is None:
            log.info('Leaders command failure for controller %s' %controller)
        else:
            log.info('Leaders returned: %s' %result)
        self.cliExit()
        return result

    def log_set(self, level = None, app = 'org.onosproject', controllers = None):
        CordLogger.logSet(level = level, app = app, controllers = controllers, forced = True)

    def get_leaders(self, controller = None):
        result_map = {}
        if controller is None:
            controller = self.get_controller()
        if type(controller) in [ list, tuple ]:
            for c in controller:
                leaders = self.get_leader(controller = c)
                result_map[c] = leaders
        else:
            leaders = self.get_leader(controller = controller)
            result_map[controller] = leaders
        return result_map

    def verify_leaders(self, controller = None):
        leaders_map = self.get_leaders(controller = controller)
        failed = [ k for k, v in leaders_map.items() if v is None ]
        return failed

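    # Expected shapes (sketch): get_leaders() returns a dict keyed by controller IP
    # whose value is the parsed 'leaders' JSON from that node, or None when the CLI
    # call failed; verify_leaders() reduces that to the list of nodes whose 'leaders'
    # command failed, so an empty list means the cluster looks healthy. For example,
    # with illustrative addresses:
    #   {'172.17.0.2': [...], '172.17.0.3': None}  ->  failed == ['172.17.0.3']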
    def verify_cluster_status(self, controller = None, onos_instances = ONOS_INSTANCES, verify = False):
133 tries = 0
134 try:
135 self.cliEnter(controller = controller)
136 while tries <= 10:
137 cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
138 if cluster_summary:
139 log.info("cluster 'summary' command output is %s"%cluster_summary)
140 nodes = cluster_summary['nodes']
141 if verify:
142 if nodes == onos_instances:
143 self.cliExit()
144 return True
145 else:
146 tries += 1
147 time.sleep(1)
148 else:
149 if nodes >= onos_instances:
150 self.cliExit()
151 return True
152 else:
153 tries += 1
154 time.sleep(1)
155 else:
156 tries += 1
157 time.sleep(1)
158 self.cliExit()
159 return False
160 except:
            raise Exception('Failed to get cluster members')
            return False

    def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
        tries = 0
166 cluster_ips = []
167 try:
168 self.cliEnter(controller = controller)
169 while tries <= 10:
170 cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
171 if cluster_nodes:
172 log.info("cluster 'nodes' output is %s"%cluster_nodes)
                    if nodes_filter:
                        cluster_nodes = nodes_filter(cluster_nodes)
                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
176 self.cliExit()
177 cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
178 return cluster_ips
179 else:
180 tries += 1
181 self.cliExit()
182 return cluster_ips
183 except:
184 raise Exception('Failed to get cluster members')
185 return cluster_ips
186
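    # nodes_filter usage sketch: callers can pass a predicate over the parsed 'nodes'
    # JSON to keep only live members, as the restart tests below do, e.g.:
    #   self.get_cluster_current_member_ips(controller = ip,
    #       nodes_filter = lambda nodes: [ n for n in nodes if n['state'] in ['ACTIVE', 'READY'] ])
    # The returned IPs are sorted numerically on the last octet.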
    def get_cluster_container_names_ips(self, controller = None):
        onos_names_ips = {}
        onos_ips = self.get_cluster_current_member_ips(controller = controller)
        onos_names_ips[onos_ips[0]] = Onos.NAME
        onos_names_ips[Onos.NAME] = onos_ips[0]
        for i in range(1, len(onos_ips)):
            name = '{0}-{1}'.format(Onos.NAME, i+1)
            onos_names_ips[onos_ips[i]] = name
            onos_names_ips[name] = onos_ips[i]

        return onos_names_ips

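    # The name/IP map above is intentionally bidirectional (sketch, assuming the
    # default container naming of Onos.NAME, Onos.NAME-2, ... and illustrative IPs):
    #   {'172.17.0.2': 'cord-onos', 'cord-onos': '172.17.0.2',
    #    '172.17.0.3': 'cord-onos-2', 'cord-onos-2': '172.17.0.3'}
    # so tests can look up a container name from an IP and vice versa.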
199 #identifying current master of a connected device, not tested
200 def get_cluster_current_master_standbys(self,controller=None,device_id=device_id):
201 master = None
202 standbys = []
203 tries = 0
204 try:
205 cli = self.cliEnter(controller = controller)
206 while tries <= 10:
207 roles = json.loads(self.cli.roles(jsonFormat = True))
208 log.info("cluster 'roles' command output is %s"%roles)
209 if roles:
210 for device in roles:
211 log.info('Verifying device info in line %s'%device)
212 if device['id'] == device_id:
213 master = str(device['master'])
214 standbys = map(lambda d: str(d), device['standbys'])
215 log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
216 self.cliExit()
217 return master, standbys
218 self.cliExit()
219 return master, standbys
220 else:
221 tries += 1
                    time.sleep(1)
223 self.cliExit()
224 return master,standbys
225 except:
226 raise Exception('Failed to get cluster members')
227 return master,standbys
228
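    # 'roles' output sketch (field names as consumed above; values illustrative):
    #   [{'id': 'of:0000aabbccddeeff', 'master': '172.17.0.2',
    #     'standbys': ['172.17.0.3', '172.17.0.4']}, ...]
    # get_cluster_current_master_standbys() picks the entry matching device_id and
    # returns (master_ip, [standby_ips]); it falls back to (None, []) when the
    # device is not found.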
229 def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
230 ''' returns master and standbys of all the connected devices to ONOS cluster instance'''
231 device_dict = {}
232 tries = 0
233 try:
234 cli = self.cliEnter(controller = controller)
235 while tries <= 10:
236 device_dict = {}
237 roles = json.loads(self.cli.roles(jsonFormat = True))
238 log.info("cluster 'roles' command output is %s"%roles)
239 if roles:
240 for device in roles:
241 device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
242 for i in range(len(device_dict[device['id']]['standbys'])):
243 device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
244 log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
245 self.cliExit()
246 return device_dict
247 else:
248 tries += 1
                    time.sleep(1)
250 self.cliExit()
            return device_dict
252 except:
253 raise Exception('Failed to get cluster members')
254 return device_dict
255
256 #identify current master of a connected device, not tested
257 def get_cluster_connected_devices(self,controller=None):
258 '''returns all the devices connected to ONOS cluster'''
259 device_list = []
260 tries = 0
261 try:
262 cli = self.cliEnter(controller = controller)
263 while tries <= 10:
264 device_list = []
265 devices = json.loads(self.cli.devices(jsonFormat = True))
266 log.info("cluster 'devices' command output is %s"%devices)
267 if devices:
268 for device in devices:
269 log.info('device id is %s'%device['id'])
270 device_list.append(str(device['id']))
271 self.cliExit()
272 return device_list
273 else:
274 tries += 1
275 time.sleep(1)
276 self.cliExit()
277 return device_list
278 except:
279 raise Exception('Failed to get cluster members')
280 return device_list
281
282 def get_number_of_devices_of_master(self,controller=None):
283 '''returns master-device pairs, which master having what devices'''
284 master_count = {}
285 try:
286 cli = self.cliEnter(controller = controller)
287 masters = json.loads(self.cli.masters(jsonFormat = True))
288 if masters:
289 for master in masters:
290 master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
291 return master_count
292 else:
293 return master_count
        except:
            raise Exception('Failed to get cluster members')
            return master_count

    def change_master_current_cluster(self, new_master = None, device_id = device_id, controller = None):
        if new_master is None: return False
        self.cliEnter(controller = controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        master, standbys = self.get_cluster_current_master_standbys(controller = controller, device_id = device_id)
        assert_equal(master, new_master)
        log.info('Cluster master changed to %s successfully' %new_master)

    def withdraw_cluster_current_mastership(self, master_ip = None, device_id = device_id, controller = None):
        '''Current master loses its mastership and hence a new master will be elected'''
311 self.cliEnter(controller=controller)
312 cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
313 command = self.cli.command(cmd = cmd, jsonFormat = False)
314 self.cliExit()
315 time.sleep(60)
316 new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
317 assert_not_equal(new_master_ip,master_ip)
318 log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
319 log.info('Cluster new master is %s'%new_master_ip)
320 return True
321
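    # Mastership changes in the two helpers above are driven through the ONOS
    # 'device-role' CLI command, i.e. a string of the form (illustrative values):
    #   device-role of:0000aabbccddeeff 172.17.0.2 master   # promote a node
    #   device-role of:0000aabbccddeeff 172.17.0.2 none     # withdraw mastership
    # followed by a settle delay and a re-read of the roles to assert the change.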
    def test_cluster_controller_restarts(self):
        '''Test the cluster by repeatedly killing the controllers'''
324 controllers = self.get_controllers()
325 ctlr_len = len(controllers)
326 if ctlr_len <= 1:
327 log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
328 assert_greater(ctlr_len, 1)
329
330 #this call would verify the cluster for once
331 onos_map = self.get_cluster_container_names_ips()
332
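        # Iteration sketch: each pass restarts one controller (round robin), raises
        # the log level on the surviving nodes, archives the per-iteration ONOS logs,
        # and then scans every node for StorageException timeouts and failed
        # 'leaders' responses before moving on to the next controller.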
        def check_exception(controller = None):
            adjacent_controller = None
335 adjacent_controllers = None
336 if controller:
                adjacent_controllers = list(set(controllers) - set([controller]))
                adjacent_controller = adjacent_controllers[0]
            for node in controllers:
340 onosLog = OnosLog(host = node)
341 ##check the logs for storage exception
342 _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    failed = self.verify_leaders(controllers)
                    if failed:
                        log.info('Leaders command failed on nodes: %s' %failed)
                    assert_equal(len(failed), 0)
                    return controller
354
355 try:
                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
                log.info('ONOS cluster formed with controllers: %s' %ips)
                st = True
359 except:
360 st = False
361
            failed = self.verify_leaders(controllers)
            assert_equal(len(failed), 0)
            if st is False:
365 log.info('No storage exception and ONOS cluster was not formed successfully')
366 else:
367 controller = None
368
369 return controller
370
371 next_controller = None
372 tries = 10
373 for num in range(tries):
374 index = num % ctlr_len
375 #index = random.randrange(0, ctlr_len)
            controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
            controller = onos_map[controller_name]
378 log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
            try:
                #enable debug log for the other controllers before restarting this controller
                adjacent_controllers = list( set(controllers) - set([controller]) )
                self.log_set(controllers = adjacent_controllers)
                self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
                cord_test_onos_restart(node = controller_name, timeout = 0)
                self.log_set(controllers = controller)
                self.log_set(app = 'io.atomix', controllers = controller)
                time.sleep(60)
            except:
389 time.sleep(5)
390 continue

            #first archive the test case logs for this run
            CordLogger.archive_results('test_cluster_controller_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            next_controller = check_exception(controller = controller)

    def test_cluster_single_controller_restarts(self):
399 '''Test the cluster by repeatedly restarting the same controller'''
400 controllers = self.get_controllers()
401 ctlr_len = len(controllers)
402 if ctlr_len <= 1:
403 log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
404 assert_greater(ctlr_len, 1)
405
406 #this call would verify the cluster for once
407 onos_map = self.get_cluster_container_names_ips()
408
409 def check_exception(controller, inclusive = False):
410 adjacent_controllers = list(set(controllers) - set([controller]))
411 adjacent_controller = adjacent_controllers[0]
412 controller_list = adjacent_controllers if inclusive == False else controllers
413 storage_exceptions = []
414 for node in controller_list:
415 onosLog = OnosLog(host = node)
416 ##check the logs for storage exception
417 _, output = onosLog.get_log(('ERROR', 'Exception',))
418 if output and output.find('StorageException$Timeout') >= 0:
419 log.info('\nStorage Exception Timeout found on node: %s\n' %node)
420 log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
421 log.info('\n' + '-' * 50 + '\n')
422 log.info('%s' %output)
423 log.info('\n' + '-' * 50 + '\n')
424 storage_exceptions.append(node)
425
426 failed = self.verify_leaders(controller_list)
427 if failed:
428 log.info('Leaders command failed on nodes: %s' %failed)
429 if storage_exceptions:
430 log.info('Storage exception seen on nodes: %s' %storage_exceptions)
431 assert_equal(len(failed), 0)
432 return controller
433
434 for ctlr in controller_list:
435 ips = self.get_cluster_current_member_ips(controller = ctlr,
436 nodes_filter = \
437 lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
438 log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if controller in ips and inclusive is False:
                    log.info('Controller %s still ACTIVE on Node %s after it was shutdown' %(controller, ctlr))
                if controller not in ips and inclusive is True:
                    log.info('Controller %s still INACTIVE on Node %s after it was restarted' %(controller, ctlr))
443
444 return controller
445
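        # 'inclusive' sketch: False checks only the surviving nodes while the chosen
        # controller is down; True re-checks all nodes (including the restarted one)
        # after it has been brought back up.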
446 tries = 10
447 #chose a random controller for shutdown/restarts
448 controller = controllers[random.randrange(0, ctlr_len)]
449 controller_name = onos_map[controller]
450 for num in range(tries):
451 index = num % ctlr_len
452 log.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
453 try:
454 cord_test_onos_shutdown(node = controller_name)
455 time.sleep(20)
456 except:
457 time.sleep(5)
458 continue
459 #check for exceptions on the adjacent nodes
460 check_exception(controller)
461 #Now restart the controller back
462 log.info('Restarting back the controller %s' %controller_name)
463 cord_test_onos_restart(node = controller_name)
464 time.sleep(60)
            #archive the logs for this run
            CordLogger.archive_results('test_cluster_single_controller_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            check_exception(controller, inclusive = True)
470
    def test_cluster_restarts(self):
        '''Test the cluster by repeatedly restarting the entire cluster'''
473 controllers = self.get_controllers()
474 ctlr_len = len(controllers)
475 if ctlr_len <= 1:
476 log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
477 assert_greater(ctlr_len, 1)
478
479 #this call would verify the cluster for once
480 onos_map = self.get_cluster_container_names_ips()
481
482 def check_exception():
483 controller_list = controllers
484 storage_exceptions = []
485 for node in controller_list:
486 onosLog = OnosLog(host = node)
487 ##check the logs for storage exception
488 _, output = onosLog.get_log(('ERROR', 'Exception',))
489 if output and output.find('StorageException$Timeout') >= 0:
490 log.info('\nStorage Exception Timeout found on node: %s\n' %node)
491 log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
492 log.info('\n' + '-' * 50 + '\n')
493 log.info('%s' %output)
494 log.info('\n' + '-' * 50 + '\n')
495 storage_exceptions.append(node)
496
497 failed = self.verify_leaders(controller_list)
498 if failed:
499 log.info('Leaders command failed on nodes: %s' %failed)
500 if storage_exceptions:
501 log.info('Storage exception seen on nodes: %s' %storage_exceptions)
502 assert_equal(len(failed), 0)
503 return
504
505 for ctlr in controller_list:
506 ips = self.get_cluster_current_member_ips(controller = ctlr,
507 nodes_filter = \
508 lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
509 log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
510 assert_equal(len(ips), len(controllers))
511
512 tries = 10
513 for num in range(tries):
514 log.info('ITERATION: %d. Restarting cluster with controllers at %s' %(num+1, controllers))
515 try:
516 cord_test_restart_cluster()
517 log.info('Delaying before verifying cluster status')
518 time.sleep(60)
519 except:
520 time.sleep(10)
521 continue

            #archive the logs for this run before verification
            CordLogger.archive_results('test_cluster_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            #check for exceptions on the adjacent nodes
            check_exception()
529
    #pass
    def test_cluster_formation_and_verification(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster exists with %d ONOS instances' %onos_instances)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
539 assert_equal(status, True)
540 onos_ips = self.get_cluster_current_member_ips()
541 onos_instances = len(onos_ips)+add
542 log.info('Adding %d nodes to the ONOS cluster' %add)
543 cord_test_onos_add_cluster(count = add)
544 status = self.verify_cluster_status(onos_instances=onos_instances)
545 assert_equal(status, True)
546
    def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
549 assert_equal(status, True)
550 master, standbys = self.get_cluster_current_master_standbys()
551 assert_equal(len(standbys),(onos_instances-1))
552 onos_names_ips = self.get_cluster_container_names_ips()
553 master_onos_name = onos_names_ips[master]
554 log.info('Removing cluster current master %s'%(master))
555 cord_test_onos_shutdown(node = master_onos_name)
556 time.sleep(60)
557 onos_instances -= 1
558 status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
559 assert_equal(status, True)
560 new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
561 assert_not_equal(master,new_master)
        log.info('Successfully removed the cluster master instance')

    def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
566 assert_equal(status, True)
567 master, standbys = self.get_cluster_current_master_standbys()
568 assert_equal(len(standbys),(onos_instances-1))
569 onos_names_ips = self.get_cluster_container_names_ips()
570 member_onos_name = onos_names_ips[standbys[0]]
571 log.info('Removing cluster member %s'%standbys[0])
572 cord_test_onos_shutdown(node = member_onos_name)
573 time.sleep(60)
574 onos_instances -= 1
575 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
576 assert_equal(status, True)
577
    def test_cluster_removing_two_members(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
580 assert_equal(status, True)
581 master, standbys = self.get_cluster_current_master_standbys()
582 assert_equal(len(standbys),(onos_instances-1))
583 onos_names_ips = self.get_cluster_container_names_ips()
584 member1_onos_name = onos_names_ips[standbys[0]]
585 member2_onos_name = onos_names_ips[standbys[1]]
586 log.info('Removing cluster member %s'%standbys[0])
587 cord_test_onos_shutdown(node = member1_onos_name)
588 log.info('Removing cluster member %s'%standbys[1])
589 cord_test_onos_shutdown(node = member2_onos_name)
590 time.sleep(60)
591 onos_instances = onos_instances - 2
592 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
593 assert_equal(status, True)
594
    def test_cluster_removing_N_members(self, remove = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
597 assert_equal(status, True)
598 master, standbys = self.get_cluster_current_master_standbys()
599 assert_equal(len(standbys),(onos_instances-1))
600 onos_names_ips = self.get_cluster_container_names_ips()
601 for i in range(remove):
602 member_onos_name = onos_names_ips[standbys[i]]
603 log.info('Removing onos container with name %s'%standbys[i])
604 cord_test_onos_shutdown(node = member_onos_name)
605 time.sleep(60)
606 onos_instances = onos_instances - remove
607 status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
608 assert_equal(status, True)
609
610 #nottest test cluster not coming up properly if member goes down
    def test_cluster_adding_and_removing_members(self, onos_instances = ONOS_INSTANCES, add = 2, remove = 2):
        status = self.verify_cluster_status(onos_instances = onos_instances)
613 assert_equal(status, True)
614 onos_ips = self.get_cluster_current_member_ips()
615 onos_instances = len(onos_ips)+add
616 log.info('Adding %d ONOS instances to the cluster'%add)
617 cord_test_onos_add_cluster(count = add)
618 status = self.verify_cluster_status(onos_instances=onos_instances)
619 assert_equal(status, True)
620 log.info('Removing %d ONOS instances from the cluster'%remove)
621 for i in range(remove):
622 name = '{}-{}'.format(Onos.NAME, onos_instances - i)
623 log.info('Removing onos container with name %s'%name)
624 cord_test_onos_shutdown(node = name)
625 time.sleep(60)
626 onos_instances = onos_instances-remove
627 status = self.verify_cluster_status(onos_instances=onos_instances)
628 assert_equal(status, True)
629
630 #nottest cluster not coming up properly if member goes down
    def test_cluster_removing_and_adding_member(self, onos_instances = ONOS_INSTANCES, add = 1, remove = 1):
        status = self.verify_cluster_status(onos_instances = onos_instances)
633 assert_equal(status, True)
634 onos_ips = self.get_cluster_current_member_ips()
635 onos_instances = onos_instances-remove
636 log.info('Removing %d ONOS instances from the cluster'%remove)
637 for i in range(remove):
638 name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
639 log.info('Removing onos container with name %s'%name)
640 cord_test_onos_shutdown(node = name)
641 time.sleep(60)
642 status = self.verify_cluster_status(onos_instances=onos_instances)
643 assert_equal(status, True)
644 log.info('Adding %d ONOS instances to the cluster'%add)
645 cord_test_onos_add_cluster(count = add)
646 onos_instances = onos_instances+add
647 status = self.verify_cluster_status(onos_instances=onos_instances)
648 assert_equal(status, True)
649
    def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
652 assert_equal(status, True)
653 log.info('Restarting cluster')
654 cord_test_onos_restart()
655 status = self.verify_cluster_status(onos_instances = onos_instances)
656 assert_equal(status, True)
657
    def test_cluster_master_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
660 assert_equal(status, True)
661 master, standbys = self.get_cluster_current_master_standbys()
662 onos_names_ips = self.get_cluster_container_names_ips()
663 master_onos_name = onos_names_ips[master]
664 log.info('Restarting cluster master %s'%master)
665 cord_test_onos_restart(node = master_onos_name)
666 status = self.verify_cluster_status(onos_instances = onos_instances)
667 assert_equal(status, True)
668 log.info('Cluster came up after master restart as expected')
669
670 #test fail. master changing after restart. Need to check correct behavior.
    def test_cluster_master_ip_after_master_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master1, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master1]
        log.info('Restarting cluster master %s' %master1)
678 cord_test_onos_restart(node = master_onos_name)
679 status = self.verify_cluster_status(onos_instances = onos_instances)
680 assert_equal(status, True)
681 master2, standbys = self.get_cluster_current_master_standbys()
682 assert_equal(master1,master2)
683 log.info('Cluster master is same before and after cluster master restart as expected')
684
    def test_cluster_one_member_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
687 assert_equal(status, True)
688 master, standbys = self.get_cluster_current_master_standbys()
689 assert_equal(len(standbys),(onos_instances-1))
690 onos_names_ips = self.get_cluster_container_names_ips()
691 member_onos_name = onos_names_ips[standbys[0]]
692 log.info('Restarting cluster member %s'%standbys[0])
693 cord_test_onos_restart(node = member_onos_name)
694 status = self.verify_cluster_status(onos_instances = onos_instances)
695 assert_equal(status, True)
696 log.info('Cluster came up as expected after restarting one member')
697
    def test_cluster_two_members_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
700 assert_equal(status, True)
701 master, standbys = self.get_cluster_current_master_standbys()
702 assert_equal(len(standbys),(onos_instances-1))
703 onos_names_ips = self.get_cluster_container_names_ips()
704 member1_onos_name = onos_names_ips[standbys[0]]
705 member2_onos_name = onos_names_ips[standbys[1]]
706 log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
707 cord_test_onos_restart(node = member1_onos_name)
708 cord_test_onos_restart(node = member2_onos_name)
709 status = self.verify_cluster_status(onos_instances = onos_instances)
710 assert_equal(status, True)
711 log.info('Cluster came up as expected after restarting two members')
712
    def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
715 assert_equal(status,True)
716 master, standbys = self.get_cluster_current_master_standbys()
717 assert_equal(len(standbys),(onos_instances-1))
718 onos_names_ips = self.get_cluster_container_names_ips()
719 for i in range(members):
720 member_onos_name = onos_names_ips[standbys[i]]
721 log.info('Restarting cluster member %s'%standbys[i])
722 cord_test_onos_restart(node = member_onos_name)
723
724 status = self.verify_cluster_status(onos_instances = onos_instances)
725 assert_equal(status, True)
726 log.info('Cluster came up as expected after restarting %d members'%members)
727
    def test_cluster_state_with_master_change(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
730 assert_equal(status, True)
731 master, standbys = self.get_cluster_current_master_standbys()
732 assert_equal(len(standbys),(onos_instances-1))
        log.info('Cluster current master of devices is %s' %master)
        self.change_master_current_cluster(new_master = standbys[0])
735 log.info('Cluster master changed successfully')
736
737 #tested on single onos setup.
    def test_cluster_with_vrouter_routes_in_cluster_members(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
740 assert_equal(status, True)
741 onos_ips = self.get_cluster_current_member_ips()
742 self.vrouter.setUpClass()
743 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
744 assert_equal(res, True)
745 for onos_ip in onos_ips:
746 tries = 0
747 flag = False
748 try:
749 self.cliEnter(controller = onos_ip)
750 while tries <= 5:
751 routes = json.loads(self.cli.routes(jsonFormat = True))
752 if routes:
753 assert_equal(len(routes['routes4']), networks)
754 self.cliExit()
755 flag = True
756 break
757 else:
758 tries += 1
759 time.sleep(1)
760 assert_equal(flag, True)
761 except:
                log.info('Exception occurred while checking routes in onos instance %s' %onos_ip)
763 raise
764
765 #tested on single onos setup.
    def test_cluster_with_vrouter_and_master_down(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
768 assert_equal(status, True)
769 onos_ips = self.get_cluster_current_member_ips()
770 master, standbys = self.get_cluster_current_master_standbys()
771 onos_names_ips = self.get_cluster_container_names_ips()
772 master_onos_name = onos_names_ips[master]
773 self.vrouter.setUpClass()
774 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
775 assert_equal(res,True)
776 cord_test_onos_shutdown(node = master_onos_name)
777 time.sleep(60)
        log.info('Verifying vrouter traffic after cluster master is down')
        self.vrouter.vrouter_traffic_verify()
780
781 #tested on single onos setup.
    def test_cluster_with_vrouter_and_restarting_master(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
784 assert_equal(status, True)
785 onos_ips = self.get_cluster_current_member_ips()
786 master, standbys = self.get_cluster_current_master_standbys()
787 onos_names_ips = self.get_cluster_container_names_ips()
788 master_onos_name = onos_names_ips[master]
789 self.vrouter.setUpClass()
790 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
791 assert_equal(res, True)
792 cord_test_onos_restart()
793 self.vrouter.vrouter_traffic_verify()
794
795 #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
798 assert_equal(status, True)
799 self.vrouter.setUpClass()
800 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
801 assert_equal(res, True)
802 self.vrouter.vrouter_activate(deactivate=True)
803 time.sleep(15)
804 self.vrouter.vrouter_traffic_verify(positive_test=False)
805 self.vrouter.vrouter_activate(deactivate=False)
806
807 #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app_and_making_master_down(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
810 assert_equal(status, True)
811 master, standbys = self.get_cluster_current_master_standbys()
812 onos_names_ips = self.get_cluster_container_names_ips()
813 master_onos_name = onos_names_ips[master]
814 self.vrouter.setUpClass()
815 log.info('Verifying vrouter before master down')
816 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
817 assert_equal(res, True)
818 self.vrouter.vrouter_activate(deactivate=True)
819 log.info('Verifying vrouter traffic after app deactivated')
820 time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
821 self.vrouter.vrouter_traffic_verify(positive_test=False)
822 log.info('Verifying vrouter traffic after master down')
823 cord_test_onos_shutdown(node = master_onos_name)
824 time.sleep(60)
825 self.vrouter.vrouter_traffic_verify(positive_test=False)
826 self.vrouter.vrouter_activate(deactivate=False)
827
828 #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
831 assert_equal(status, True)
832 master, standbys = self.get_cluster_current_master_standbys()
833 onos_names_ips = self.get_cluster_container_names_ips()
834 member_onos_name = onos_names_ips[standbys[0]]
835 self.vrouter.setUpClass()
836 log.info('Verifying vrouter before cluster member down')
837 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
838 assert_equal(res, True) # Expecting vrouter should work properly
839 log.info('Verifying vrouter after cluster member down')
840 cord_test_onos_shutdown(node = member_onos_name)
841 time.sleep(60)
842 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster goes down
843
844 #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_restarting_member(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
847 assert_equal(status, True)
848 master, standbys = self.get_cluster_current_master_standbys()
849 onos_names_ips = self.get_cluster_container_names_ips()
850 member_onos_name = onos_names_ips[standbys[1]]
851 self.vrouter.setUpClass()
852 log.info('Verifying vrouter traffic before cluster member restart')
853 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
854 assert_equal(res, True) # Expecting vrouter should work properly
855 cord_test_onos_restart(node = member_onos_name)
856 log.info('Verifying vrouter traffic after cluster member restart')
857 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
858
859 #tested on single onos setup.
    def test_cluster_for_vrouter_app_restarting_cluster(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
862 assert_equal(status, True)
863 self.vrouter.setUpClass()
864 log.info('Verifying vrouter traffic before cluster restart')
865 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
866 assert_equal(res, True) # Expecting vrouter should work properly
867 cord_test_onos_restart()
868 log.info('Verifying vrouter traffic after cluster restart')
869 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
870
871
872 #test fails because flow state is in pending_add in onos
    def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
875 assert_equal(status, True)
876 master, standbys = self.get_cluster_current_master_standbys()
877 onos_names_ips = self.get_cluster_container_names_ips()
878 master_onos_name = onos_names_ips[master]
879 self.flows.setUpClass()
880 egress = 1
881 ingress = 2
882 egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
883 ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
884 flow = OnosFlowCtrl(deviceId = self.device_id,
885 egressPort = egress,
886 ingressPort = ingress,
887 udpSrc = ingress_map['udp_port'],
888 udpDst = egress_map['udp_port'],
889 controller=master
890 )
891 result = flow.addFlow()
892 assert_equal(result, True)
893 time.sleep(1)
894 self.success = False
895 def mac_recv_task():
896 def recv_cb(pkt):
897 log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
898 self.success = True
899 sniff(timeout=2,
900 lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
901 and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])
902
903 for i in [0,1]:
904 if i == 1:
905 cord_test_onos_shutdown(node = master_onos_name)
906 log.info('Verifying flows traffic after master killed')
907 time.sleep(45)
908 else:
909 log.info('Verifying flows traffic before master killed')
910 t = threading.Thread(target = mac_recv_task)
911 t.start()
912 L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
913 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
914 L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
915 pkt = L2/L3/L4
916 log.info('Sending packets to verify if flows are correct')
917 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
918 t.join()
919 assert_equal(self.success, True)
920
    def test_cluster_state_changing_master_and_flows_of_ecn(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
923 assert_equal(status, True)
924 master, standbys = self.get_cluster_current_master_standbys()
925 self.flows.setUpClass()
926 egress = 1
927 ingress = 2
928 egress_map = { 'ip': '192.168.30.1' }
929 ingress_map = { 'ip': '192.168.40.1' }
930 flow = OnosFlowCtrl(deviceId = self.device_id,
931 egressPort = egress,
932 ingressPort = ingress,
933 ecn = 1,
934 controller=master
935 )
936 result = flow.addFlow()
937 assert_equal(result, True)
938 ##wait for flows to be added to ONOS
939 time.sleep(1)
940 self.success = False
941 def mac_recv_task():
942 def recv_cb(pkt):
943 log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
944 self.success = True
945 sniff(count=2, timeout=5,
946 lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
947 and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
948 iface = self.flows.port_map[egress])
949 for i in [0,1]:
950 if i == 1:
951 log.info('Changing cluster master to %s'%standbys[0])
952 self.change_master_current_cluster(new_master=standbys[0])
                log.info('Verifying flow traffic after cluster master changed')
954 else:
955 log.info('Verifying flow traffic before cluster master changed')
956 t = threading.Thread(target = mac_recv_task)
957 t.start()
958 L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
959 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
960 pkt = L2/L3
961 log.info('Sending a packet to verify if flows are correct')
962 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
963 t.join()
964 assert_equal(self.success, True)
965
    #pass
    def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self, onos_instances = ONOS_INSTANCES):
968 status = self.verify_cluster_status(onos_instances=onos_instances)
969 assert_equal(status, True)
970 master,standbys = self.get_cluster_current_master_standbys()
971 onos_names_ips = self.get_cluster_container_names_ips()
972 master_onos_name = onos_names_ips[master]
973 self.flows.setUpClass()
974 egress = 1
975 ingress = 2
976 egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
977 ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
978 flow = OnosFlowCtrl(deviceId = self.device_id,
979 egressPort = egress,
980 ingressPort = ingress,
981 ipv6_extension = 0,
982 controller=master
983 )
984
985 result = flow.addFlow()
986 assert_equal(result, True)
987 ##wait for flows to be added to ONOS
988 time.sleep(1)
989 self.success = False
990 def mac_recv_task():
991 def recv_cb(pkt):
992 log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
993 self.success = True
994 sniff(timeout=2,count=5,
995 lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
996 for i in [0,1]:
997 if i == 1:
998 log.info('Restart cluster current master %s'%master)
999 Container(master_onos_name,Onos.IMAGE).restart()
1000 time.sleep(45)
1001 log.info('Verifying flow traffic after master restart')
1002 else:
1003 log.info('Verifying flow traffic before master restart')
1004 t = threading.Thread(target = mac_recv_task)
1005 t.start()
1006 L2 = self.flows_eth
1007 L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
1008 pkt = L2/L3
1009 log.info('Sending packets to verify if flows are correct')
1010 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
1011 t.join()
1012 assert_equal(self.success, True)
1013
1014 def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
1015 dst_mac = self.igmp.iptomac(group)
1016 eth = Ether(dst= dst_mac)
1017 ip = IP(dst=group,src=source)
1018 data = repr(monotonic.monotonic())
1019 sendp(eth/ip/data,count=20, iface = intf)
1020 pkt = (eth/ip/data)
1021 log.info('multicast traffic packet %s'%pkt.show())
1022
1023 def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
1024 log.info('verifying multicast traffic for group %s from source %s'%(group,source))
1025 self.success = False
1026 def recv_task():
1027 def igmp_recv_cb(pkt):
1028 log.info('multicast data received for group %s from source %s'%(group,source))
1029 self.success = True
1030 sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
1031 t = threading.Thread(target = recv_task)
1032 t.start()
1033 self.send_multicast_data_traffic(group,source=source)
1034 t.join()
1035 return self.success
1036
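    # Multicast verification sketch: the helpers above pair a scapy sniff on the
    # receiver interface (veth0 by default) with a burst of raw multicast data sent
    # on veth2, e.g. (group/source values illustrative):
    #   self.igmp.send_igmp_join(groups = ['224.2.3.4'], src_list = ['2.2.2.2'], ...)
    #   assert_equal(self.verify_igmp_data_traffic('224.2.3.4', source = '2.2.2.2'), True)
    # A True return means at least one data packet for that (group, source) pair was
    # seen within the 2 second sniff window.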
1037 #pass
1038 def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
1039 status = self.verify_cluster_status(onos_instances=onos_instances)
1040 assert_equal(status, True)
1041 master, standbys = self.get_cluster_current_master_standbys()
1042 assert_equal(len(standbys), (onos_instances-1))
1043 onos_names_ips = self.get_cluster_container_names_ips()
1044 master_onos_name = onos_names_ips[master]
1045 self.igmp.setUp(controller=master)
1046 groups = ['224.2.3.4','230.5.6.7']
1047 src_list = ['2.2.2.2','3.3.3.3']
1048 self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
1049 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1050 iface = self.V_INF1, delay = 2)
1051 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1052 iface = self.V_INF1, delay = 2)
1053 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1054 assert_equal(status,True)
1055 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1056 assert_equal(status,False)
1057 log.info('restarting cluster master %s'%master)
1058 Container(master_onos_name,Onos.IMAGE).restart()
1059 time.sleep(60)
1060 log.info('verifying multicast data traffic after master restart')
1061 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1062 assert_equal(status,True)
1063 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1064 assert_equal(status,False)
1065
1066 #pass
1067 def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
1068 status = self.verify_cluster_status(onos_instances=onos_instances)
1069 assert_equal(status, True)
1070 master, standbys = self.get_cluster_current_master_standbys()
1071 assert_equal(len(standbys), (onos_instances-1))
1072 onos_names_ips = self.get_cluster_container_names_ips()
1073 master_onos_name = onos_names_ips[master]
1074 self.igmp.setUp(controller=master)
1075 groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
1076 src_list = [self.igmp.randomsourceip()]
1077 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1078 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1079 iface = self.V_INF1, delay = 2)
1080 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1081 iface = self.V_INF1, delay = 2)
1082 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1083 assert_equal(status,True)
1084 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1085 assert_equal(status,False)
1086 log.info('Killing cluster master %s'%master)
1087 Container(master_onos_name,Onos.IMAGE).kill()
1088 time.sleep(60)
1089 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1090 assert_equal(status, True)
1091 log.info('Verifying multicast data traffic after cluster master down')
1092 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1093 assert_equal(status,True)
1094 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1095 assert_equal(status,False)
1096
1097 def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
1098 status = self.verify_cluster_status(onos_instances=onos_instances)
1099 assert_equal(status, True)
1100 master, standbys = self.get_cluster_current_master_standbys()
1101 assert_equal(len(standbys), (onos_instances-1))
1102 onos_names_ips = self.get_cluster_container_names_ips()
1103 master_onos_name = onos_names_ips[master]
1104 self.igmp.setUp(controller=master)
1105 groups = [self.igmp.random_mcast_ip()]
1106 src_list = [self.igmp.randomsourceip()]
1107 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1108 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1109 iface = self.V_INF1, delay = 2)
1110 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1111 assert_equal(status,True)
        log.info('Killing cluster master %s' %master)
1113 Container(master_onos_name,Onos.IMAGE).kill()
1114 count = 0
1115 for i in range(60):
1116 log.info('Verifying multicast data traffic after cluster master down')
1117 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1118 if status:
1119 break
1120 else:
1121 count += 1
1122 time.sleep(1)
1123 assert_equal(status, True)
        log.info('Time taken to recover traffic after cluster master down is %d seconds' %count)
1125
1126
1127 #pass
1128 def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
1129 status = self.verify_cluster_status(onos_instances=onos_instances)
1130 assert_equal(status, True)
1131 master, standbys = self.get_cluster_current_master_standbys()
1132 assert_equal(len(standbys), (onos_instances-1))
1133 self.igmp.setUp(controller=master)
1134 groups = [self.igmp.random_mcast_ip()]
1135 src_list = [self.igmp.randomsourceip()]
1136 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1137 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1138 iface = self.V_INF1, delay = 2)
1139 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1140 assert_equal(status,True)
1141 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
        self.change_master_current_cluster(new_master = standbys[0])
1143 log.info('Verifying multicast traffic after cluster master change')
1144 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1145 assert_equal(status,True)
1146 log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
1147 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
1148 iface = self.V_INF1, delay = 1)
1149 time.sleep(10)
1150 status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
1151 assert_equal(status,False)
1152
1153 #pass
1154 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1155 status = self.verify_cluster_status(onos_instances=onos_instances)
1156 assert_equal(status, True)
1157 master,standbys = self.get_cluster_current_master_standbys()
1158 assert_equal(len(standbys), (onos_instances-1))
1159 self.igmp.setUp(controller=master)
1160 groups = [self.igmp.random_mcast_ip()]
1161 src_list = [self.igmp.randomsourceip()]
1162 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1163 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
        self.change_master_current_cluster(new_master = standbys[0])
1165 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1166 iface = self.V_INF1, delay = 2)
1167 time.sleep(1)
1168 self.change_cluster_current_master(new_master = master)
1169 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1170 assert_equal(status,True)
1171
1172 #pass
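    # Basic EAP-TLS authentication against the current cluster master; the test runs
    # inside the twisted reactor via a deferred with TLS_TIMEOUT as the limit.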
1173 @deferred(TLS_TIMEOUT)
1174 def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
1175 status = self.verify_cluster_status(onos_instances=onos_instances)
1176 assert_equal(status, True)
1177 master, standbys = self.get_cluster_current_master_standbys()
1178 assert_equal(len(standbys), (onos_instances-1))
1179 self.tls.setUp(controller=master)
1180 df = defer.Deferred()
1181 def eap_tls_verify(df):
1182 tls = TLSAuthTest()
1183 tls.runTest()
1184 df.callback(0)
1185 reactor.callLater(0, eap_tls_verify, df)
1186 return df
1187
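    # Runs EAP-TLS authentication twice: once with the current master and once after
    # mastership has been handed over to the first standby.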
1188 @deferred(120)
1189 def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1190 master, standbys = self.get_cluster_current_master_standbys()
1191 assert_equal(len(standbys), (onos_instances-1))
1192 self.tls.setUp()
1193 df = defer.Deferred()
1194 def eap_tls_verify(df):
1195 tls = TLSAuthTest()
1196 tls.runTest()
1197 df.callback(0)
1198 for i in [0,1]:
1199 if i == 1:
1200 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1201 self.change_master_current_cluster(new_master=standbys[0])
1202 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1203 else:
1204 log.info('Verifying tls authentication before cluster master change')
1205 reactor.callLater(0, eap_tls_verify, df)
1206 return df
1207
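    # Runs EAP-TLS authentication before and after the current master container is shut
    # down, verifying the remaining instances still form a healthy cluster of size N-1.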
1208 @deferred(TLS_TIMEOUT)
1209 def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
1210 status = self.verify_cluster_status(onos_instances=onos_instances)
1211 assert_equal(status, True)
1212 master, standbys = self.get_cluster_current_master_standbys()
1213 assert_equal(len(standbys), (onos_instances-1))
1214 onos_names_ips = self.get_cluster_container_names_ips()
1215 master_onos_name = onos_names_ips[master]
1216 self.tls.setUp()
1217 df = defer.Deferred()
1218 def eap_tls_verify(df):
1219 tls = TLSAuthTest()
1220 tls.runTest()
1221 df.callback(0)
1222 for i in [0,1]:
1223 if i == 1:
1224 log.info('Killing cluster current master %s'%master)
1225 cord_test_onos_shutdown(node = master_onos_name)
1226 time.sleep(20)
1227 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1228 assert_equal(status, True)
1229 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1230 log.info('Verifying tls authentication after killing cluster master')
1231 reactor.callLater(0, eap_tls_verify, df)
1232 return df
1233
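    # Negative EAP-TLS case: authentication with an empty client certificate must fail
    # both before and after a standby member is restarted.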
1234 @deferred(TLS_TIMEOUT)
1235 def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
1236 status = self.verify_cluster_status(onos_instances=onos_instances)
1237 assert_equal(status, True)
1238 master, standbys = self.get_cluster_current_master_standbys()
1239 assert_equal(len(standbys), (onos_instances-1))
1240 onos_names_ips = self.get_cluster_container_names_ips()
1241 member_onos_name = onos_names_ips[standbys[0]]
1242 self.tls.setUp()
1243 df = defer.Deferred()
1244 def eap_tls_no_cert(df):
1245 def tls_no_cert_cb():
1246 log.info('TLS authentication failed with no certificate')
1247 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1248 tls.runTest()
1249 assert_equal(tls.failTest, True)
1250 df.callback(0)
1251 for i in [0,1]:
1252 if i == 1:
1253 log.info('Restart cluster member %s'%standbys[0])
1254 Container(member_onos_name,Onos.IMAGE).restart()
1255 time.sleep(20)
1256 status = self.verify_cluster_status(onos_instances=onos_instances)
1257 assert_equal(status, True)
1258 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1259 log.info('Verifying tls authentication after member restart')
1260 reactor.callLater(0, eap_tls_no_cert, df)
1261 return df
1262
1263 #pass
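    # Proxy ARP regression across a mastership change: ARP replies must be seen before
    # and after the change, must stop when the proxyarp app is deactivated on the new
    # master, and must resume once the app is reactivated.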
1264 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1265 status = self.verify_cluster_status(onos_instances=onos_instances)
1266 assert_equal(status,True)
1267 master,standbys = self.get_cluster_current_master_standbys()
1268 assert_equal(len(standbys),(onos_instances-1))
1269 self.proxyarp.setUpClass()
1270 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1271 ingress = hosts+1
1272 for hostip, hostmac in hosts_config:
1273 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1274 time.sleep(1)
1275 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1276 self.change_cluster_current_master(new_master=standbys[0])
1277 log.info('verifying proxyarp after master change')
1278 for hostip, hostmac in hosts_config:
1279 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1280 time.sleep(1)
1281 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1282 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1283 time.sleep(3)
1284 for hostip, hostmac in hosts_config:
1285 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1286 time.sleep(1)
1287 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1288 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1289 time.sleep(3)
1290 for hostip, hostmac in hosts_config:
1291 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1292 time.sleep(1)
1293
1294 #pass
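    # Proxy ARP must keep answering after one standby member is killed and the cluster
    # settles with N-1 instances.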
1295 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
1296 status = self.verify_cluster_status(onos_instances=onos_instances)
1297 assert_equal(status, True)
1298 master, standbys = self.get_cluster_current_master_standbys()
1299 assert_equal(len(standbys), (onos_instances-1))
1300 onos_names_ips = self.get_cluster_container_names_ips()
1301 member_onos_name = onos_names_ips[standbys[1]]
1302 self.proxyarp.setUpClass()
1303 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1304 ingress = hosts+1
1305 for hostip, hostmac in hosts_config:
1306 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1307 time.sleep(1)
1308 log.info('killing cluster member %s'%standbys[1])
1309 Container(member_onos_name,Onos.IMAGE).kill()
1310 time.sleep(20)
1311 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1312 assert_equal(status, True)
1313 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1314 log.info('verifying proxy arp functionality after cluster member down')
1315 for hostip, hostmac in hosts_config:
1316 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1317 time.sleep(1)
1318
1319 #pass
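    # Ten ARP requests are issued concurrently from different ingress ports, each in its
    # own thread; success_dir collects one entry per thread that saw a reply, so all ten
    # entries must be present for the test to pass.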
1320 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1321 status = self.verify_cluster_status(onos_instances=onos_instances)
1322 assert_equal(status, True)
1323 self.proxyarp.setUpClass()
1324 master, standbys = self.get_cluster_current_master_standbys()
1325 assert_equal(len(standbys), (onos_instances-1))
1326 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1327 self.success = True
1328 ingress = hosts+1
1329 ports = range(ingress,ingress+10)
1330 hostmac = []
1331 hostip = []
1332 for ip,mac in hosts_config:
1333 hostmac.append(mac)
1334 hostip.append(ip)
1335 success_dir = {}
1336 def verify_proxyarp(*r):
1337 ingress, hostmac, hostip = r[0],r[1],r[2]
1338 def mac_recv_task():
1339 def recv_cb(pkt):
1340 log.info('ARP reply seen with source MAC %s' %(pkt[ARP].hwsrc))
1341 success_dir[current_thread().name] = True
1342 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1343 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1344 t = threading.Thread(target = mac_recv_task)
1345 t.start()
1346 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1347 log.info('Sending arp request for dest ip %s on interface %s' %
1348 (hostip,self.proxyarp.port_map[ingress]))
1349 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1350 t.join()
1351 t = []
1352 for i in range(10):
1353 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1354 for i in range(10):
1355 t[i].start()
1356 time.sleep(2)
1357 for i in range(10):
1358 t[i].join()
1359 if len(success_dir) != 10:
1360 self.success = False
1361 assert_equal(self.success, True)
1362
1363 #pass
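    # Adds an ACL allow rule through the current master, changes mastership, and removes
    # the same rule through the new master to confirm the rule store is shared across the cluster.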
1364 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1365 status = self.verify_cluster_status(onos_instances=onos_instances)
1366 assert_equal(status, True)
1367 master,standbys = self.get_cluster_current_master_standbys()
1368 assert_equal(len(standbys),(onos_instances-1))
1369 self.acl.setUp()
1370 acl_rule = ACLTest()
1371 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1372 if status is False:
1373 log.info('JSON request returned status %d' %code)
1374 assert_equal(status, True)
1375 result = acl_rule.get_acl_rules(controller=master)
1376 aclRules1 = result.json()['aclRules']
1377 log.info('Added ACL rules are %s'%aclRules1)
1378 acl_Id = map(lambda d: d['id'], aclRules1)
1379 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1380 self.change_cluster_current_master(new_master=standbys[0])
1381 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1382 if status is False:
1383 log.info('JSON request returned status %d' %code)
1384 assert_equal(status, True)
1385
1386 #pass
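    # Adds an ACL rule via the master, kills the master, and verifies the surviving
    # cluster elects a new master that still reports the same ACL rule ids.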
1387 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1388 status = self.verify_cluster_status(onos_instances=onos_instances)
1389 assert_equal(status, True)
1390 master,standbys = self.get_cluster_current_master_standbys()
1391 assert_equal(len(standbys),(onos_instances-1))
1392 onos_names_ips = self.get_cluster_container_names_ips()
1393 master_onos_name = onos_names_ips[master]
1394 self.acl.setUp()
1395 acl_rule = ACLTest()
1396 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1397 if status is False:
1398 log.info('JSON request returned status %d' %code)
1399 assert_equal(status, True)
1400 result1 = acl_rule.get_acl_rules(controller=master)
1401 aclRules1 = result1.json()['aclRules']
1402 log.info('Added ACL rules are %s'%aclRules1)
1403 acl_Id1 = map(lambda d: d['id'], aclRules1)
1404 log.info('Killing cluster current master %s'%master)
1405 Container(master_onos_name,Onos.IMAGE).kill()
1406 time.sleep(45)
1407 status = self.verify_cluster_status(onos_instances=onos_instances,controller=standbys[0])
1408 assert_equal(status, True)
1409 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1410 assert_equal(len(standbys),(onos_instances-2))
1411 assert_not_equal(new_master,master)
1412 result2 = acl_rule.get_acl_rules(controller=new_master)
1413 aclRules2 = result2.json()['aclRules']
1414 acl_Id2 = map(lambda d: d['id'], aclRules2)
1415 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1416 assert_equal(acl_Id2,acl_Id1)
1417
1418 #ACL traffic scenario is not working because the ACL rule is not getting added to ONOS
1419 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1420 status = self.verify_cluster_status(onos_instances=onos_instances)
1421 assert_equal(status, True)
1422 master,standbys = self.get_cluster_current_master_standbys()
1423 assert_equal(len(standbys),(onos_instances-1))
1424 onos_names_ips = self.get_cluster_container_names_ips()
1425 member1_onos_name = onos_names_ips[standbys[0]]
1426 member2_onos_name = onos_names_ips[standbys[1]]
1427 ingress = self.acl.ingress_iface
1428 egress = self.acl.CURRENT_PORT_NUM
1429 acl_rule = ACLTest()
1430 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1431 self.acl.CURRENT_PORT_NUM += 1
1432 time.sleep(5)
1433 if status is False:
1434 log.info('JSON request returned status %d' %code)
1435 assert_equal(status, True)
1436 srcMac = '00:00:00:00:00:11'
1437 dstMac = host_ip_mac[0][1]
1438 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1439 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1440 time.sleep(10)
1441 if status is False:
1442 log.info('JSON request returned status %d' %code)
1443 assert_equal(status, True)
1444 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1445 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1446 Container(member1_onos_name, Onos.IMAGE).kill()
1447 Container(member2_onos_name, Onos.IMAGE).kill()
1448 time.sleep(40)
1449 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1450 assert_equal(status, True)
1451 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1452 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1453
1454 #pass
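    # DHCP relay: obtains a lease through the current master, changes mastership, releases
    # the lease via the new master, and expects the same address back on the next discover.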
1455 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1456 status = self.verify_cluster_status(onos_instances=onos_instances)
1457 assert_equal(status, True)
1458 master,standbys = self.get_cluster_current_master_standbys()
1459 assert_equal(len(standbys),(onos_instances-1))
1460 self.dhcprelay.setUpClass(controller=master)
1461 mac = self.dhcprelay.get_mac(iface)
1462 self.dhcprelay.host_load(iface)
1463 ## We use the defaults for this test, which serves as an example for others
1464 ## There is no need to restart the dhcpd server if the default config is retained
1465 config = self.dhcprelay.default_config
1466 options = self.dhcprelay.default_options
1467 subnet = self.dhcprelay.default_subnet_config
1468 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1469 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1470 config = config,
1471 options = options,
1472 subnet = subnet,
1473 controller=master)
1474 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1475 cip, sip = self.dhcprelay.send_recv(mac)
1476 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1477 self.change_master_current_cluster(new_master=standbys[0])
1478 log.info('Releasing ip %s to server %s' %(cip, sip))
1479 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1480 log.info('Triggering DHCP discover again after release')
1481 cip2, sip2 = self.dhcprelay.send_recv(mac)
1482 log.info('Verifying released IP was given back on rediscover')
1483 assert_equal(cip, cip2)
1484 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1485 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
1486 self.dhcprelay.tearDownClass(controller=standbys[0])
1487
1488
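    # DHCP relay: obtains a lease, kills the current master, and verifies the client is
    # re-confirmed with the same address once the cluster stabilises on N-1 nodes.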
1489 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1490 status = self.verify_cluster_status(onos_instances=onos_instances)
1491 assert_equal(status, True)
1492 master,standbys = self.get_cluster_current_master_standbys()
1493 assert_equal(len(standbys),(onos_instances-1))
1494 onos_names_ips = self.get_cluster_container_names_ips()
1495 master_onos_name = onos_names_ips[master]
1496 self.dhcprelay.setUpClass(controller=master)
1497 mac = self.dhcprelay.get_mac(iface)
1498 self.dhcprelay.host_load(iface)
1499 ## We use the defaults for this test, which serves as an example for others
1500 ## There is no need to restart the dhcpd server if the default config is retained
1501 config = self.dhcprelay.default_config
1502 options = self.dhcprelay.default_options
1503 subnet = self.dhcprelay.default_subnet_config
1504 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1505 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1506 config = config,
1507 options = options,
1508 subnet = subnet,
1509 controller=master)
1510 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1511 log.info('Initiating dhcp process from client %s'%mac)
1512 cip, sip = self.dhcprelay.send_recv(mac)
1513 log.info('Killing cluster current master %s'%master)
1514 Container(master_onos_name, Onos.IMAGE).kill()
1515 time.sleep(60)
1516 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1517 assert_equal(status, True)
1518 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1519 log.info("Verifying dhcp clients gets same IP after cluster master restarts")
1520 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1521 assert_equal(new_cip, cip)
1522 self.dhcprelay.tearDownClass(controller=standbys[0])
1523
1524 #pass
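    # Simulates three DHCP clients while mastership moves between controllers; each client
    # must still be served an address regardless of which node is master.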
1525 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1526 status = self.verify_cluster_status(onos_instances=onos_instances)
1527 assert_equal(status, True)
1528 master,standbys = self.get_cluster_current_master_standbys()
1529 assert_equal(len(standbys),(onos_instances-1))
1530 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001531 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1532 self.dhcprelay.host_load(iface)
1533 ## We use the defaults for this test, which serves as an example for others
1534 ## There is no need to restart the dhcpd server if the default config is retained
1535 config = self.dhcprelay.default_config
1536 options = self.dhcprelay.default_options
1537 subnet = self.dhcprelay.default_subnet_config
1538 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1539 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1540 config = config,
1541 options = options,
1542 subnet = subnet,
1543 controller=master)
1544 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1545 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1546 assert_not_equal(cip1,None)
1547 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1548 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1549 self.change_master_current_cluster(new_master=standbys[0])
1550 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1551 assert_not_equal(cip2,None)
1552 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1553 self.change_master_current_cluster(new_master=master)
1554 log.info('Changing cluster master from %s to %s'%(standbys[0],master))
1555 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1556 assert_not_equal(cip3,None)
1557 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
1558 self.dhcprelay.tearDownClass(controller=standbys[0])
1559
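    # CORD subscriber join/next-channel verification before and after a full cluster restart
    # (tls, dhcp, igmp and traffic callbacks for 5 subscribers x 10 channels).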
1560 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
1561 status = self.verify_cluster_status(onos_instances=onos_instances)
1562 assert_equal(status, True)
     master, standbys = self.get_cluster_current_master_standbys()
     assert_equal(len(standbys), (onos_instances-1))
1563 self.subscriber.setUpClass(controller=master)
1564 self.subscriber.num_subscribers = 5
1565 self.subscriber.num_channels = 10
1566 for i in [0,1]:
1567 if i == 1:
1568 cord_test_onos_restart()
1569 time.sleep(45)
1570 status = self.verify_cluster_status(onos_instances=onos_instances)
1571 assert_equal(status, True)
1572 log.info('Verifying cord subscriber functionality after cluster restart')
1573 else:
1574 log.info('Verifying cord subscriber functionality before cluster restart')
1575 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1576 num_channels = self.subscriber.num_channels,
1577 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1578 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1579 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1580 self.subscriber.num_channels))
1581 assert_equal(test_status, True)
1582 self.subscriber.tearDownClass(controller=master)
1583
1584 #Not validated on cluster setup because the ciena-cordigmp-multitable-2.0 app installation fails on the cluster
1585 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1586 status = self.verify_cluster_status(onos_instances=onos_instances)
1587 assert_equal(status, True)
1588 master,standbys = self.get_cluster_current_master_standbys()
1589 assert_equal(len(standbys),(onos_instances-1))
1590 self.subscriber.setUpClass(controller=master)
1591 self.subscriber.num_subscribers = 5
1592 self.subscriber.num_channels = 10
1593 for i in [0,1]:
1594 if i == 1:
1595 status=self.withdraw_cluster_current_mastership(master_ip=master)
1596 assert_equal(status, True)
1597 master,standbys = self.get_cluster_current_master_standbys()
1598 log.info('Verifying cord subscriber functionality after the current cluster master withdraws mastership')
1599 else:
1600 log.info('Verifying cord subscriber functionality before the cluster master withdraws mastership')
1601 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1602 num_channels = self.subscriber.num_channels,
1603 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1604 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1605 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1606 self.subscriber.num_channels),controller=master)
1607 assert_equal(test_status, True)
1608 self.subscriber.tearDownClass(controller=master)
1609
1610 #Not validated on cluster setup because the ciena-cordigmp-multitable-2.0 app installation fails on the cluster
1611 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
1612 status = self.verify_cluster_status(onos_instances=onos_instances)
1613 assert_equal(status, True)
1614 master, standbys = self.get_cluster_current_master_standbys()
1615 assert_equal(len(standbys),(onos_instances-1))
1616 onos_names_ips = self.get_cluster_container_names_ips()
1617 member_onos_name = onos_names_ips[standbys[0]]
1618 self.subscriber.setUpClass(controller=master)
1619 num_subscribers = 1
1620 num_channels = 10
1621 for i in [0,1]:
1622 if i == 1:
1623 cord_test_onos_shutdown(node = member_onos_name)
1624 time.sleep(30)
1625 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
1626 assert_equal(status, True)
1627 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1628 else:
1629 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1630 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1631 num_channels = num_channels,
1632 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1633 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1634 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1635 negative_subscriber_auth = 'all',controller=master)
1636 assert_equal(test_status, True)
1637 self.subscriber.tearDownClass(controller=master)
1638
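    # CORD subscriber join/next-channel verification before and after two standby members
    # are shut down and the cluster settles on N-2 instances.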
1639 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
1640 status = self.verify_cluster_status(onos_instances=onos_instances)
1641 assert_equal(status, True)
1642 master, standbys = self.get_cluster_current_master_standbys()
1643 assert_equal(len(standbys),(onos_instances-1))
1644 onos_names_ips = self.get_cluster_container_names_ips()
1645 member1_onos_name = onos_names_ips[standbys[0]]
1646 member2_onos_name = onos_names_ips[standbys[1]]
1647 self.subscriber.setUpClass(controller=master)
1648 num_subscribers = 1
1649 num_channels = 10
1650 for i in [0,1]:
1651 if i == 1:
1652 cord_test_onos_shutdown(node = member1_onos_name)
1653 cord_test_onos_shutdown(node = member2_onos_name)
1654 time.sleep(60)
1655 status = self.verify_cluster_status(onos_instances=onos_instances-2)
1656 assert_equal(status, True)
1657 log.info('Verifying cord subscriber functionality after cluster two members %s and %s down'%(standbys[0],standbys[1]))
1658 else:
1659 log.info('Verifying cord subscriber functionality before cluster two members %s and %s down'%(standbys[0],standbys[1]))
1660 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1661 num_channels = num_channels,
1662 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1663 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1664 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1665 negative_subscriber_auth = 'all')
1666 assert_equal(test_status, True)
1667 self.subscriber.tearDownClass(controller=master)
1668
1669 #pass
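    # With multiple OVS switches connected, every device must report a master and
    # onos_instances-1 standbys.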
1670 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1671 status = self.verify_cluster_status(onos_instances=onos_instances)
1672 assert_equal(status, True)
1673 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1674 for device in device_dict.keys():
1675 log.info("Device is %s"%device_dict[device])
1676 assert_not_equal(device_dict[device]['master'],'none')
1677 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1678 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1679
1680 #pass
1681 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1682 status = self.verify_cluster_status(onos_instances=onos_instances)
1683 assert_equal(status, True)
1684 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1685 cluster_ips = self.get_cluster_current_member_ips()
1686 for ip in cluster_ips:
1687 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1688 assert_equal(len(device_dict.keys()),onos_instances)
1689 for device in device_dict.keys():
1690 log.info("Device is %s"%device_dict[device])
1691 assert_not_equal(device_dict[device]['master'],'none')
1692 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1693 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1694
1695 #pass
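    # Restarts one controller that currently masters at least one device and checks that
    # all devices are re-balanced away from it. get_number_of_devices_of_master() is used
    # as a per-controller summary; a hypothetical return value would look like
    # {'172.17.0.2': {'size': 2, 'devices': ['of:0000000000000001', ...]}, ...}
    # (the IPs and device ids here are illustrative only).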
1696 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1697 status = self.verify_cluster_status(onos_instances=onos_instances)
1698 assert_equal(status, True)
1699 onos_names_ips = self.get_cluster_container_names_ips()
1700 master_count = self.get_number_of_devices_of_master()
1701 log.info('Master count information is %s'%master_count)
1702 total_devices = 0
1703 for master in master_count.keys():
1704 total_devices += master_count[master]['size']
1705 if master_count[master]['size'] != 0:
1706 restart_ip = master
1707 assert_equal(total_devices,onos_instances)
1708 member_onos_name = onos_names_ips[restart_ip]
1709 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1710 Container(member_onos_name, Onos.IMAGE).restart()
1711 time.sleep(40)
1712 master_count = self.get_number_of_devices_of_master()
1713 log.info('Master count information after restart is %s'%master_count)
1714 total_devices = 0
1715 for master in master_count.keys():
1716 total_devices += master_count[master]['size']
1717 if master == restart_ip:
1718 assert_equal(master_count[master]['size'], 0)
1719 assert_equal(total_devices,onos_instances)
1720
1721 #pass
1722 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1723 status = self.verify_cluster_status(onos_instances=onos_instances)
1724 assert_equal(status, True)
1725 onos_names_ips = self.get_cluster_container_names_ips()
1726 master_count = self.get_number_of_devices_of_master()
1727 log.info('Master count information is %s'%master_count)
1728 total_devices = 0
1729 for master in master_count.keys():
1730 total_devices += master_count[master]['size']
1731 if master_count[master]['size'] != 0:
1732 restart_ip = master
1733 assert_equal(total_devices,onos_instances)
1734 master_onos_name = onos_names_ips[restart_ip]
1735 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1736 Container(master_onos_name, Onos.IMAGE).kill()
1737 time.sleep(40)
1738 for ip in onos_names_ips.keys():
1739 if ip != restart_ip:
1740 controller_ip = ip
1741 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1742 assert_equal(status, True)
1743 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1744 log.info('Master count information after master down is %s'%master_count)
1745 total_devices = 0
1746 for master in master_count.keys():
1747 total_devices += master_count[master]['size']
1748 if master == restart_ip:
1749 assert_equal(master_count[master]['size'], 0)
1750 assert_equal(total_devices,onos_instances)
1751
1752 #pass
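    # Withdraws mastership of one device from its current master and checks that the
    # master's device count drops by exactly one while the overall device total is unchanged.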
1753 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1754 status = self.verify_cluster_status(onos_instances=onos_instances)
1755 assert_equal(status, True)
1756 master_count = self.get_number_of_devices_of_master()
1757 log.info('Master count information is %s'%master_count)
1758 total_devices = 0
1759 for master in master_count.keys():
1760 total_devices += int(master_count[master]['size'])
1761 if master_count[master]['size'] != 0:
1762 master_ip = master
1763 log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
1764 device_id = str(master_count[master]['devices'][0])
1765 device_count = master_count[master]['size']
1766 assert_equal(total_devices,onos_instances)
1767 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1768 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1769 assert_equal(status, True)
1770 master_count = self.get_number_of_devices_of_master()
1771 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1772 total_devices = 0
1773 for master in master_count.keys():
1774 total_devices += int(master_count[master]['size'])
1775 if master == master_ip:
1776 assert_equal(master_count[master]['size'], device_count-1)
1777 assert_equal(total_devices,onos_instances)
1778
1779 #pass
1780 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1781 status = self.verify_cluster_status(onos_instances=onos_instances)
1782 assert_equal(status, True)
1783 master_count = self.get_number_of_devices_of_master()
1784 log.info('Master count information is %s'%master_count)
1785 total_devices = 0
1786 for master in master_count.keys():
1787 total_devices += master_count[master]['size']
1788 assert_equal(total_devices,onos_instances)
1789 log.info('Restarting cluster')
1790 cord_test_onos_restart()
1791 time.sleep(60)
1792 master_count = self.get_number_of_devices_of_master()
1793 log.info('Master count information after restart is %s'%master_count)
1794 total_devices = 0
1795 for master in master_count.keys():
1796 total_devices += master_count[master]['size']
1797 assert_equal(total_devices,onos_instances)