ChetanGaonker2099d722016-10-07 15:16:58 -0700001# Copyright 2016-present Ciena Corporation
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14#
15import unittest
16from nose.tools import *
17from scapy.all import *
18from OnosCtrl import OnosCtrl, get_mac
19from OltConfig import OltConfig
20from socket import socket
21from OnosFlowCtrl import OnosFlowCtrl
22from nose.twistedtools import reactor, deferred
23from twisted.internet import defer
24from onosclidriver import OnosCliDriver
25from CordContainer import Container, Onos, Quagga
A.R Karthick2560f042016-11-30 14:38:52 -080026from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart, cord_test_restart_cluster
ChetanGaonker2099d722016-10-07 15:16:58 -070027from portmaps import g_subscriber_port_map
28from scapy.all import *
29import time, monotonic
30import threading
31from threading import current_thread
32from Cluster import *
33from EapTLS import TLSAuthTest
34from ACL import ACLTest
A R Karthick1f908202016-11-16 17:32:20 -080035from OnosLog import OnosLog
36from CordLogger import CordLogger
ChetanGaonker2099d722016-10-07 15:16:58 -070037import os
38import json
39import random
40import collections
41log.setLevel('INFO')
42
A R Karthick1f908202016-11-16 17:32:20 -080043class cluster_exchange(CordLogger):
ChetanGaonker2099d722016-10-07 15:16:58 -070044 test_path = os.path.dirname(os.path.realpath(__file__))
45 onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
46 mac = RandMAC()._fix()
47 flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
48 igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
49 igmp_ip = IP(dst = '224.0.0.22')
50 ONOS_INSTANCES = 3
51 V_INF1 = 'veth0'
52 TLS_TIMEOUT = 100
53 device_id = 'of:' + get_mac()
54 igmp = cluster_igmp()
55 igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10',end_ip = '224.1.10.49')
56 igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35',end_ip='38.24.35.56')
57 tls = cluster_tls()
58 flows = cluster_flows()
59 proxyarp = cluster_proxyarp()
60 vrouter = cluster_vrouter()
61 acl = cluster_acl()
62 dhcprelay = cluster_dhcprelay()
63 subscriber = cluster_subscriber()
A.R Karthick2560f042016-11-30 14:38:52 -080064 testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_single_controller_restarts', 'test_cluster_restarts')
A R Karthick1f908202016-11-16 17:32:20 -080065
66 def setUp(self):
67 if self._testMethodName not in self.testcaseLoggers:
68 super(cluster_exchange, self).setUp()
69
70 def tearDown(self):
71 if self._testMethodName not in self.testcaseLoggers:
72 super(cluster_exchange, self).tearDown()
ChetanGaonker2099d722016-10-07 15:16:58 -070073
74 def get_controller(self):
75 controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
76 controller = controller.split(',')[0]
77 return controller
78
A R Karthick1f908202016-11-16 17:32:20 -080079 @classmethod
80 def get_controllers(cls):
81 controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
82 return controllers.split(',')
83
A R Karthick6cc8b812016-12-09 10:24:40 -080084 def cliEnter(self, controller = None):
ChetanGaonker2099d722016-10-07 15:16:58 -070085 retries = 0
A R Karthick6cc8b812016-12-09 10:24:40 -080086 while retries < 30:
87 self.cli = OnosCliDriver(controller = controller, connect = True)
ChetanGaonker2099d722016-10-07 15:16:58 -070088 if self.cli.handle:
89 break
90 else:
91 retries += 1
92 time.sleep(2)
93
94 def cliExit(self):
95 self.cli.disconnect()
96
A R Karthick1f908202016-11-16 17:32:20 -080097 def get_leader(self, controller = None):
98 self.cliEnter(controller = controller)
A R Karthickde6b9dc2016-11-29 17:46:16 -080099 try:
100 result = json.loads(self.cli.leaders(jsonFormat = True))
101 except:
102 result = None
103
A R Karthick1f908202016-11-16 17:32:20 -0800104 if result is None:
105 log.info('Leaders command failure for controller %s' %controller)
106 else:
107 log.info('Leaders returned: %s' %result)
108 self.cliExit()
109 return result
110
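# log_set() below is a thin wrapper around CordLogger.logSet; the restart tests use it to
# raise the log level on selected controllers, typically along the lines of:
#   self.log_set(controllers = controllers)                      # default app org.onosproject
#   self.log_set(app = 'io.atomix', controllers = controllers)
# (usage sketch taken from the calls made later in this file)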
A R Karthicke14fc022016-12-08 14:50:29 -0800111 def log_set(self, level = None, app = 'org.onosproject', controllers = None):
112 CordLogger.logSet(level = level, app = app, controllers = controllers, forced = True)
A R Karthickef1232d2016-12-07 09:18:15 -0800113
A R Karthick1f908202016-11-16 17:32:20 -0800114 def get_leaders(self, controller = None):
A.R Karthick45ab3e12016-11-30 11:25:51 -0800115 result_map = {}
116 if controller is None:
117 controller = self.get_controller()
A R Karthick1f908202016-11-16 17:32:20 -0800118 if type(controller) in [ list, tuple ]:
119 for c in controller:
120 leaders = self.get_leader(controller = c)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800121 result_map[c] = leaders
A R Karthick1f908202016-11-16 17:32:20 -0800122 else:
123 leaders = self.get_leader(controller = controller)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800124 result_map[controller] = leaders
125 return result_map
A R Karthick1f908202016-11-16 17:32:20 -0800126
A R Karthickec2db322016-11-17 15:06:01 -0800127 def verify_leaders(self, controller = None):
A.R Karthick45ab3e12016-11-30 11:25:51 -0800128 leaders_map = self.get_leaders(controller = controller)
129 failed = [ k for k,v in leaders_map.items() if v == None ]
A R Karthickec2db322016-11-17 15:06:01 -0800130 return failed
131
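# verify_cluster_status() polls the ONOS 'summary' output: with verify=True the reported
# node count must equal onos_instances exactly, otherwise any count >= onos_instances is
# accepted (see the two branches below).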
ChetanGaonker2099d722016-10-07 15:16:58 -0700132 def verify_cluster_status(self,controller = None,onos_instances=ONOS_INSTANCES,verify=False):
133 tries = 0
134 try:
135 self.cliEnter(controller = controller)
136 while tries <= 10:
137 cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
138 if cluster_summary:
139 log.info("cluster 'summary' command output is %s"%cluster_summary)
140 nodes = cluster_summary['nodes']
141 if verify:
142 if nodes == onos_instances:
143 self.cliExit()
144 return True
145 else:
146 tries += 1
147 time.sleep(1)
148 else:
149 if nodes >= onos_instances:
150 self.cliExit()
151 return True
152 else:
153 tries += 1
154 time.sleep(1)
155 else:
156 tries += 1
157 time.sleep(1)
158 self.cliExit()
159 return False
160 except:
ChetanGaonker689b3862016-10-17 16:25:01 -0700161 raise Exception('Failed to get cluster summary')
162 return False
ChetanGaonker2099d722016-10-07 15:16:58 -0700163
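# get_cluster_current_member_ips() parses the ONOS 'nodes' output; each entry is assumed to
# carry at least an 'id' (the member IP) and a 'state' field. The optional nodes_filter
# callable can restrict the result to healthy members, for example:
#   nodes_filter = lambda nodes: [ n for n in nodes if n['state'] in ( 'ACTIVE', 'READY' ) ]
# (mirrors the filter used by the restart tests further down)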
A.R Karthick45ab3e12016-11-30 11:25:51 -0800164 def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
ChetanGaonker2099d722016-10-07 15:16:58 -0700165 tries = 0
166 cluster_ips = []
167 try:
168 self.cliEnter(controller = controller)
169 while tries <= 10:
170 cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
171 if cluster_nodes:
172 log.info("cluster 'nodes' output is %s"%cluster_nodes)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800173 if nodes_filter:
174 cluster_nodes = nodes_filter(cluster_nodes)
ChetanGaonker2099d722016-10-07 15:16:58 -0700175 cluster_ips = map(lambda c: c['id'], cluster_nodes)
176 self.cliExit()
177 cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
178 return cluster_ips
179 else:
180 tries += 1
181 self.cliExit()
182 return cluster_ips
183 except:
184 raise Exception('Failed to get cluster members')
185 return cluster_ips
186
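# get_cluster_container_names_ips() builds a two-way map between member IPs and ONOS
# container names: the first member maps to Onos.NAME, the i-th member to '<Onos.NAME>-<i+1>',
# so lookups work in both directions (ip -> name and name -> ip).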
ChetanGaonker689b3862016-10-17 16:25:01 -0700187 def get_cluster_container_names_ips(self,controller=None):
A R Karthick1f908202016-11-16 17:32:20 -0800188 onos_names_ips = {}
189 onos_ips = self.get_cluster_current_member_ips(controller=controller)
190 onos_names_ips[onos_ips[0]] = Onos.NAME
191 onos_names_ips[Onos.NAME] = onos_ips[0]
192 for i in range(1,len(onos_ips)):
193 name = '{0}-{1}'.format(Onos.NAME,i+1)
194 onos_names_ips[onos_ips[i]] = name
195 onos_names_ips[name] = onos_ips[i]
ChetanGaonker2099d722016-10-07 15:16:58 -0700196
197 return onos_names_ips
198
199 #identifying current master of a connected device, not tested
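# Each parsed 'roles' entry is assumed to look roughly like this (only the keys accessed
# below matter):
#   { 'id': 'of:0000000000000001', 'master': '<node-ip>', 'standbys': [ '<node-ip>', ... ] }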
200 def get_cluster_current_master_standbys(self,controller=None,device_id=device_id):
201 master = None
202 standbys = []
203 tries = 0
204 try:
205 cli = self.cliEnter(controller = controller)
206 while tries <= 10:
207 roles = json.loads(self.cli.roles(jsonFormat = True))
208 log.info("cluster 'roles' command output is %s"%roles)
209 if roles:
210 for device in roles:
211 log.info('Verifying device info in line %s'%device)
212 if device['id'] == device_id:
213 master = str(device['master'])
214 standbys = map(lambda d: str(d), device['standbys'])
215 log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
216 self.cliExit()
217 return master, standbys
218 self.cliExit()
219 return master, standbys
220 else:
221 tries += 1
ChetanGaonker689b3862016-10-17 16:25:01 -0700222 time.sleep(1)
223 self.cliExit()
224 return master,standbys
225 except:
226 raise Exception('Failed to get cluster members')
227 return master,standbys
228
229 def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
230 ''' returns master and standbys of all the devices connected to the ONOS cluster instance'''
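# The returned device_dict is keyed by device id, roughly (sketch):
#   { 'of:0000000000000001': { 'master': '<node-ip>', 'standbys': [ '<node-ip>', ... ] } }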
231 device_dict = {}
232 tries = 0
233 try:
234 cli = self.cliEnter(controller = controller)
235 while tries <= 10:
236 device_dict = {}
237 roles = json.loads(self.cli.roles(jsonFormat = True))
238 log.info("cluster 'roles' command output is %s"%roles)
239 if roles:
240 for device in roles:
241 device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
242 for i in range(len(device_dict[device['id']]['standbys'])):
243 device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
244 log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
245 self.cliExit()
246 return device_dict
247 else:
248 tries += 1
ChetanGaonker2099d722016-10-07 15:16:58 -0700249 time.sleep(1)
250 self.cliExit()
ChetanGaonker689b3862016-10-17 16:25:01 -0700251 return device_dict
252 except:
253 raise Exception('Failed to get cluster members')
254 return device_dict
255
256 #list all devices connected to the ONOS cluster, not tested
257 def get_cluster_connected_devices(self,controller=None):
258 '''returns all the devices connected to ONOS cluster'''
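# Returns a plain list of device ids, e.g. [ 'of:0000000000000001', ... ]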
259 device_list = []
260 tries = 0
261 try:
262 cli = self.cliEnter(controller = controller)
263 while tries <= 10:
264 device_list = []
265 devices = json.loads(self.cli.devices(jsonFormat = True))
266 log.info("cluster 'devices' command output is %s"%devices)
267 if devices:
268 for device in devices:
269 log.info('device id is %s'%device['id'])
270 device_list.append(str(device['id']))
271 self.cliExit()
272 return device_list
273 else:
274 tries += 1
275 time.sleep(1)
276 self.cliExit()
277 return device_list
278 except:
279 raise Exception('Failed to get cluster members')
280 return device_list
281
282 def get_number_of_devices_of_master(self,controller=None):
283 '''returns master-device pairs, i.e. which devices each master controls'''
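# Each parsed 'masters' entry is assumed to carry 'id', 'size' and 'devices', so the
# returned map looks roughly like:
#   { '<node-ip>': { 'size': 2, 'devices': [ 'of:...', 'of:...' ] } }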
284 master_count = {}
285 try:
286 cli = self.cliEnter(controller = controller)
287 masters = json.loads(self.cli.masters(jsonFormat = True))
288 if masters:
289 for master in masters:
290 master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
291 return master_count
292 else:
293 return master_count
ChetanGaonker2099d722016-10-07 15:16:58 -0700294 except:
ChetanGaonker689b3862016-10-17 16:25:01 -0700295 raise Exception('Failed to get cluster members')
296 return master_count
ChetanGaonker2099d722016-10-07 15:16:58 -0700297
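# change_master_current_cluster() drives mastership through the ONOS CLI: it issues
# 'device-role <device-id> <new-master-ip> master' and then re-reads the roles to confirm
# that the requested node took over mastership of the device.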
298 def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
299 if new_master is None: return False
ChetanGaonker689b3862016-10-17 16:25:01 -0700300 self.cliEnter(controller=controller)
ChetanGaonker2099d722016-10-07 15:16:58 -0700301 cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
302 command = self.cli.command(cmd = cmd, jsonFormat = False)
303 self.cliExit()
304 time.sleep(60)
305 master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
306 assert_equal(master,new_master)
307 log.info('Cluster master changed to %s successfully'%new_master)
308
ChetanGaonker689b3862016-10-17 16:25:01 -0700309 def withdraw_cluster_current_mastership(self,master_ip=None,device_id=device_id,controller=None):
310 '''current master loses its mastership and hence a new master will be elected'''
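# 'device-role <device-id> <master-ip> none' makes the current master relinquish its role,
# after which the cluster is expected to elect a different master (asserted below).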
311 self.cliEnter(controller=controller)
312 cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
313 command = self.cli.command(cmd = cmd, jsonFormat = False)
314 self.cliExit()
315 time.sleep(60)
316 new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
317 assert_not_equal(new_master_ip,master_ip)
318 log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
319 log.info('Cluster new master is %s'%new_master_ip)
320 return True
321
A R Karthickec2db322016-11-17 15:06:01 -0800322 def test_cluster_controller_restarts(self):
A R Karthick1f908202016-11-16 17:32:20 -0800323 '''Test the cluster by repeatedly killing the controllers'''
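# Outline: cycle through the controllers, bump the log level on the other nodes, restart the
# chosen controller, then scan every node's log for StorageException$Timeout and verify that
# the 'leaders' command still succeeds cluster-wide before the next iteration (logs are
# archived per iteration).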
324 controllers = self.get_controllers()
325 ctlr_len = len(controllers)
326 if ctlr_len <= 1:
327 log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
328 assert_greater(ctlr_len, 1)
329
330 #this call would verify the cluster for once
331 onos_map = self.get_cluster_container_names_ips()
332
A R Karthickec2db322016-11-17 15:06:01 -0800333 def check_exception(controller = None):
A R Karthick1f908202016-11-16 17:32:20 -0800334 adjacent_controller = None
335 adjacent_controllers = None
336 if controller:
A.R Karthick45ab3e12016-11-30 11:25:51 -0800337 adjacent_controllers = list(set(controllers) - set([controller]))
338 adjacent_controller = adjacent_controllers[0]
A R Karthick1f908202016-11-16 17:32:20 -0800339 for node in controllers:
340 onosLog = OnosLog(host = node)
341 ##check the logs for storage exception
342 _, output = onosLog.get_log(('ERROR', 'Exception',))
A R Karthickec2db322016-11-17 15:06:01 -0800343 if output and output.find('StorageException$Timeout') >= 0:
344 log.info('\nStorage Exception Timeout found on node: %s\n' %node)
345 log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
346 log.info('\n' + '-' * 50 + '\n')
A R Karthick1f908202016-11-16 17:32:20 -0800347 log.info('%s' %output)
A R Karthickec2db322016-11-17 15:06:01 -0800348 log.info('\n' + '-' * 50 + '\n')
349 failed = self.verify_leaders(controllers)
350 if failed:
A.R Karthick45ab3e12016-11-30 11:25:51 -0800351 log.info('Leaders command failed on nodes: %s' %failed)
A R Karthickec2db322016-11-17 15:06:01 -0800352 assert_equal(len(failed), 0)
A R Karthick1f908202016-11-16 17:32:20 -0800353 return controller
354
355 try:
A R Karthickec2db322016-11-17 15:06:01 -0800356 ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800357 log.info('ONOS cluster formed with controllers: %s' %ips)
A R Karthick1f908202016-11-16 17:32:20 -0800358 st = True
359 except:
360 st = False
361
A R Karthickec2db322016-11-17 15:06:01 -0800362 failed = self.verify_leaders(controllers)
A R Karthick1f908202016-11-16 17:32:20 -0800363 assert_equal(len(failed), 0)
A R Karthick1f908202016-11-16 17:32:20 -0800364 if st is False:
365 log.info('No storage exception and ONOS cluster was not formed successfully')
366 else:
367 controller = None
368
369 return controller
370
371 next_controller = None
372 tries = 10
373 for num in range(tries):
374 index = num % ctlr_len
375 #index = random.randrange(0, ctlr_len)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800376 controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
377 controller = onos_map[controller_name]
378 log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
A R Karthick1f908202016-11-16 17:32:20 -0800379 try:
A R Karthickef1232d2016-12-07 09:18:15 -0800380 #enable debug log for the other controllers before restarting this controller
A R Karthicke14fc022016-12-08 14:50:29 -0800381 adjacent_controllers = list( set(controllers) - set([controller]) )
382 self.log_set(controllers = adjacent_controllers)
383 self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800384 cord_test_onos_restart(node = controller_name, timeout = 0)
A R Karthicke14fc022016-12-08 14:50:29 -0800385 self.log_set(controllers = controller)
386 self.log_set(app = 'io.atomix', controllers = controller)
A R Karthickde6b9dc2016-11-29 17:46:16 -0800387 time.sleep(60)
A R Karthick1f908202016-11-16 17:32:20 -0800388 except:
389 time.sleep(5)
390 continue
A R Karthicke8935c62016-12-08 18:17:17 -0800391
392 #first archive the test case logs for this run
393 CordLogger.archive_results('test_cluster_controller_restarts',
394 controllers = controllers,
395 iteration = 'iteration_{}'.format(num+1))
A R Karthickec2db322016-11-17 15:06:01 -0800396 next_controller = check_exception(controller = controller)
A R Karthick1f908202016-11-16 17:32:20 -0800397
A.R Karthick45ab3e12016-11-30 11:25:51 -0800398 def test_cluster_single_controller_restarts(self):
399 '''Test the cluster by repeatedly restarting the same controller'''
400 controllers = self.get_controllers()
401 ctlr_len = len(controllers)
402 if ctlr_len <= 1:
403 log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
404 assert_greater(ctlr_len, 1)
405
406 #this call would verify the cluster for once
407 onos_map = self.get_cluster_container_names_ips()
408
409 def check_exception(controller, inclusive = False):
410 adjacent_controllers = list(set(controllers) - set([controller]))
411 adjacent_controller = adjacent_controllers[0]
412 controller_list = adjacent_controllers if inclusive == False else controllers
413 storage_exceptions = []
414 for node in controller_list:
415 onosLog = OnosLog(host = node)
416 ##check the logs for storage exception
417 _, output = onosLog.get_log(('ERROR', 'Exception',))
418 if output and output.find('StorageException$Timeout') >= 0:
419 log.info('\nStorage Exception Timeout found on node: %s\n' %node)
420 log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
421 log.info('\n' + '-' * 50 + '\n')
422 log.info('%s' %output)
423 log.info('\n' + '-' * 50 + '\n')
424 storage_exceptions.append(node)
425
426 failed = self.verify_leaders(controller_list)
427 if failed:
428 log.info('Leaders command failed on nodes: %s' %failed)
429 if storage_exceptions:
430 log.info('Storage exception seen on nodes: %s' %storage_exceptions)
431 assert_equal(len(failed), 0)
432 return controller
433
434 for ctlr in controller_list:
435 ips = self.get_cluster_current_member_ips(controller = ctlr,
436 nodes_filter = \
437 lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
438 log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
439 if controller in ips and inclusive is False:
440 log.info('Controller %s still ACTIVE on Node %s after it was shutdown' %(controller, ctlr))
441 if controller not in ips and inclusive is True:
A R Karthick6cc8b812016-12-09 10:24:40 -0800442 log.info('Controller %s still INACTIVE on Node %s after it was restarted' %(controller, ctlr))
A.R Karthick45ab3e12016-11-30 11:25:51 -0800443
444 return controller
445
446 tries = 10
447 #choose a random controller for shutdown/restarts
448 controller = controllers[random.randrange(0, ctlr_len)]
449 controller_name = onos_map[controller]
A R Karthick6cc8b812016-12-09 10:24:40 -0800450 ##enable the log level for the controllers
451 self.log_set(controllers = controllers)
452 self.log_set(app = 'io.atomix', controllers = controllers)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800453 for num in range(tries):
A.R Karthick45ab3e12016-11-30 11:25:51 -0800454 log.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
455 try:
456 cord_test_onos_shutdown(node = controller_name)
457 time.sleep(20)
458 except:
459 time.sleep(5)
460 continue
461 #check for exceptions on the adjacent nodes
462 check_exception(controller)
463 #Now restart the controller back
464 log.info('Restarting back the controller %s' %controller_name)
465 cord_test_onos_restart(node = controller_name)
A R Karthick6cc8b812016-12-09 10:24:40 -0800466 self.log_set(controllers = controller)
467 self.log_set(app = 'io.atomix', controllers = controller)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800468 time.sleep(60)
A R Karthicke8935c62016-12-08 18:17:17 -0800469 #archive the logs for this run
470 CordLogger.archive_results('test_cluster_single_controller_restarts',
471 controllers = controllers,
472 iteration = 'iteration_{}'.format(num+1))
A.R Karthick45ab3e12016-11-30 11:25:51 -0800473 check_exception(controller, inclusive = True)
474
A.R Karthick2560f042016-11-30 14:38:52 -0800475 def test_cluster_restarts(self):
476 '''Test the cluster by repeatedly restarting the entire cluster'''
477 controllers = self.get_controllers()
478 ctlr_len = len(controllers)
479 if ctlr_len <= 1:
480 log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
481 assert_greater(ctlr_len, 1)
482
483 #this call would verify the cluster for once
484 onos_map = self.get_cluster_container_names_ips()
485
486 def check_exception():
487 controller_list = controllers
488 storage_exceptions = []
489 for node in controller_list:
490 onosLog = OnosLog(host = node)
491 ##check the logs for storage exception
492 _, output = onosLog.get_log(('ERROR', 'Exception',))
493 if output and output.find('StorageException$Timeout') >= 0:
494 log.info('\nStorage Exception Timeout found on node: %s\n' %node)
495 log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
496 log.info('\n' + '-' * 50 + '\n')
497 log.info('%s' %output)
498 log.info('\n' + '-' * 50 + '\n')
499 storage_exceptions.append(node)
500
501 failed = self.verify_leaders(controller_list)
502 if failed:
503 log.info('Leaders command failed on nodes: %s' %failed)
504 if storage_exceptions:
505 log.info('Storage exception seen on nodes: %s' %storage_exceptions)
506 assert_equal(len(failed), 0)
507 return
508
509 for ctlr in controller_list:
510 ips = self.get_cluster_current_member_ips(controller = ctlr,
511 nodes_filter = \
512 lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
513 log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
514 assert_equal(len(ips), len(controllers))
515
516 tries = 10
517 for num in range(tries):
518 log.info('ITERATION: %d. Restarting cluster with controllers at %s' %(num+1, controllers))
519 try:
520 cord_test_restart_cluster()
A R Karthick6cc8b812016-12-09 10:24:40 -0800521 self.log_set(controllers = controllers)
522 self.log_set(app = 'io.atomix', controllers = controllers)
A.R Karthick2560f042016-11-30 14:38:52 -0800523 log.info('Delaying before verifying cluster status')
524 time.sleep(60)
525 except:
526 time.sleep(10)
527 continue
A R Karthicke8935c62016-12-08 18:17:17 -0800528
529 #archive the logs for this run before verification
530 CordLogger.archive_results('test_cluster_restarts',
531 controllers = controllers,
532 iteration = 'iteration_{}'.format(num+1))
A.R Karthick2560f042016-11-30 14:38:52 -0800533 #check for exceptions on the adjacent nodes
534 check_exception()
535
ChetanGaonker2099d722016-10-07 15:16:58 -0700536 #pass
ChetanGaonker689b3862016-10-17 16:25:01 -0700537 def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
538 status = self.verify_cluster_status(onos_instances = onos_instances)
539 assert_equal(status, True)
540 log.info('Cluster exists with %d ONOS instances'%onos_instances)
ChetanGaonker2099d722016-10-07 15:16:58 -0700541
542 #nottest cluster not coming up properly if member goes down
ChetanGaonker689b3862016-10-17 16:25:01 -0700543 def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700544 status = self.verify_cluster_status(onos_instances = onos_instances)
545 assert_equal(status, True)
546 onos_ips = self.get_cluster_current_member_ips()
547 onos_instances = len(onos_ips)+add
548 log.info('Adding %d nodes to the ONOS cluster' %add)
549 cord_test_onos_add_cluster(count = add)
550 status = self.verify_cluster_status(onos_instances=onos_instances)
551 assert_equal(status, True)
552
ChetanGaonker689b3862016-10-17 16:25:01 -0700553 def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700554 status = self.verify_cluster_status(onos_instances = onos_instances)
555 assert_equal(status, True)
556 master, standbys = self.get_cluster_current_master_standbys()
557 assert_equal(len(standbys),(onos_instances-1))
558 onos_names_ips = self.get_cluster_container_names_ips()
559 master_onos_name = onos_names_ips[master]
560 log.info('Removing cluster current master %s'%(master))
561 cord_test_onos_shutdown(node = master_onos_name)
562 time.sleep(60)
563 onos_instances -= 1
564 status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
565 assert_equal(status, True)
566 new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
567 assert_not_equal(master,new_master)
ChetanGaonker689b3862016-10-17 16:25:01 -0700568 log.info('Successfully removed the cluster master instance')
ChetanGaonker2099d722016-10-07 15:16:58 -0700569
ChetanGaonker689b3862016-10-17 16:25:01 -0700570 def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700571 status = self.verify_cluster_status(onos_instances = onos_instances)
572 assert_equal(status, True)
573 master, standbys = self.get_cluster_current_master_standbys()
574 assert_equal(len(standbys),(onos_instances-1))
575 onos_names_ips = self.get_cluster_container_names_ips()
576 member_onos_name = onos_names_ips[standbys[0]]
577 log.info('Removing cluster member %s'%standbys[0])
578 cord_test_onos_shutdown(node = member_onos_name)
579 time.sleep(60)
580 onos_instances -= 1
581 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
582 assert_equal(status, True)
583
ChetanGaonker689b3862016-10-17 16:25:01 -0700584 def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700585 status = self.verify_cluster_status(onos_instances = onos_instances)
586 assert_equal(status, True)
587 master, standbys = self.get_cluster_current_master_standbys()
588 assert_equal(len(standbys),(onos_instances-1))
589 onos_names_ips = self.get_cluster_container_names_ips()
590 member1_onos_name = onos_names_ips[standbys[0]]
591 member2_onos_name = onos_names_ips[standbys[1]]
592 log.info('Removing cluster member %s'%standbys[0])
593 cord_test_onos_shutdown(node = member1_onos_name)
594 log.info('Removing cluster member %s'%standbys[1])
595 cord_test_onos_shutdown(node = member2_onos_name)
596 time.sleep(60)
597 onos_instances = onos_instances - 2
598 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
599 assert_equal(status, True)
600
ChetanGaonker689b3862016-10-17 16:25:01 -0700601 def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700602 status = self.verify_cluster_status(onos_instances = onos_instances)
603 assert_equal(status, True)
604 master, standbys = self.get_cluster_current_master_standbys()
605 assert_equal(len(standbys),(onos_instances-1))
606 onos_names_ips = self.get_cluster_container_names_ips()
607 for i in range(remove):
608 member_onos_name = onos_names_ips[standbys[i]]
609 log.info('Removing onos container with name %s'%standbys[i])
610 cord_test_onos_shutdown(node = member_onos_name)
611 time.sleep(60)
612 onos_instances = onos_instances - remove
613 status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
614 assert_equal(status, True)
615
616 #nottest cluster not coming up properly if member goes down
ChetanGaonker689b3862016-10-17 16:25:01 -0700617 def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
ChetanGaonker2099d722016-10-07 15:16:58 -0700618 status = self.verify_cluster_status(onos_instances = onos_instances)
619 assert_equal(status, True)
620 onos_ips = self.get_cluster_current_member_ips()
621 onos_instances = len(onos_ips)+add
622 log.info('Adding %d ONOS instances to the cluster'%add)
623 cord_test_onos_add_cluster(count = add)
624 status = self.verify_cluster_status(onos_instances=onos_instances)
625 assert_equal(status, True)
626 log.info('Removing %d ONOS instances from the cluster'%remove)
627 for i in range(remove):
628 name = '{}-{}'.format(Onos.NAME, onos_instances - i)
629 log.info('Removing onos container with name %s'%name)
630 cord_test_onos_shutdown(node = name)
631 time.sleep(60)
632 onos_instances = onos_instances-remove
633 status = self.verify_cluster_status(onos_instances=onos_instances)
634 assert_equal(status, True)
635
636 #nottest cluster not coming up properly if member goes down
ChetanGaonker689b3862016-10-17 16:25:01 -0700637 def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
ChetanGaonker2099d722016-10-07 15:16:58 -0700638 status = self.verify_cluster_status(onos_instances = onos_instances)
639 assert_equal(status, True)
640 onos_ips = self.get_cluster_current_member_ips()
641 onos_instances = onos_instances-remove
642 log.info('Removing %d ONOS instances from the cluster'%remove)
643 for i in range(remove):
644 name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
645 log.info('Removing onos container with name %s'%name)
646 cord_test_onos_shutdown(node = name)
647 time.sleep(60)
648 status = self.verify_cluster_status(onos_instances=onos_instances)
649 assert_equal(status, True)
650 log.info('Adding %d ONOS instances to the cluster'%add)
651 cord_test_onos_add_cluster(count = add)
652 onos_instances = onos_instances+add
653 status = self.verify_cluster_status(onos_instances=onos_instances)
654 assert_equal(status, True)
655
ChetanGaonker689b3862016-10-17 16:25:01 -0700656 def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700657 status = self.verify_cluster_status(onos_instances = onos_instances)
658 assert_equal(status, True)
659 log.info('Restarting cluster')
660 cord_test_onos_restart()
661 status = self.verify_cluster_status(onos_instances = onos_instances)
662 assert_equal(status, True)
663
ChetanGaonker689b3862016-10-17 16:25:01 -0700664 def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700665 status = self.verify_cluster_status(onos_instances = onos_instances)
666 assert_equal(status, True)
667 master, standbys = self.get_cluster_current_master_standbys()
668 onos_names_ips = self.get_cluster_container_names_ips()
669 master_onos_name = onos_names_ips[master]
670 log.info('Restarting cluster master %s'%master)
671 cord_test_onos_restart(node = master_onos_name)
672 status = self.verify_cluster_status(onos_instances = onos_instances)
673 assert_equal(status, True)
674 log.info('Cluster came up after master restart as expected')
675
676 #test fail. master changing after restart. Need to check correct behavior.
ChetanGaonker689b3862016-10-17 16:25:01 -0700677 def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700678 status = self.verify_cluster_status(onos_instances = onos_instances)
679 assert_equal(status, True)
680 master1, standbys = self.get_cluster_current_master_standbys()
681 onos_names_ips = self.get_cluster_container_names_ips()
682 master_onos_name = onos_names_ips[master1]
683 log.info('Restarting cluster master %s'%master1)
684 cord_test_onos_restart(node = master_onos_name)
685 status = self.verify_cluster_status(onos_instances = onos_instances)
686 assert_equal(status, True)
687 master2, standbys = self.get_cluster_current_master_standbys()
688 assert_equal(master1,master2)
689 log.info('Cluster master is same before and after cluster master restart as expected')
690
ChetanGaonker689b3862016-10-17 16:25:01 -0700691 def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700692 status = self.verify_cluster_status(onos_instances = onos_instances)
693 assert_equal(status, True)
694 master, standbys = self.get_cluster_current_master_standbys()
695 assert_equal(len(standbys),(onos_instances-1))
696 onos_names_ips = self.get_cluster_container_names_ips()
697 member_onos_name = onos_names_ips[standbys[0]]
698 log.info('Restarting cluster member %s'%standbys[0])
699 cord_test_onos_restart(node = member_onos_name)
700 status = self.verify_cluster_status(onos_instances = onos_instances)
701 assert_equal(status, True)
702 log.info('Cluster came up as expected after restarting one member')
703
ChetanGaonker689b3862016-10-17 16:25:01 -0700704 def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700705 status = self.verify_cluster_status(onos_instances = onos_instances)
706 assert_equal(status, True)
707 master, standbys = self.get_cluster_current_master_standbys()
708 assert_equal(len(standbys),(onos_instances-1))
709 onos_names_ips = self.get_cluster_container_names_ips()
710 member1_onos_name = onos_names_ips[standbys[0]]
711 member2_onos_name = onos_names_ips[standbys[1]]
712 log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
713 cord_test_onos_restart(node = member1_onos_name)
714 cord_test_onos_restart(node = member2_onos_name)
715 status = self.verify_cluster_status(onos_instances = onos_instances)
716 assert_equal(status, True)
717 log.info('Cluster came up as expected after restarting two members')
718
ChetanGaonker689b3862016-10-17 16:25:01 -0700719 def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700720 status = self.verify_cluster_status(onos_instances = onos_instances)
721 assert_equal(status,True)
722 master, standbys = self.get_cluster_current_master_standbys()
723 assert_equal(len(standbys),(onos_instances-1))
724 onos_names_ips = self.get_cluster_container_names_ips()
725 for i in range(members):
726 member_onos_name = onos_names_ips[standbys[i]]
727 log.info('Restarting cluster member %s'%standbys[i])
728 cord_test_onos_restart(node = member_onos_name)
729
730 status = self.verify_cluster_status(onos_instances = onos_instances)
731 assert_equal(status, True)
732 log.info('Cluster came up as expected after restarting %d members'%members)
733
ChetanGaonker689b3862016-10-17 16:25:01 -0700734 def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700735 status = self.verify_cluster_status(onos_instances=onos_instances)
736 assert_equal(status, True)
737 master, standbys = self.get_cluster_current_master_standbys()
738 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -0700739 log.info('Cluster current master of devices is %s'%master)
ChetanGaonker2099d722016-10-07 15:16:58 -0700740 self.change_master_current_cluster(new_master=standbys[0])
741 log.info('Cluster master changed successfully')
742
743 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700744 def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700745 status = self.verify_cluster_status(onos_instances = onos_instances)
746 assert_equal(status, True)
747 onos_ips = self.get_cluster_current_member_ips()
748 self.vrouter.setUpClass()
749 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
750 assert_equal(res, True)
751 for onos_ip in onos_ips:
752 tries = 0
753 flag = False
754 try:
755 self.cliEnter(controller = onos_ip)
756 while tries <= 5:
757 routes = json.loads(self.cli.routes(jsonFormat = True))
758 if routes:
759 assert_equal(len(routes['routes4']), networks)
760 self.cliExit()
761 flag = True
762 break
763 else:
764 tries += 1
765 time.sleep(1)
766 assert_equal(flag, True)
767 except:
768 log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
769 raise
770
771 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700772 def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700773 status = self.verify_cluster_status(onos_instances = onos_instances)
774 assert_equal(status, True)
775 onos_ips = self.get_cluster_current_member_ips()
776 master, standbys = self.get_cluster_current_master_standbys()
777 onos_names_ips = self.get_cluster_container_names_ips()
778 master_onos_name = onos_names_ips[master]
779 self.vrouter.setUpClass()
780 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
781 assert_equal(res,True)
782 cord_test_onos_shutdown(node = master_onos_name)
783 time.sleep(60)
ChetanGaonker689b3862016-10-17 16:25:01 -0700784 log.info('Verifying vrouter traffic after cluster master is down')
ChetanGaonker2099d722016-10-07 15:16:58 -0700785 self.vrouter.vrouter_traffic_verify()
786
787 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700788 def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700789 status = self.verify_cluster_status(onos_instances = onos_instances)
790 assert_equal(status, True)
791 onos_ips = self.get_cluster_current_member_ips()
792 master, standbys = self.get_cluster_current_master_standbys()
793 onos_names_ips = self.get_cluster_container_names_ips()
794 master_onos_name = onos_names_ips[master]
795 self.vrouter.setUpClass()
796 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
797 assert_equal(res, True)
798 cord_test_onos_restart()
799 self.vrouter.vrouter_traffic_verify()
800
801 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700802 def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700803 status = self.verify_cluster_status(onos_instances = onos_instances)
804 assert_equal(status, True)
805 self.vrouter.setUpClass()
806 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
807 assert_equal(res, True)
808 self.vrouter.vrouter_activate(deactivate=True)
809 time.sleep(15)
810 self.vrouter.vrouter_traffic_verify(positive_test=False)
811 self.vrouter.vrouter_activate(deactivate=False)
812
813 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700814 def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700815 status = self.verify_cluster_status(onos_instances = onos_instances)
816 assert_equal(status, True)
817 master, standbys = self.get_cluster_current_master_standbys()
818 onos_names_ips = self.get_cluster_container_names_ips()
819 master_onos_name = onos_names_ips[master]
820 self.vrouter.setUpClass()
821 log.info('Verifying vrouter before master down')
822 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
823 assert_equal(res, True)
824 self.vrouter.vrouter_activate(deactivate=True)
825 log.info('Verifying vrouter traffic after app deactivated')
826 time.sleep(15) ## wait for the vrouter app deactivation to take effect
827 self.vrouter.vrouter_traffic_verify(positive_test=False)
828 log.info('Verifying vrouter traffic after master down')
829 cord_test_onos_shutdown(node = master_onos_name)
830 time.sleep(60)
831 self.vrouter.vrouter_traffic_verify(positive_test=False)
832 self.vrouter.vrouter_activate(deactivate=False)
833
834 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700835 def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700836 status = self.verify_cluster_status(onos_instances = onos_instances)
837 assert_equal(status, True)
838 master, standbys = self.get_cluster_current_master_standbys()
839 onos_names_ips = self.get_cluster_container_names_ips()
840 member_onos_name = onos_names_ips[standbys[0]]
841 self.vrouter.setUpClass()
842 log.info('Verifying vrouter before cluster member down')
843 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
844 assert_equal(res, True) # Expecting vrouter to work properly
845 log.info('Verifying vrouter after cluster member down')
846 cord_test_onos_shutdown(node = member_onos_name)
847 time.sleep(60)
848 self.vrouter.vrouter_traffic_verify()# Expecting vrouter to keep working when a cluster member goes down
849
850 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700851 def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700852 status = self.verify_cluster_status(onos_instances = onos_instances)
853 assert_equal(status, True)
854 master, standbys = self.get_cluster_current_master_standbys()
855 onos_names_ips = self.get_cluster_container_names_ips()
856 member_onos_name = onos_names_ips[standbys[1]]
857 self.vrouter.setUpClass()
858 log.info('Verifying vrouter traffic before cluster member restart')
859 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
860 assert_equal(res, True) # Expecting vrouter to work properly
861 cord_test_onos_restart(node = member_onos_name)
862 log.info('Verifying vrouter traffic after cluster member restart')
863 self.vrouter.vrouter_traffic_verify()# Expecting vrouter to keep working when a cluster member restarts
864
865 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700866 def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700867 status = self.verify_cluster_status(onos_instances = onos_instances)
868 assert_equal(status, True)
869 self.vrouter.setUpClass()
870 log.info('Verifying vrouter traffic before cluster restart')
871 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
872 assert_equal(res, True) # Expecting vrouter to work properly
873 cord_test_onos_restart()
874 log.info('Verifying vrouter traffic after cluster restart')
875 self.vrouter.vrouter_traffic_verify()# Expecting vrouter to keep working after the cluster restarts
876
877
878 #test fails because flow state is in pending_add in onos
ChetanGaonker689b3862016-10-17 16:25:01 -0700879 def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700880 status = self.verify_cluster_status(onos_instances = onos_instances)
881 assert_equal(status, True)
882 master, standbys = self.get_cluster_current_master_standbys()
883 onos_names_ips = self.get_cluster_container_names_ips()
884 master_onos_name = onos_names_ips[master]
885 self.flows.setUpClass()
886 egress = 1
887 ingress = 2
888 egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
889 ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
890 flow = OnosFlowCtrl(deviceId = self.device_id,
891 egressPort = egress,
892 ingressPort = ingress,
893 udpSrc = ingress_map['udp_port'],
894 udpDst = egress_map['udp_port'],
895 controller=master
896 )
897 result = flow.addFlow()
898 assert_equal(result, True)
899 time.sleep(1)
900 self.success = False
901 def mac_recv_task():
902 def recv_cb(pkt):
903 log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
904 self.success = True
905 sniff(timeout=2,
906 lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
907 and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])
908
909 for i in [0,1]:
910 if i == 1:
911 cord_test_onos_shutdown(node = master_onos_name)
912 log.info('Verifying flows traffic after master killed')
913 time.sleep(45)
914 else:
915 log.info('Verifying flows traffic before master killed')
916 t = threading.Thread(target = mac_recv_task)
917 t.start()
918 L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
919 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
920 L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
921 pkt = L2/L3/L4
922 log.info('Sending packets to verify if flows are correct')
923 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
924 t.join()
925 assert_equal(self.success, True)
926
ChetanGaonker689b3862016-10-17 16:25:01 -0700927 def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700928 status = self.verify_cluster_status(onos_instances=onos_instances)
929 assert_equal(status, True)
930 master, standbys = self.get_cluster_current_master_standbys()
931 self.flows.setUpClass()
932 egress = 1
933 ingress = 2
934 egress_map = { 'ip': '192.168.30.1' }
935 ingress_map = { 'ip': '192.168.40.1' }
936 flow = OnosFlowCtrl(deviceId = self.device_id,
937 egressPort = egress,
938 ingressPort = ingress,
939 ecn = 1,
940 controller=master
941 )
942 result = flow.addFlow()
943 assert_equal(result, True)
944 ##wait for flows to be added to ONOS
945 time.sleep(1)
946 self.success = False
947 def mac_recv_task():
948 def recv_cb(pkt):
949 log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
950 self.success = True
951 sniff(count=2, timeout=5,
952 lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
953 and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
954 iface = self.flows.port_map[egress])
955 for i in [0,1]:
956 if i == 1:
957 log.info('Changing cluster master to %s'%standbys[0])
958 self.change_master_current_cluster(new_master=standbys[0])
959 log.info('Verifying flow traffic after cluster master changed')
960 else:
961 log.info('Verifying flow traffic before cluster master changed')
962 t = threading.Thread(target = mac_recv_task)
963 t.start()
964 L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
965 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
966 pkt = L2/L3
967 log.info('Sending a packet to verify if flows are correct')
968 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
969 t.join()
970 assert_equal(self.success, True)
971
ChetanGaonker689b3862016-10-17 16:25:01 -0700972 #pass
973 def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
974 status = self.verify_cluster_status(onos_instances=onos_instances)
975 assert_equal(status, True)
976 master,standbys = self.get_cluster_current_master_standbys()
977 onos_names_ips = self.get_cluster_container_names_ips()
978 master_onos_name = onos_names_ips[master]
979 self.flows.setUpClass()
980 egress = 1
981 ingress = 2
982 egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
983 ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
984 flow = OnosFlowCtrl(deviceId = self.device_id,
985 egressPort = egress,
986 ingressPort = ingress,
987 ipv6_extension = 0,
988 controller=master
989 )
990
991 result = flow.addFlow()
992 assert_equal(result, True)
993 ##wait for flows to be added to ONOS
994 time.sleep(1)
995 self.success = False
996 def mac_recv_task():
997 def recv_cb(pkt):
998 log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
999 self.success = True
1000 sniff(timeout=2,count=5,
1001 lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
1002 for i in [0,1]:
1003 if i == 1:
1004 log.info('Restart cluster current master %s'%master)
1005 Container(master_onos_name,Onos.IMAGE).restart()
1006 time.sleep(45)
1007 log.info('Verifying flow traffic after master restart')
1008 else:
1009 log.info('Verifying flow traffic before master restart')
1010 t = threading.Thread(target = mac_recv_task)
1011 t.start()
1012 L2 = self.flows_eth
1013 L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
1014 pkt = L2/L3
1015 log.info('Sending packets to verify if flows are correct')
1016 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
1017 t.join()
1018 assert_equal(self.success, True)
1019
1020 def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
1021 dst_mac = self.igmp.iptomac(group)
1022 eth = Ether(dst= dst_mac)
1023 ip = IP(dst=group,src=source)
1024 data = repr(monotonic.monotonic())
1025 sendp(eth/ip/data,count=20, iface = intf)
1026 pkt = (eth/ip/data)
1027 log.info('multicast traffic packet %s'%pkt.summary())
1028
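# verify_igmp_data_traffic() starts a sniffer for the given group/source pair on intf in a
# separate thread, replays multicast data towards the group and returns True only if the
# traffic was actually received on the receiver interface.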
1029 def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
1030 log.info('verifying multicast traffic for group %s from source %s'%(group,source))
1031 self.success = False
1032 def recv_task():
1033 def igmp_recv_cb(pkt):
1034 log.info('multicast data received for group %s from source %s'%(group,source))
1035 self.success = True
1036 sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface=intf)
1037 t = threading.Thread(target = recv_task)
1038 t.start()
1039 self.send_multicast_data_traffic(group,source=source)
1040 t.join()
1041 return self.success
1042
1043 #pass
1044 def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
1045 status = self.verify_cluster_status(onos_instances=onos_instances)
1046 assert_equal(status, True)
1047 master, standbys = self.get_cluster_current_master_standbys()
1048 assert_equal(len(standbys), (onos_instances-1))
1049 onos_names_ips = self.get_cluster_container_names_ips()
1050 master_onos_name = onos_names_ips[master]
1051 self.igmp.setUp(controller=master)
1052 groups = ['224.2.3.4','230.5.6.7']
1053 src_list = ['2.2.2.2','3.3.3.3']
1054 self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
1055 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1056 iface = self.V_INF1, delay = 2)
1057 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1058 iface = self.V_INF1, delay = 2)
1059 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1060 assert_equal(status,True)
1061 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1062 assert_equal(status,False)
1063 log.info('restarting cluster master %s'%master)
1064 Container(master_onos_name,Onos.IMAGE).restart()
1065 time.sleep(60)
1066 log.info('verifying multicast data traffic after master restart')
1067 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1068 assert_equal(status,True)
1069 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1070 assert_equal(status,False)
1071
1072 #pass
1073 def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
1074 status = self.verify_cluster_status(onos_instances=onos_instances)
1075 assert_equal(status, True)
1076 master, standbys = self.get_cluster_current_master_standbys()
1077 assert_equal(len(standbys), (onos_instances-1))
1078 onos_names_ips = self.get_cluster_container_names_ips()
1079 master_onos_name = onos_names_ips[master]
1080 self.igmp.setUp(controller=master)
1081 groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
1082 src_list = [self.igmp.randomsourceip()]
1083 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1084 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1085 iface = self.V_INF1, delay = 2)
1086 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1087 iface = self.V_INF1, delay = 2)
1088 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1089 assert_equal(status,True)
1090 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1091 assert_equal(status,False)
1092 log.info('Killing cluster master %s'%master)
1093 Container(master_onos_name,Onos.IMAGE).kill()
1094 time.sleep(60)
1095 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1096 assert_equal(status, True)
1097 log.info('Verifying multicast data traffic after cluster master down')
1098 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1099 assert_equal(status,True)
1100 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1101 assert_equal(status,False)
1102
1103 def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
1104 status = self.verify_cluster_status(onos_instances=onos_instances)
1105 assert_equal(status, True)
1106 master, standbys = self.get_cluster_current_master_standbys()
1107 assert_equal(len(standbys), (onos_instances-1))
1108 onos_names_ips = self.get_cluster_container_names_ips()
1109 master_onos_name = onos_names_ips[master]
1110 self.igmp.setUp(controller=master)
1111 groups = [self.igmp.random_mcast_ip()]
1112 src_list = [self.igmp.randomsourceip()]
1113 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1114 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1115 iface = self.V_INF1, delay = 2)
1116 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1117 assert_equal(status,True)
1118 log.info('Killing cluster master %s'%master)
1119 Container(master_onos_name,Onos.IMAGE).kill()
1120 count = 0
1121 for i in range(60):
1122 log.info('Verifying multicast data traffic after cluster master down')
1123 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1124 if status:
1125 break
1126 else:
1127 count += 1
1128 time.sleep(1)
1129 assert_equal(status, True)
1130 log.info('Time taken to recover traffic after cluster master went down is %d seconds'%count)
1131
1132
1133 #pass
1134 def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
1135 status = self.verify_cluster_status(onos_instances=onos_instances)
1136 assert_equal(status, True)
1137 master, standbys = self.get_cluster_current_master_standbys()
1138 assert_equal(len(standbys), (onos_instances-1))
1139 self.igmp.setUp(controller=master)
1140 groups = [self.igmp.random_mcast_ip()]
1141 src_list = [self.igmp.randomsourceip()]
1142 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1143 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1144 iface = self.V_INF1, delay = 2)
1145 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1146 assert_equal(status,True)
1147 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1148 self.change_master_current_cluster(new_master=standbys[0])
1149 log.info('Verifying multicast traffic after cluster master change')
1150 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1151 assert_equal(status,True)
1152 log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
1153 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
1154 iface = self.V_INF1, delay = 1)
1155 time.sleep(10)
1156 status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
1157 assert_equal(status,False)
1158
1159 #pass
1160 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1161 status = self.verify_cluster_status(onos_instances=onos_instances)
1162 assert_equal(status, True)
1163 master,standbys = self.get_cluster_current_master_standbys()
1164 assert_equal(len(standbys), (onos_instances-1))
1165 self.igmp.setUp(controller=master)
1166 groups = [self.igmp.random_mcast_ip()]
1167 src_list = [self.igmp.randomsourceip()]
1168 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1169 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1170 self.change_cluster_current_master(new_master = standbys[0])
1171 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1172 iface = self.V_INF1, delay = 2)
1173 time.sleep(1)
1174 self.change_cluster_current_master(new_master = master)
1175 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1176 assert_equal(status,True)
1177
1178 #pass
ChetanGaonker2099d722016-10-07 15:16:58 -07001179 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001180 def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001181 status = self.verify_cluster_status(onos_instances=onos_instances)
1182 assert_equal(status, True)
1183 master, standbys = self.get_cluster_current_master_standbys()
1184 assert_equal(len(standbys), (onos_instances-1))
1185 self.tls.setUp(controller=master)
1186 df = defer.Deferred()
1187 def eap_tls_verify(df):
1188 tls = TLSAuthTest()
1189 tls.runTest()
1190 df.callback(0)
1191 reactor.callLater(0, eap_tls_verify, df)
1192 return df
1193
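    # Runs the EAP-TLS authentication check twice: once against the original cluster master
    # and once after mastership has been handed over to the first standby instance.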
1194 @deferred(120)
ChetanGaonker689b3862016-10-17 16:25:01 -07001195 def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001196 master, standbys = self.get_cluster_current_master_standbys()
1197 assert_equal(len(standbys), (onos_instances-1))
1198 self.tls.setUp()
1199 df = defer.Deferred()
1200        def eap_tls_verify(df):
1201 tls = TLSAuthTest()
1202 tls.runTest()
1203 df.callback(0)
1204 for i in [0,1]:
1205 if i == 1:
1206 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1207 self.change_master_current_cluster(new_master=standbys[0])
1208 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1209 else:
1210 log.info('Verifying tls authentication before cluster master change')
1211 reactor.callLater(0, eap_tls_verify, df)
1212 return df
1213
1214 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001215 def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001216 status = self.verify_cluster_status(onos_instances=onos_instances)
1217 assert_equal(status, True)
1218 master, standbys = self.get_cluster_current_master_standbys()
1219 assert_equal(len(standbys), (onos_instances-1))
1220 onos_names_ips = self.get_cluster_container_names_ips()
1221 master_onos_name = onos_names_ips[master]
1222 self.tls.setUp()
1223 df = defer.Deferred()
1224 def eap_tls_verify(df):
1225 tls = TLSAuthTest()
1226 tls.runTest()
1227 df.callback(0)
1228 for i in [0,1]:
1229 if i == 1:
1230 log.info('Killing cluster current master %s'%master)
1231 cord_test_onos_shutdown(node = master_onos_name)
1232 time.sleep(20)
1233 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1234 assert_equal(status, True)
1235 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1236 log.info('Verifying tls authentication after killing cluster master')
1237 reactor.callLater(0, eap_tls_verify, df)
1238 return df
1239
1240 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001241 def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001242 status = self.verify_cluster_status(onos_instances=onos_instances)
1243 assert_equal(status, True)
1244 master, standbys = self.get_cluster_current_master_standbys()
1245 assert_equal(len(standbys), (onos_instances-1))
1246 onos_names_ips = self.get_cluster_container_names_ips()
1247 member_onos_name = onos_names_ips[standbys[0]]
1248 self.tls.setUp()
1249 df = defer.Deferred()
1250 def eap_tls_no_cert(df):
1251 def tls_no_cert_cb():
1252 log.info('TLS authentication failed with no certificate')
1253 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1254 tls.runTest()
1255 assert_equal(tls.failTest, True)
1256 df.callback(0)
1257 for i in [0,1]:
1258 if i == 1:
1259 log.info('Restart cluster member %s'%standbys[0])
1260 Container(member_onos_name,Onos.IMAGE).restart()
1261 time.sleep(20)
1262 status = self.verify_cluster_status(onos_instances=onos_instances)
1263 assert_equal(status, True)
1264 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1265 log.info('Verifying tls authentication after member restart')
1266 reactor.callLater(0, eap_tls_no_cert, df)
1267 return df
1268
ChetanGaonker689b3862016-10-17 16:25:01 -07001269 #pass
1270 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1271 status = self.verify_cluster_status(onos_instances=onos_instances)
1272 assert_equal(status,True)
1273 master,standbys = self.get_cluster_current_master_standbys()
1274 assert_equal(len(standbys),(onos_instances-1))
1275 self.proxyarp.setUpClass()
1276 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1277 ingress = hosts+1
1278 for hostip, hostmac in hosts_config:
1279 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1280 time.sleep(1)
1281 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1282 self.change_cluster_current_master(new_master=standbys[0])
1283 log.info('verifying proxyarp after master change')
1284 for hostip, hostmac in hosts_config:
1285 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1286 time.sleep(1)
1287 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1288 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1289 time.sleep(3)
1290 for hostip, hostmac in hosts_config:
1291 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1292 time.sleep(1)
1293 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1294 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1295 time.sleep(3)
1296 for hostip, hostmac in hosts_config:
1297 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1298 time.sleep(1)
ChetanGaonker2099d722016-10-07 15:16:58 -07001299
ChetanGaonker689b3862016-10-17 16:25:01 -07001300 #pass
1301 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001302 status = self.verify_cluster_status(onos_instances=onos_instances)
1303 assert_equal(status, True)
1304 master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001305 assert_equal(len(standbys), (onos_instances-1))
1306 onos_names_ips = self.get_cluster_container_names_ips()
1307 member_onos_name = onos_names_ips[standbys[1]]
1308 self.proxyarp.setUpClass()
1309 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1310 ingress = hosts+1
1311 for hostip, hostmac in hosts_config:
1312 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1313 time.sleep(1)
1314 log.info('killing cluster member %s'%standbys[1])
1315 Container(member_onos_name,Onos.IMAGE).kill()
1316 time.sleep(20)
1317 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1318 assert_equal(status, True)
1319 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1320 log.info('verifying proxy arp functionality after cluster member down')
1321 for hostip, hostmac in hosts_config:
1322 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1323 time.sleep(1)
1324
1325 #pass
1326 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1327 status = self.verify_cluster_status(onos_instances=onos_instances)
1328 assert_equal(status, True)
1329 self.proxyarp.setUpClass()
1330 master, standbys = self.get_cluster_current_master_standbys()
1331 assert_equal(len(standbys), (onos_instances-1))
1332 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1333 self.success = True
1334 ingress = hosts+1
1335 ports = range(ingress,ingress+10)
1336 hostmac = []
1337 hostip = []
1338 for ip,mac in hosts_config:
1339 hostmac.append(mac)
1340 hostip.append(ip)
1341 success_dir = {}
1342 def verify_proxyarp(*r):
1343 ingress, hostmac, hostip = r[0],r[1],r[2]
1344 def mac_recv_task():
1345 def recv_cb(pkt):
1346                    log.info('ARP reply seen with source MAC %s' %(pkt[ARP].hwsrc))
1347 success_dir[current_thread().name] = True
1348 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1349 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1350 t = threading.Thread(target = mac_recv_task)
1351 t.start()
1352 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1353 log.info('Sending arp request for dest ip %s on interface %s' %
1354 (hostip,self.proxyarp.port_map[ingress]))
1355 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1356 t.join()
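        # Spawn ten parallel ARP requesters, one per ingress port/host pair; each thread records
        # success in success_dir keyed by its thread name, and the test passes only if all ten
        # proxy ARP replies are observed.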
1357 t = []
1358 for i in range(10):
1359 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1360 for i in range(10):
1361 t[i].start()
1362 time.sleep(2)
1363 for i in range(10):
1364 t[i].join()
1365 if len(success_dir) != 10:
1366 self.success = False
1367 assert_equal(self.success, True)
1368
1369 #pass
1370 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1371 status = self.verify_cluster_status(onos_instances=onos_instances)
1372 assert_equal(status, True)
1373 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001374 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001375 self.acl.setUp()
1376 acl_rule = ACLTest()
1377 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1378 if status is False:
1379 log.info('JSON request returned status %d' %code)
1380 assert_equal(status, True)
1381 result = acl_rule.get_acl_rules(controller=master)
1382 aclRules1 = result.json()['aclRules']
1383        log.info('Added acl rules are %s'%aclRules1)
1384 acl_Id = map(lambda d: d['id'], aclRules1)
1385 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1386 self.change_cluster_current_master(new_master=standbys[0])
1387 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1388 if status is False:
1389 log.info('JSON request returned status %d' %code)
1390 assert_equal(status, True)
1391
1392 #pass
1393 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1394 status = self.verify_cluster_status(onos_instances=onos_instances)
1395 assert_equal(status, True)
1396 master,standbys = self.get_cluster_current_master_standbys()
1397 assert_equal(len(standbys),(onos_instances-1))
1398 onos_names_ips = self.get_cluster_container_names_ips()
1399 master_onos_name = onos_names_ips[master]
1400 self.acl.setUp()
1401 acl_rule = ACLTest()
1402 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1403 if status is False:
1404 log.info('JSON request returned status %d' %code)
1405 assert_equal(status, True)
1406 result1 = acl_rule.get_acl_rules(controller=master)
1407 aclRules1 = result1.json()['aclRules']
1408        log.info('Added acl rules are %s'%aclRules1)
1409 acl_Id1 = map(lambda d: d['id'], aclRules1)
1410 log.info('Killing cluster current master %s'%master)
1411 Container(master_onos_name,Onos.IMAGE).kill()
1412 time.sleep(45)
1413        status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1414 assert_equal(status, True)
1415 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1416 assert_equal(len(standbys),(onos_instances-2))
1417 assert_not_equal(new_master,master)
1418 result2 = acl_rule.get_acl_rules(controller=new_master)
1419 aclRules2 = result2.json()['aclRules']
1420 acl_Id2 = map(lambda d: d['id'], aclRules2)
1421 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1422 assert_equal(acl_Id2,acl_Id1)
1423
1424 #acl traffic scenario not working as acl rule is not getting added to onos
1425 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1426 status = self.verify_cluster_status(onos_instances=onos_instances)
1427 assert_equal(status, True)
1428 master,standbys = self.get_cluster_current_master_standbys()
1429 assert_equal(len(standbys),(onos_instances-1))
1430 onos_names_ips = self.get_cluster_container_names_ips()
1431 member1_onos_name = onos_names_ips[standbys[0]]
1432 member2_onos_name = onos_names_ips[standbys[1]]
1433 ingress = self.acl.ingress_iface
1434 egress = self.acl.CURRENT_PORT_NUM
1435 acl_rule = ACLTest()
1436 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1437 self.acl.CURRENT_PORT_NUM += 1
1438 time.sleep(5)
1439 if status is False:
1440 log.info('JSON request returned status %d' %code)
1441 assert_equal(status, True)
1442 srcMac = '00:00:00:00:00:11'
1443 dstMac = host_ip_mac[0][1]
1444 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1445 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1446 time.sleep(10)
1447 if status is False:
1448 log.info('JSON request returned status %d' %code)
1449 assert_equal(status, True)
1450 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1451 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1452 Container(member1_onos_name, Onos.IMAGE).kill()
1453 Container(member2_onos_name, Onos.IMAGE).kill()
1454 time.sleep(40)
1455 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1456 assert_equal(status, True)
1457 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1458 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1459
1460 #pass
1461 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1462 status = self.verify_cluster_status(onos_instances=onos_instances)
1463 assert_equal(status, True)
1464 master,standbys = self.get_cluster_current_master_standbys()
1465 assert_equal(len(standbys),(onos_instances-1))
1466 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001467 mac = self.dhcprelay.get_mac(iface)
1468 self.dhcprelay.host_load(iface)
1469 ##we use the defaults for this test that serves as an example for others
1470 ##You don't need to restart dhcpd server if retaining default config
1471 config = self.dhcprelay.default_config
1472 options = self.dhcprelay.default_options
1473 subnet = self.dhcprelay.default_subnet_config
1474 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1475 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1476 config = config,
1477 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001478 subnet = subnet,
1479 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001480 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1481 cip, sip = self.dhcprelay.send_recv(mac)
1482 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1483 self.change_master_current_cluster(new_master=standbys[0])
1484 log.info('Releasing ip %s to server %s' %(cip, sip))
1485 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1486 log.info('Triggering DHCP discover again after release')
1487 cip2, sip2 = self.dhcprelay.send_recv(mac)
1488 log.info('Verifying released IP was given back on rediscover')
1489 assert_equal(cip, cip2)
1490 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1491 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001492 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001493
ChetanGaonker689b3862016-10-17 16:25:01 -07001494
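    # Verifies DHCP address persistence across a master failure: a client obtains a lease via the
    # relay, the current master container is killed, and the same client re-requests its address
    # through the remaining instances, expecting the identical IP back.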
1495 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1496 status = self.verify_cluster_status(onos_instances=onos_instances)
1497 assert_equal(status, True)
1498 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001499 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001500 onos_names_ips = self.get_cluster_container_names_ips()
1501 master_onos_name = onos_names_ips[master]
1502 self.dhcprelay.setUpClass(controller=master)
1503 mac = self.dhcprelay.get_mac(iface)
1504 self.dhcprelay.host_load(iface)
1505 ##we use the defaults for this test that serves as an example for others
1506 ##You don't need to restart dhcpd server if retaining default config
1507 config = self.dhcprelay.default_config
1508 options = self.dhcprelay.default_options
1509 subnet = self.dhcprelay.default_subnet_config
1510 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1511 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1512 config = config,
1513 options = options,
1514 subnet = subnet,
1515 controller=master)
1516 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1517 log.info('Initiating dhcp process from client %s'%mac)
1518 cip, sip = self.dhcprelay.send_recv(mac)
1519 log.info('Killing cluster current master %s'%master)
1520 Container(master_onos_name, Onos.IMAGE).kill()
1521 time.sleep(60)
1522 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1523 assert_equal(status, True)
1524 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1525        log.info("Verifying dhcp client gets the same IP after cluster master goes down")
1526 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1527 assert_equal(new_cip, cip)
1528 self.dhcprelay.tearDownClass(controller=standbys[0])
1529
1530 #pass
1531 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1532 status = self.verify_cluster_status(onos_instances=onos_instances)
1533 assert_equal(status, True)
1534 master,standbys = self.get_cluster_current_master_standbys()
1535 assert_equal(len(standbys),(onos_instances-1))
1536 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001537 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1538 self.dhcprelay.host_load(iface)
1539 ##we use the defaults for this test that serves as an example for others
1540 ##You don't need to restart dhcpd server if retaining default config
1541 config = self.dhcprelay.default_config
1542 options = self.dhcprelay.default_options
1543 subnet = self.dhcprelay.default_subnet_config
1544 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1545 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1546 config = config,
1547 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001548 subnet = subnet,
1549 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001550 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1551 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1552 assert_not_equal(cip1,None)
1553 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1554 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1555 self.change_master_current_cluster(new_master=standbys[0])
1556 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1557 assert_not_equal(cip2,None)
1558 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1559 self.change_master_current_cluster(new_master=master)
1560 log.info('Changing cluster master from %s to %s'%(standbys[0],master))
1561 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1562 assert_not_equal(cip3,None)
1563        log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
ChetanGaonker689b3862016-10-17 16:25:01 -07001564 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001565
ChetanGaonker689b3862016-10-17 16:25:01 -07001566 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001567 status = self.verify_cluster_status(onos_instances=onos_instances)
1568 assert_equal(status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001569 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001570 self.subscriber.num_subscribers = 5
1571 self.subscriber.num_channels = 10
1572 for i in [0,1]:
1573 if i == 1:
1574 cord_test_onos_restart()
1575 time.sleep(45)
1576 status = self.verify_cluster_status(onos_instances=onos_instances)
1577 assert_equal(status, True)
1578 log.info('Verifying cord subscriber functionality after cluster restart')
1579 else:
1580 log.info('Verifying cord subscriber functionality before cluster restart')
1581 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1582 num_channels = self.subscriber.num_channels,
1583 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1584 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1585 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1586 self.subscriber.num_channels))
1587 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001588 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001589
ChetanGaonker689b3862016-10-17 16:25:01 -07001590 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1591 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1592 status = self.verify_cluster_status(onos_instances=onos_instances)
1593 assert_equal(status, True)
1594 master,standbys = self.get_cluster_current_master_standbys()
1595 assert_equal(len(standbys),(onos_instances-1))
1596 self.subscriber.setUpClass(controller=master)
1597 self.subscriber.num_subscribers = 5
1598 self.subscriber.num_channels = 10
1599 for i in [0,1]:
1600 if i == 1:
1601 status=self.withdraw_cluster_current_mastership(master_ip=master)
1602                        assert_equal(status, True)
1603 master,standbys = self.get_cluster_current_master_standbys()
1604                        log.info('verifying cord subscriber functionality after current cluster master withdraws mastership')
1605 else:
1606 log.info('verifying cord subscriber functionality before cluster master withdraw mastership')
1607 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1608 num_channels = self.subscriber.num_channels,
1609 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1610 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1611 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1612 self.subscriber.num_channels),controller=master)
1613 assert_equal(test_status, True)
1614 self.subscriber.tearDownClass(controller=master)
1615
1616 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1617 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001618 status = self.verify_cluster_status(onos_instances=onos_instances)
1619 assert_equal(status, True)
1620 master, standbys = self.get_cluster_current_master_standbys()
1621 assert_equal(len(standbys),(onos_instances-1))
1622 onos_names_ips = self.get_cluster_container_names_ips()
1623 member_onos_name = onos_names_ips[standbys[0]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001624 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001625 num_subscribers = 1
1626 num_channels = 10
1627 for i in [0,1]:
1628 if i == 1:
1629 cord_test_onos_shutdown(node = member_onos_name)
1630 time.sleep(30)
ChetanGaonker689b3862016-10-17 16:25:01 -07001631 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001632 assert_equal(status, True)
1633 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1634 else:
1635 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1636 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1637 num_channels = num_channels,
1638 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1639 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1640 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
ChetanGaonker689b3862016-10-17 16:25:01 -07001641 negative_subscriber_auth = 'all',controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001642 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001643 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001644
ChetanGaonker689b3862016-10-17 16:25:01 -07001645 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001646 status = self.verify_cluster_status(onos_instances=onos_instances)
1647 assert_equal(status, True)
1648 master, standbys = self.get_cluster_current_master_standbys()
1649 assert_equal(len(standbys),(onos_instances-1))
1650 onos_names_ips = self.get_cluster_container_names_ips()
1651 member1_onos_name = onos_names_ips[standbys[0]]
1652 member2_onos_name = onos_names_ips[standbys[1]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001653 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001654 num_subscribers = 1
1655 num_channels = 10
1656 for i in [0,1]:
1657 if i == 1:
1658 cord_test_onos_shutdown(node = member1_onos_name)
1659 cord_test_onos_shutdown(node = member2_onos_name)
1660 time.sleep(60)
1661 status = self.verify_cluster_status(onos_instances=onos_instances-2)
1662 assert_equal(status, True)
1663                log.info('Verifying cord subscriber functionality after two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1664 else:
1665                log.info('Verifying cord subscriber functionality before two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1666 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1667 num_channels = num_channels,
1668 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1669 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1670 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1671 negative_subscriber_auth = 'all')
1672 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001673 self.subscriber.tearDownClass(controller=master)
1674
1675 #pass
1676 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1677 status = self.verify_cluster_status(onos_instances=onos_instances)
1678 assert_equal(status, True)
1679 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1680 for device in device_dict.keys():
1681 log.info("Device is %s"%device_dict[device])
1682 assert_not_equal(device_dict[device]['master'],'none')
1683 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1684 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1685
1686 #pass
1687 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1688 status = self.verify_cluster_status(onos_instances=onos_instances)
1689 assert_equal(status, True)
1690 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1691 cluster_ips = self.get_cluster_current_member_ips()
1692 for ip in cluster_ips:
1693 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1694 assert_equal(len(device_dict.keys()),onos_instances)
1695 for device in device_dict.keys():
1696 log.info("Device is %s"%device_dict[device])
1697 assert_not_equal(device_dict[device]['master'],'none')
1698 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1699 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1700
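    # After restarting an ONOS instance that currently masters at least one device, mastership
    # should be rebalanced away from it: the restarted node is expected to own zero devices while
    # the total number of mastered devices stays the same.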
1701 #pass
1702 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1703 status = self.verify_cluster_status(onos_instances=onos_instances)
1704 assert_equal(status, True)
1705 onos_names_ips = self.get_cluster_container_names_ips()
1706 master_count = self.get_number_of_devices_of_master()
1707 log.info('Master count information is %s'%master_count)
1708 total_devices = 0
1709 for master in master_count.keys():
1710 total_devices += master_count[master]['size']
1711 if master_count[master]['size'] != 0:
1712 restart_ip = master
1713 assert_equal(total_devices,onos_instances)
1714 member_onos_name = onos_names_ips[restart_ip]
1715 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1716 Container(member_onos_name, Onos.IMAGE).restart()
1717 time.sleep(40)
1718 master_count = self.get_number_of_devices_of_master()
1719 log.info('Master count information after restart is %s'%master_count)
1720 total_devices = 0
1721 for master in master_count.keys():
1722 total_devices += master_count[master]['size']
1723 if master == restart_ip:
1724 assert_equal(master_count[master]['size'], 0)
1725 assert_equal(total_devices,onos_instances)
1726
1727 #pass
1728 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1729 status = self.verify_cluster_status(onos_instances=onos_instances)
1730 assert_equal(status, True)
1731 onos_names_ips = self.get_cluster_container_names_ips()
1732 master_count = self.get_number_of_devices_of_master()
1733 log.info('Master count information is %s'%master_count)
1734 total_devices = 0
1735 for master in master_count.keys():
1736 total_devices += master_count[master]['size']
1737 if master_count[master]['size'] != 0:
1738 restart_ip = master
1739 assert_equal(total_devices,onos_instances)
1740 master_onos_name = onos_names_ips[restart_ip]
1741 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1742 Container(master_onos_name, Onos.IMAGE).kill()
1743 time.sleep(40)
1744 for ip in onos_names_ips.keys():
1745 if ip != restart_ip:
1746 controller_ip = ip
1747 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1748 assert_equal(status, True)
1749 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1750 log.info('Master count information after restart is %s'%master_count)
1751 total_devices = 0
1752 for master in master_count.keys():
1753 total_devices += master_count[master]['size']
1754 if master == restart_ip:
1755 assert_equal(master_count[master]['size'], 0)
1756 assert_equal(total_devices,onos_instances)
1757
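    # Withdraws mastership of a single device from its current master via the cluster API and
    # checks that the device count for that controller drops by exactly one while the overall
    # number of mastered devices is unchanged.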
1758 #pass
1759 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1760 status = self.verify_cluster_status(onos_instances=onos_instances)
1761 assert_equal(status, True)
1762 master_count = self.get_number_of_devices_of_master()
1763 log.info('Master count information is %s'%master_count)
1764 total_devices = 0
1765 for master in master_count.keys():
1766 total_devices += int(master_count[master]['size'])
1767 if master_count[master]['size'] != 0:
1768 master_ip = master
1769                log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
1770 device_id = str(master_count[master]['devices'][0])
1771 device_count = master_count[master]['size']
1772 assert_equal(total_devices,onos_instances)
1773 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1774 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1775 assert_equal(status, True)
1776 master_count = self.get_number_of_devices_of_master()
1777 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1778 total_devices = 0
1779 for master in master_count.keys():
1780 total_devices += int(master_count[master]['size'])
1781 if master == master_ip:
1782 assert_equal(master_count[master]['size'], device_count-1)
1783 assert_equal(total_devices,onos_instances)
1784
1785 #pass
1786 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1787 status = self.verify_cluster_status(onos_instances=onos_instances)
1788 assert_equal(status, True)
1789 master_count = self.get_number_of_devices_of_master()
1790 log.info('Master count information is %s'%master_count)
1791 total_devices = 0
1792 for master in master_count.keys():
1793 total_devices += master_count[master]['size']
1794 assert_equal(total_devices,onos_instances)
1795 log.info('Restarting cluster')
1796 cord_test_onos_restart()
1797 time.sleep(60)
1798 master_count = self.get_number_of_devices_of_master()
1799 log.info('Master count information after restart is %s'%master_count)
1800 total_devices = 0
1801 for master in master_count.keys():
1802 total_devices += master_count[master]['size']
1803 assert_equal(total_devices,onos_instances)