# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl, get_mac
from OltConfig import OltConfig
from socket import socket
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart, cord_test_restart_cluster
from portmaps import g_subscriber_port_map
from scapy.all import *
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
from OnosLog import OnosLog
from CordLogger import CordLogger
import os
import json
import random
import collections
log.setLevel('INFO')

class cluster_exchange(CordLogger):
    test_path = os.path.dirname(os.path.realpath(__file__))
    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
    mac = RandMAC()._fix()
    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
    igmp_ip = IP(dst = '224.0.0.22')
    ONOS_INSTANCES = 3
    V_INF1 = 'veth0'
    TLS_TIMEOUT = 100
    device_id = 'of:' + get_mac()
    igmp = cluster_igmp()
    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10', end_ip = '224.1.10.49')
    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35', end_ip = '38.24.35.56')
    tls = cluster_tls()
    flows = cluster_flows()
    proxyarp = cluster_proxyarp()
    vrouter = cluster_vrouter()
    acl = cluster_acl()
    dhcprelay = cluster_dhcprelay()
    subscriber = cluster_subscriber()
    testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_graceful_controller_restarts',
                       'test_cluster_single_controller_restarts', 'test_cluster_restarts')

    def setUp(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).setUp()

    def tearDown(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).tearDown()

    def get_controller(self):
        controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
        controller = controller.split(',')[0]
        return controller

    @classmethod
    def get_controllers(cls):
        controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
        return controllers.split(',')

    def cliEnter(self, controller = None):
        retries = 0
        while retries < 30:
            self.cli = OnosCliDriver(controller = controller, connect = True)
            if self.cli.handle:
                break
            else:
                retries += 1
                time.sleep(2)

    def cliExit(self):
        self.cli.disconnect()

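    # Illustrative sketch (not part of the test suite): cliEnter()/cliExit() above are the
    # standard pattern used throughout this class for talking to the ONOS CLI. cliEnter()
    # retries the OnosCliDriver connection for up to a minute and callers pair it with
    # cliExit(). A hypothetical helper following that pattern would look like:
    #
    #     def get_summary(self, controller = None):
    #         self.cliEnter(controller = controller)
    #         try:
    #             summary = json.loads(self.cli.summary(jsonFormat = True))
    #         except:
    #             summary = None
    #         self.cliExit()
    #         return summary
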
    def get_leader(self, controller = None):
        self.cliEnter(controller = controller)
        try:
            result = json.loads(self.cli.leaders(jsonFormat = True))
        except:
            result = None

        if result is None:
            log.info('Leaders command failure for controller %s' %controller)
        else:
            log.info('Leaders returned: %s' %result)
        self.cliExit()
        return result

    def onos_shutdown(self, controller = None):
        status = True
        self.cliEnter(controller = controller)
        try:
            self.cli.shutdown(timeout = 10)
        except:
            log.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
            status = False

        self.cliExit()
        return status

    def log_set(self, level = None, app = 'org.onosproject', controllers = None):
        CordLogger.logSet(level = level, app = app, controllers = controllers, forced = True)

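    # Illustrative note: log_set() is a thin wrapper over CordLogger.logSet and is used by
    # the restart tests below to raise the log level on the surviving controllers (including
    # the io.atomix cluster agent) before a node is killed. A minimal, hypothetical
    # invocation mirroring that pattern would be:
    #
    #     self.log_set(level = 'DEBUG', controllers = self.get_controllers())
    #     self.log_set(level = 'DEBUG', app = 'io.atomix', controllers = self.get_controllers())
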
    def get_leaders(self, controller = None):
        result_map = {}
        if controller is None:
            controller = self.get_controller()
        if type(controller) in [ list, tuple ]:
            for c in controller:
                leaders = self.get_leader(controller = c)
                result_map[c] = leaders
        else:
            leaders = self.get_leader(controller = controller)
            result_map[controller] = leaders
        return result_map

    def verify_leaders(self, controller = None):
        leaders_map = self.get_leaders(controller = controller)
        failed = [ k for k, v in leaders_map.items() if v == None ]
        return failed

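    # Illustrative sketch of the data the two helpers above produce, assuming a hypothetical
    # three-node cluster at 172.17.0.2-4: get_leaders() returns a dict keyed by controller IP
    # whose values are the parsed 'leaders' JSON (or None on failure), and verify_leaders()
    # reduces that to the list of IPs whose leaders query failed:
    #
    #     leaders_map = self.get_leaders(controller = ['172.17.0.2', '172.17.0.3', '172.17.0.4'])
    #     failed = [ ip for ip, leaders in leaders_map.items() if leaders is None ]
    #     assert_equal(len(failed), 0)
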
    def verify_cluster_status(self, controller = None, onos_instances = ONOS_INSTANCES, verify = False):
        tries = 0
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
                if cluster_summary:
                    log.info("cluster 'summary' command output is %s"%cluster_summary)
                    nodes = cluster_summary['nodes']
                    if verify:
                        if nodes == onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                    else:
                        if nodes >= onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return False
        except:
            raise Exception('Failed to get cluster members')
            return False

    def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
        tries = 0
        cluster_ips = []
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
                if cluster_nodes:
                    log.info("cluster 'nodes' output is %s"%cluster_nodes)
                    if nodes_filter:
                        cluster_nodes = nodes_filter(cluster_nodes)
                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
                    self.cliExit()
                    cluster_ips.sort(lambda i1, i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
                    return cluster_ips
                else:
                    tries += 1
            self.cliExit()
            return cluster_ips
        except:
            raise Exception('Failed to get cluster members')
            return cluster_ips

    def get_cluster_container_names_ips(self, controller = None):
        onos_names_ips = {}
        onos_ips = self.get_cluster_current_member_ips(controller = controller)
        onos_names_ips[onos_ips[0]] = Onos.NAME
        onos_names_ips[Onos.NAME] = onos_ips[0]
        for i in range(1, len(onos_ips)):
            name = '{0}-{1}'.format(Onos.NAME, i+1)
            onos_names_ips[onos_ips[i]] = name
            onos_names_ips[name] = onos_ips[i]

        return onos_names_ips

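    # Illustrative sketch: get_cluster_container_names_ips() builds a bidirectional map
    # between controller IPs and container names; the first member keeps Onos.NAME and the
    # rest are numbered '<NAME>-2', '<NAME>-3', and so on. Assuming Onos.NAME == 'onos' and
    # two members at hypothetical addresses 172.17.0.2/3, the result would be:
    #
    #     onos_map = self.get_cluster_container_names_ips()
    #     # { '172.17.0.2': 'onos',   'onos': '172.17.0.2',
    #     #   '172.17.0.3': 'onos-2', 'onos-2': '172.17.0.3' }
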
    #identifying current master of a connected device, not tested
    def get_cluster_current_master_standbys(self, controller = None, device_id = device_id):
        master = None
        standbys = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        log.info('Verifying device info in line %s'%device)
                        if device['id'] == device_id:
                            master = str(device['master'])
                            standbys = map(lambda d: str(d), device['standbys'])
                            log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
                            self.cliExit()
                            return master, standbys
                    self.cliExit()
                    return master, standbys
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return master, standbys
        except:
            raise Exception('Failed to get cluster members')
            return master, standbys

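    # Illustrative sketch of the 'roles' JSON shape the method above assumes (one entry per
    # device, with a single master and a list of standby controller IPs). The exact fields
    # come from the ONOS 'roles' CLI output and are only inferred here from the parsing code;
    # the device id and addresses below are hypothetical:
    #
    #     roles = [ { 'id': 'of:0000aabbccddeeff',
    #                 'master': '172.17.0.2',
    #                 'standbys': ['172.17.0.3', '172.17.0.4'] } ]
    #     master, standbys = self.get_cluster_current_master_standbys(device_id = 'of:0000aabbccddeeff')
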
    def get_cluster_current_master_standbys_of_connected_devices(self, controller = None):
        '''Return the master and standbys of all devices connected to the ONOS cluster instance'''
        device_dict = {}
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_dict = {}
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        device_dict[str(device['id'])] = {'master': str(device['master']), 'standbys': device['standbys']}
                        for i in range(len(device_dict[device['id']]['standbys'])):
                            device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
                        log.info('master and standbys for device %s are %s and %s'%(device['id'], device_dict[device['id']]['master'], device_dict[device['id']]['standbys']))
                    self.cliExit()
                    return device_dict
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_dict
        except:
            raise Exception('Failed to get cluster members')
            return device_dict

    #identify current master of a connected device, not tested
    def get_cluster_connected_devices(self, controller = None):
        '''returns all the devices connected to the ONOS cluster'''
        device_list = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_list = []
                devices = json.loads(self.cli.devices(jsonFormat = True))
                log.info("cluster 'devices' command output is %s"%devices)
                if devices:
                    for device in devices:
                        log.info('device id is %s'%device['id'])
                        device_list.append(str(device['id']))
                    self.cliExit()
                    return device_list
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_list
        except:
            raise Exception('Failed to get cluster members')
            return device_list

    def get_number_of_devices_of_master(self, controller = None):
        '''Return a mapping of each master to the devices it controls'''
        master_count = {}
        try:
            cli = self.cliEnter(controller = controller)
            masters = json.loads(self.cli.masters(jsonFormat = True))
            if masters:
                for master in masters:
                    master_count[str(master['id'])] = {'size': int(master['size']), 'devices': master['devices']}
                return master_count
            else:
                return master_count
        except:
            raise Exception('Failed to get cluster members')
            return master_count

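    # Illustrative sketch: get_number_of_devices_of_master() reduces the ONOS 'masters'
    # output to a dict keyed by controller IP. The values below are hypothetical and only
    # mirror the fields the parser reads ('id', 'size', 'devices'):
    #
    #     master_count = self.get_number_of_devices_of_master()
    #     # { '172.17.0.2': {'size': 1, 'devices': ['of:0000aabbccddeeff']},
    #     #   '172.17.0.3': {'size': 0, 'devices': []} }
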
    def change_master_current_cluster(self, new_master = None, device_id = device_id, controller = None):
        if new_master is None: return False
        self.cliEnter(controller = controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        master, standbys = self.get_cluster_current_master_standbys(controller = controller, device_id = device_id)
        assert_equal(master, new_master)
        log.info('Cluster master changed to %s successfully'%new_master)

    def withdraw_cluster_current_mastership(self, master_ip = None, device_id = device_id, controller = None):
        '''The current master loses its mastership and hence a new master will be elected'''
        self.cliEnter(controller = controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        new_master_ip, standbys = self.get_cluster_current_master_standbys(controller = controller, device_id = device_id)
        assert_not_equal(new_master_ip, master_ip)
        log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id, master_ip))
        log.info('Cluster new master is %s'%new_master_ip)
        return True

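    # Illustrative note: the two mastership helpers above drive the ONOS 'device-role' CLI
    # command; with hypothetical values they issue lines equivalent to:
    #
    #     device-role of:0000aabbccddeeff 172.17.0.3 master   # promote a standby
    #     device-role of:0000aabbccddeeff 172.17.0.2 none     # withdraw the current master
    #
    # after which get_cluster_current_master_standbys() is polled (after a 60 second settle
    # delay) to assert the expected mastership change.
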
    def cluster_controller_restarts(self, graceful = False):
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call verifies the cluster once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(controller = None):
            adjacent_controller = None
            adjacent_controllers = None
            if controller:
                adjacent_controllers = list(set(controllers) - set([controller]))
                adjacent_controller = adjacent_controllers[0]
            for node in controllers:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    failed = self.verify_leaders(controllers)
                    if failed:
                        log.info('Leaders command failed on nodes: %s' %failed)
                    assert_equal(len(failed), 0)
                    return controller

            try:
                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
                log.info('ONOS cluster formed with controllers: %s' %ips)
                st = True
            except:
                st = False

            failed = self.verify_leaders(controllers)
            assert_equal(len(failed), 0)
            if st is False:
                log.info('No storage exception and ONOS cluster was not formed successfully')
            else:
                controller = None

            return controller

        next_controller = None
        tries = 10
        for num in range(tries):
            index = num % ctlr_len
            #index = random.randrange(0, ctlr_len)
            controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
            controller = onos_map[controller_name]
            log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
            try:
                #enable debug log for the other controllers before restarting this controller
                adjacent_controllers = list( set(controllers) - set([controller]) )
                self.log_set(controllers = adjacent_controllers)
                self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
                if graceful is True:
                    self.onos_shutdown(controller)
                cord_test_onos_restart(node = controller, timeout = 0)
                self.log_set(controllers = controller)
                self.log_set(app = 'io.atomix', controllers = controller)
                time.sleep(60)
            except:
                time.sleep(5)
                continue

            #first archive the test case logs for this run
            CordLogger.archive_results(self._testMethodName,
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            next_controller = check_exception(controller = controller)

    def test_cluster_controller_restarts(self):
        '''Test the cluster by repeatedly killing the controllers'''
        self.cluster_controller_restarts()

    def test_cluster_graceful_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the controllers gracefully'''
        self.cluster_controller_restarts(graceful = True)

    def test_cluster_single_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the same controller'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call verifies the cluster once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(controller, inclusive = False):
            adjacent_controllers = list(set(controllers) - set([controller]))
            adjacent_controller = adjacent_controllers[0]
            controller_list = adjacent_controllers if inclusive == False else controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                assert_equal(len(failed), 0)
                return controller

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if controller in ips and inclusive is False:
                    log.info('Controller %s still ACTIVE on Node %s after it was shutdown' %(controller, ctlr))
                if controller not in ips and inclusive is True:
                    log.info('Controller %s still INACTIVE on Node %s after it was restarted' %(controller, ctlr))

            return controller

        tries = 10
        #choose a random controller for shutdown/restarts
        controller = controllers[random.randrange(0, ctlr_len)]
        controller_name = onos_map[controller]
        ##enable the log level for the controllers
        self.log_set(controllers = controllers)
        self.log_set(app = 'io.atomix', controllers = controllers)
        for num in range(tries):
            log.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
            try:
                cord_test_onos_shutdown(node = controller)
                time.sleep(20)
            except:
                time.sleep(5)
                continue
            #check for exceptions on the adjacent nodes
            check_exception(controller)
            #Now restart the controller back
            log.info('Restarting back the controller %s' %controller_name)
            cord_test_onos_restart(node = controller)
            self.log_set(controllers = controller)
            self.log_set(app = 'io.atomix', controllers = controller)
            time.sleep(60)
            #archive the logs for this run
            CordLogger.archive_results('test_cluster_single_controller_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            check_exception(controller, inclusive = True)

    def test_cluster_restarts(self):
        '''Test the cluster by repeatedly restarting the entire cluster'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call verifies the cluster once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception():
            controller_list = controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                assert_equal(len(failed), 0)
                return

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                assert_equal(len(ips), len(controllers))

        tries = 10
        for num in range(tries):
            log.info('ITERATION: %d. Restarting cluster with controllers at %s' %(num+1, controllers))
            try:
                cord_test_restart_cluster()
                self.log_set(controllers = controllers)
                self.log_set(app = 'io.atomix', controllers = controllers)
                log.info('Delaying before verifying cluster status')
                time.sleep(60)
            except:
                time.sleep(10)
                continue

            #archive the logs for this run before verification
            CordLogger.archive_results('test_cluster_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            #check for exceptions on the cluster nodes
            check_exception()

ChetanGaonker2099d722016-10-07 15:16:58 -0700558 #pass
ChetanGaonker689b3862016-10-17 16:25:01 -0700559 def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
560 status = self.verify_cluster_status(onos_instances = onos_instances)
561 assert_equal(status, True)
562 log.info('Cluster exists with %d ONOS instances'%onos_instances)
ChetanGaonker2099d722016-10-07 15:16:58 -0700563
564 #nottest cluster not coming up properly if member goes down
ChetanGaonker689b3862016-10-17 16:25:01 -0700565 def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700566 status = self.verify_cluster_status(onos_instances = onos_instances)
567 assert_equal(status, True)
568 onos_ips = self.get_cluster_current_member_ips()
569 onos_instances = len(onos_ips)+add
570 log.info('Adding %d nodes to the ONOS cluster' %add)
571 cord_test_onos_add_cluster(count = add)
572 status = self.verify_cluster_status(onos_instances=onos_instances)
573 assert_equal(status, True)
574
ChetanGaonker689b3862016-10-17 16:25:01 -0700575 def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700576 status = self.verify_cluster_status(onos_instances = onos_instances)
577 assert_equal(status, True)
578 master, standbys = self.get_cluster_current_master_standbys()
579 assert_equal(len(standbys),(onos_instances-1))
580 onos_names_ips = self.get_cluster_container_names_ips()
581 master_onos_name = onos_names_ips[master]
582 log.info('Removing cluster current master %s'%(master))
A R Karthick3b2e0372016-12-14 17:37:43 -0800583 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -0700584 time.sleep(60)
585 onos_instances -= 1
586 status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
587 assert_equal(status, True)
588 new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
589 assert_not_equal(master,new_master)
ChetanGaonker689b3862016-10-17 16:25:01 -0700590 log.info('Successfully removed clusters master instance')
ChetanGaonker2099d722016-10-07 15:16:58 -0700591
ChetanGaonker689b3862016-10-17 16:25:01 -0700592 def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700593 status = self.verify_cluster_status(onos_instances = onos_instances)
594 assert_equal(status, True)
595 master, standbys = self.get_cluster_current_master_standbys()
596 assert_equal(len(standbys),(onos_instances-1))
597 onos_names_ips = self.get_cluster_container_names_ips()
598 member_onos_name = onos_names_ips[standbys[0]]
599 log.info('Removing cluster member %s'%standbys[0])
A R Karthick3b2e0372016-12-14 17:37:43 -0800600 cord_test_onos_shutdown(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -0700601 time.sleep(60)
602 onos_instances -= 1
603 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
604 assert_equal(status, True)
605
ChetanGaonker689b3862016-10-17 16:25:01 -0700606 def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700607 status = self.verify_cluster_status(onos_instances = onos_instances)
608 assert_equal(status, True)
609 master, standbys = self.get_cluster_current_master_standbys()
610 assert_equal(len(standbys),(onos_instances-1))
611 onos_names_ips = self.get_cluster_container_names_ips()
612 member1_onos_name = onos_names_ips[standbys[0]]
613 member2_onos_name = onos_names_ips[standbys[1]]
614 log.info('Removing cluster member %s'%standbys[0])
A R Karthick3b2e0372016-12-14 17:37:43 -0800615 cord_test_onos_shutdown(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -0700616 log.info('Removing cluster member %s'%standbys[1])
A R Karthick3b2e0372016-12-14 17:37:43 -0800617 cord_test_onos_shutdown(node = standbys[1])
ChetanGaonker2099d722016-10-07 15:16:58 -0700618 time.sleep(60)
619 onos_instances = onos_instances - 2
620 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
621 assert_equal(status, True)
622
ChetanGaonker689b3862016-10-17 16:25:01 -0700623 def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700624 status = self.verify_cluster_status(onos_instances = onos_instances)
625 assert_equal(status, True)
626 master, standbys = self.get_cluster_current_master_standbys()
627 assert_equal(len(standbys),(onos_instances-1))
628 onos_names_ips = self.get_cluster_container_names_ips()
629 for i in range(remove):
630 member_onos_name = onos_names_ips[standbys[i]]
631 log.info('Removing onos container with name %s'%standbys[i])
A R Karthick3b2e0372016-12-14 17:37:43 -0800632 cord_test_onos_shutdown(node = standbys[i])
ChetanGaonker2099d722016-10-07 15:16:58 -0700633 time.sleep(60)
634 onos_instances = onos_instances - remove
635 status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
636 assert_equal(status, True)
637
638 #nottest test cluster not coming up properly if member goes down
ChetanGaonker689b3862016-10-17 16:25:01 -0700639 def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
ChetanGaonker2099d722016-10-07 15:16:58 -0700640 status = self.verify_cluster_status(onos_instances = onos_instances)
641 assert_equal(status, True)
642 onos_ips = self.get_cluster_current_member_ips()
643 onos_instances = len(onos_ips)+add
644 log.info('Adding %d ONOS instances to the cluster'%add)
645 cord_test_onos_add_cluster(count = add)
646 status = self.verify_cluster_status(onos_instances=onos_instances)
647 assert_equal(status, True)
648 log.info('Removing %d ONOS instances from the cluster'%remove)
649 for i in range(remove):
650 name = '{}-{}'.format(Onos.NAME, onos_instances - i)
651 log.info('Removing onos container with name %s'%name)
652 cord_test_onos_shutdown(node = name)
653 time.sleep(60)
654 onos_instances = onos_instances-remove
655 status = self.verify_cluster_status(onos_instances=onos_instances)
656 assert_equal(status, True)
657
658 #nottest cluster not coming up properly if member goes down
ChetanGaonker689b3862016-10-17 16:25:01 -0700659 def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
ChetanGaonker2099d722016-10-07 15:16:58 -0700660 status = self.verify_cluster_status(onos_instances = onos_instances)
661 assert_equal(status, True)
662 onos_ips = self.get_cluster_current_member_ips()
663 onos_instances = onos_instances-remove
664 log.info('Removing %d ONOS instances from the cluster'%remove)
665 for i in range(remove):
666 name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
667 log.info('Removing onos container with name %s'%name)
668 cord_test_onos_shutdown(node = name)
669 time.sleep(60)
670 status = self.verify_cluster_status(onos_instances=onos_instances)
671 assert_equal(status, True)
672 log.info('Adding %d ONOS instances to the cluster'%add)
673 cord_test_onos_add_cluster(count = add)
674 onos_instances = onos_instances+add
675 status = self.verify_cluster_status(onos_instances=onos_instances)
676 assert_equal(status, True)
677
ChetanGaonker689b3862016-10-17 16:25:01 -0700678 def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700679 status = self.verify_cluster_status(onos_instances = onos_instances)
680 assert_equal(status, True)
681 log.info('Restarting cluster')
682 cord_test_onos_restart()
683 status = self.verify_cluster_status(onos_instances = onos_instances)
684 assert_equal(status, True)
685
ChetanGaonker689b3862016-10-17 16:25:01 -0700686 def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700687 status = self.verify_cluster_status(onos_instances = onos_instances)
688 assert_equal(status, True)
689 master, standbys = self.get_cluster_current_master_standbys()
690 onos_names_ips = self.get_cluster_container_names_ips()
691 master_onos_name = onos_names_ips[master]
692 log.info('Restarting cluster master %s'%master)
A R Karthick3b2e0372016-12-14 17:37:43 -0800693 cord_test_onos_restart(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -0700694 status = self.verify_cluster_status(onos_instances = onos_instances)
695 assert_equal(status, True)
696 log.info('Cluster came up after master restart as expected')
697
698 #test fail. master changing after restart. Need to check correct behavior.
ChetanGaonker689b3862016-10-17 16:25:01 -0700699 def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700700 status = self.verify_cluster_status(onos_instances = onos_instances)
701 assert_equal(status, True)
702 master1, standbys = self.get_cluster_current_master_standbys()
703 onos_names_ips = self.get_cluster_container_names_ips()
704 master_onos_name = onos_names_ips[master1]
A R Karthick3b2e0372016-12-14 17:37:43 -0800705 log.info('Restarting cluster master %s'%master1)
706 cord_test_onos_restart(node = master1)
ChetanGaonker2099d722016-10-07 15:16:58 -0700707 status = self.verify_cluster_status(onos_instances = onos_instances)
708 assert_equal(status, True)
709 master2, standbys = self.get_cluster_current_master_standbys()
710 assert_equal(master1,master2)
711 log.info('Cluster master is same before and after cluster master restart as expected')
712
ChetanGaonker689b3862016-10-17 16:25:01 -0700713 def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700714 status = self.verify_cluster_status(onos_instances = onos_instances)
715 assert_equal(status, True)
716 master, standbys = self.get_cluster_current_master_standbys()
717 assert_equal(len(standbys),(onos_instances-1))
718 onos_names_ips = self.get_cluster_container_names_ips()
719 member_onos_name = onos_names_ips[standbys[0]]
720 log.info('Restarting cluster member %s'%standbys[0])
A R Karthick3b2e0372016-12-14 17:37:43 -0800721 cord_test_onos_restart(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -0700722 status = self.verify_cluster_status(onos_instances = onos_instances)
723 assert_equal(status, True)
724 log.info('Cluster came up as expected after restarting one member')
725
ChetanGaonker689b3862016-10-17 16:25:01 -0700726 def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700727 status = self.verify_cluster_status(onos_instances = onos_instances)
728 assert_equal(status, True)
729 master, standbys = self.get_cluster_current_master_standbys()
730 assert_equal(len(standbys),(onos_instances-1))
731 onos_names_ips = self.get_cluster_container_names_ips()
732 member1_onos_name = onos_names_ips[standbys[0]]
733 member2_onos_name = onos_names_ips[standbys[1]]
734 log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
A R Karthick3b2e0372016-12-14 17:37:43 -0800735 cord_test_onos_restart(node = standbys[0])
736 cord_test_onos_restart(node = standbys[1])
ChetanGaonker2099d722016-10-07 15:16:58 -0700737 status = self.verify_cluster_status(onos_instances = onos_instances)
738 assert_equal(status, True)
739 log.info('Cluster came up as expected after restarting two members')
740
ChetanGaonker689b3862016-10-17 16:25:01 -0700741 def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700742 status = self.verify_cluster_status(onos_instances = onos_instances)
743 assert_equal(status,True)
744 master, standbys = self.get_cluster_current_master_standbys()
745 assert_equal(len(standbys),(onos_instances-1))
746 onos_names_ips = self.get_cluster_container_names_ips()
747 for i in range(members):
748 member_onos_name = onos_names_ips[standbys[i]]
749 log.info('Restarting cluster member %s'%standbys[i])
A R Karthick3b2e0372016-12-14 17:37:43 -0800750 cord_test_onos_restart(node = standbys[i])
ChetanGaonker2099d722016-10-07 15:16:58 -0700751
752 status = self.verify_cluster_status(onos_instances = onos_instances)
753 assert_equal(status, True)
754 log.info('Cluster came up as expected after restarting %d members'%members)
755
ChetanGaonker689b3862016-10-17 16:25:01 -0700756 def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700757 status = self.verify_cluster_status(onos_instances=onos_instances)
758 assert_equal(status, True)
759 master, standbys = self.get_cluster_current_master_standbys()
760 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -0700761 log.info('Cluster current master of devices is %s'%master)
ChetanGaonker2099d722016-10-07 15:16:58 -0700762 self.change_master_current_cluster(new_master=standbys[0])
763 log.info('Cluster master changed successfully')
764
765 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700766 def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700767 status = self.verify_cluster_status(onos_instances = onos_instances)
768 assert_equal(status, True)
769 onos_ips = self.get_cluster_current_member_ips()
770 self.vrouter.setUpClass()
771 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
772 assert_equal(res, True)
773 for onos_ip in onos_ips:
774 tries = 0
775 flag = False
776 try:
777 self.cliEnter(controller = onos_ip)
778 while tries <= 5:
779 routes = json.loads(self.cli.routes(jsonFormat = True))
780 if routes:
781 assert_equal(len(routes['routes4']), networks)
782 self.cliExit()
783 flag = True
784 break
785 else:
786 tries += 1
787 time.sleep(1)
788 assert_equal(flag, True)
789 except:
                log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
791 raise
792
793 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700794 def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700795 status = self.verify_cluster_status(onos_instances = onos_instances)
796 assert_equal(status, True)
797 onos_ips = self.get_cluster_current_member_ips()
798 master, standbys = self.get_cluster_current_master_standbys()
799 onos_names_ips = self.get_cluster_container_names_ips()
800 master_onos_name = onos_names_ips[master]
801 self.vrouter.setUpClass()
802 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
803 assert_equal(res,True)
A R Karthick3b2e0372016-12-14 17:37:43 -0800804 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -0700805 time.sleep(60)
ChetanGaonker689b3862016-10-17 16:25:01 -0700806 log.info('Verifying vrouter traffic after cluster master is down')
ChetanGaonker2099d722016-10-07 15:16:58 -0700807 self.vrouter.vrouter_traffic_verify()
808
809 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700810 def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700811 status = self.verify_cluster_status(onos_instances = onos_instances)
812 assert_equal(status, True)
813 onos_ips = self.get_cluster_current_member_ips()
814 master, standbys = self.get_cluster_current_master_standbys()
815 onos_names_ips = self.get_cluster_container_names_ips()
816 master_onos_name = onos_names_ips[master]
817 self.vrouter.setUpClass()
818 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
819 assert_equal(res, True)
820 cord_test_onos_restart()
821 self.vrouter.vrouter_traffic_verify()
822
823 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700824 def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700825 status = self.verify_cluster_status(onos_instances = onos_instances)
826 assert_equal(status, True)
827 self.vrouter.setUpClass()
828 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
829 assert_equal(res, True)
830 self.vrouter.vrouter_activate(deactivate=True)
831 time.sleep(15)
832 self.vrouter.vrouter_traffic_verify(positive_test=False)
833 self.vrouter.vrouter_activate(deactivate=False)
834
835 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700836 def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700837 status = self.verify_cluster_status(onos_instances = onos_instances)
838 assert_equal(status, True)
839 master, standbys = self.get_cluster_current_master_standbys()
840 onos_names_ips = self.get_cluster_container_names_ips()
841 master_onos_name = onos_names_ips[master]
842 self.vrouter.setUpClass()
843 log.info('Verifying vrouter before master down')
844 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
845 assert_equal(res, True)
846 self.vrouter.vrouter_activate(deactivate=True)
847 log.info('Verifying vrouter traffic after app deactivated')
848 time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
849 self.vrouter.vrouter_traffic_verify(positive_test=False)
850 log.info('Verifying vrouter traffic after master down')
A R Karthick3b2e0372016-12-14 17:37:43 -0800851 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -0700852 time.sleep(60)
853 self.vrouter.vrouter_traffic_verify(positive_test=False)
854 self.vrouter.vrouter_activate(deactivate=False)
855
856 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700857 def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700858 status = self.verify_cluster_status(onos_instances = onos_instances)
859 assert_equal(status, True)
860 master, standbys = self.get_cluster_current_master_standbys()
861 onos_names_ips = self.get_cluster_container_names_ips()
862 member_onos_name = onos_names_ips[standbys[0]]
863 self.vrouter.setUpClass()
864 log.info('Verifying vrouter before cluster member down')
865 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
866 assert_equal(res, True) # Expecting vrouter should work properly
867 log.info('Verifying vrouter after cluster member down')
A R Karthick3b2e0372016-12-14 17:37:43 -0800868 cord_test_onos_shutdown(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -0700869 time.sleep(60)
870 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster goes down
871
872 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700873 def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700874 status = self.verify_cluster_status(onos_instances = onos_instances)
875 assert_equal(status, True)
876 master, standbys = self.get_cluster_current_master_standbys()
877 onos_names_ips = self.get_cluster_container_names_ips()
878 member_onos_name = onos_names_ips[standbys[1]]
879 self.vrouter.setUpClass()
880 log.info('Verifying vrouter traffic before cluster member restart')
881 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
882 assert_equal(res, True) # Expecting vrouter should work properly
A R Karthick3b2e0372016-12-14 17:37:43 -0800883 cord_test_onos_restart(node = standbys[1])
ChetanGaonker2099d722016-10-07 15:16:58 -0700884 log.info('Verifying vrouter traffic after cluster member restart')
885 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
886
887 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700888 def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700889 status = self.verify_cluster_status(onos_instances = onos_instances)
890 assert_equal(status, True)
891 self.vrouter.setUpClass()
892 log.info('Verifying vrouter traffic before cluster restart')
893 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
894 assert_equal(res, True) # Expecting vrouter should work properly
895 cord_test_onos_restart()
896 log.info('Verifying vrouter traffic after cluster restart')
897 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
898
899
900 #test fails because flow state is in pending_add in onos
ChetanGaonker689b3862016-10-17 16:25:01 -0700901 def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700902 status = self.verify_cluster_status(onos_instances = onos_instances)
903 assert_equal(status, True)
904 master, standbys = self.get_cluster_current_master_standbys()
905 onos_names_ips = self.get_cluster_container_names_ips()
906 master_onos_name = onos_names_ips[master]
907 self.flows.setUpClass()
908 egress = 1
909 ingress = 2
910 egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
911 ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
912 flow = OnosFlowCtrl(deviceId = self.device_id,
913 egressPort = egress,
914 ingressPort = ingress,
915 udpSrc = ingress_map['udp_port'],
916 udpDst = egress_map['udp_port'],
917 controller=master
918 )
919 result = flow.addFlow()
920 assert_equal(result, True)
921 time.sleep(1)
922 self.success = False
923 def mac_recv_task():
924 def recv_cb(pkt):
925 log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
926 self.success = True
927 sniff(timeout=2,
928 lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
929 and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])
930
931 for i in [0,1]:
932 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -0800933 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -0700934 log.info('Verifying flows traffic after master killed')
935 time.sleep(45)
936 else:
937 log.info('Verifying flows traffic before master killed')
938 t = threading.Thread(target = mac_recv_task)
939 t.start()
940 L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
941 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
942 L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
943 pkt = L2/L3/L4
944 log.info('Sending packets to verify if flows are correct')
945 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
946 t.join()
947 assert_equal(self.success, True)
948
ChetanGaonker689b3862016-10-17 16:25:01 -0700949 def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700950 status = self.verify_cluster_status(onos_instances=onos_instances)
951 assert_equal(status, True)
952 master, standbys = self.get_cluster_current_master_standbys()
953 self.flows.setUpClass()
954 egress = 1
955 ingress = 2
956 egress_map = { 'ip': '192.168.30.1' }
957 ingress_map = { 'ip': '192.168.40.1' }
958 flow = OnosFlowCtrl(deviceId = self.device_id,
959 egressPort = egress,
960 ingressPort = ingress,
961 ecn = 1,
962 controller=master
963 )
964 result = flow.addFlow()
965 assert_equal(result, True)
966 ##wait for flows to be added to ONOS
967 time.sleep(1)
968 self.success = False
969 def mac_recv_task():
970 def recv_cb(pkt):
971 log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
972 self.success = True
973 sniff(count=2, timeout=5,
974 lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
975 and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
976 iface = self.flows.port_map[egress])
977 for i in [0,1]:
978 if i == 1:
979 log.info('Changing cluster master to %s'%standbys[0])
980 self.change_master_current_cluster(new_master=standbys[0])
                log.info('Verifying flow traffic after cluster master changed')
982 else:
983 log.info('Verifying flow traffic before cluster master changed')
984 t = threading.Thread(target = mac_recv_task)
985 t.start()
986 L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
987 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
988 pkt = L2/L3
989 log.info('Sending a packet to verify if flows are correct')
990 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
991 t.join()
992 assert_equal(self.success, True)
993
ChetanGaonker689b3862016-10-17 16:25:01 -0700994 #pass
995 def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
996 status = self.verify_cluster_status(onos_instances=onos_instances)
997 assert_equal(status, True)
998 master,standbys = self.get_cluster_current_master_standbys()
999 onos_names_ips = self.get_cluster_container_names_ips()
1000 master_onos_name = onos_names_ips[master]
1001 self.flows.setUpClass()
1002 egress = 1
1003 ingress = 2
1004 egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
1005 ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
1006 flow = OnosFlowCtrl(deviceId = self.device_id,
1007 egressPort = egress,
1008 ingressPort = ingress,
1009 ipv6_extension = 0,
1010 controller=master
1011 )
1012
1013 result = flow.addFlow()
1014 assert_equal(result, True)
1015 ##wait for flows to be added to ONOS
1016 time.sleep(1)
1017 self.success = False
1018 def mac_recv_task():
1019 def recv_cb(pkt):
1020 log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
1021 self.success = True
1022 sniff(timeout=2,count=5,
1023 lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
1024 for i in [0,1]:
1025 if i == 1:
1026 log.info('Restart cluster current master %s'%master)
1027 Container(master_onos_name,Onos.IMAGE).restart()
1028 time.sleep(45)
1029 log.info('Verifying flow traffic after master restart')
1030 else:
1031 log.info('Verifying flow traffic before master restart')
1032 t = threading.Thread(target = mac_recv_task)
1033 t.start()
1034 L2 = self.flows_eth
1035 L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
1036 pkt = L2/L3
1037 log.info('Sending packets to verify if flows are correct')
1038 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
1039 t.join()
1040 assert_equal(self.success, True)
1041
    def send_multicast_data_traffic(self, group, intf = 'veth2', source = '1.2.3.4'):
        dst_mac = self.igmp.iptomac(group)
        eth = Ether(dst = dst_mac)
        ip = IP(dst = group, src = source)
        data = repr(monotonic.monotonic())
        sendp(eth/ip/data, count = 20, iface = intf)
        pkt = (eth/ip/data)
        log.info('multicast traffic packet %s'%pkt.show())

    def verify_igmp_data_traffic(self, group, intf = 'veth0', source = '1.2.3.4'):
        log.info('verifying multicast traffic for group %s from source %s'%(group, source))
        self.success = False
        def recv_task():
            def igmp_recv_cb(pkt):
                log.info('multicast data received for group %s from source %s'%(group, source))
                self.success = True
            sniff(prn = igmp_recv_cb, lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source,
                  count = 1, timeout = 2, iface = 'veth0')
        t = threading.Thread(target = recv_task)
        t.start()
        self.send_multicast_data_traffic(group, source = source)
        t.join()
        return self.success

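    # Illustrative sketch: the two multicast helpers above are used in pairs by the IGMP
    # tests below; a receiver thread sniffs on veth0 for the group while
    # send_multicast_data_traffic() injects data on veth2. A minimal usage, reusing the
    # group/source values that already appear in the tests:
    #
    #     self.igmp.send_igmp_join(groups = ['224.2.3.4'], src_list = ['2.2.2.2'],
    #                              record_type = IGMP_V3_GR_TYPE_INCLUDE,
    #                              iface = self.V_INF1, delay = 2)
    #     assert_equal(self.verify_igmp_data_traffic('224.2.3.4', intf = self.V_INF1, source = '2.2.2.2'), True)
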
1065 #pass
1066 def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
1067 status = self.verify_cluster_status(onos_instances=onos_instances)
1068 assert_equal(status, True)
1069 master, standbys = self.get_cluster_current_master_standbys()
1070 assert_equal(len(standbys), (onos_instances-1))
1071 onos_names_ips = self.get_cluster_container_names_ips()
1072 master_onos_name = onos_names_ips[master]
1073 self.igmp.setUp(controller=master)
1074 groups = ['224.2.3.4','230.5.6.7']
1075 src_list = ['2.2.2.2','3.3.3.3']
1076 self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
1077 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1078 iface = self.V_INF1, delay = 2)
1079 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1080 iface = self.V_INF1, delay = 2)
1081 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1082 assert_equal(status,True)
1083 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1084 assert_equal(status,False)
1085 log.info('restarting cluster master %s'%master)
1086 Container(master_onos_name,Onos.IMAGE).restart()
1087 time.sleep(60)
1088 log.info('verifying multicast data traffic after master restart')
1089 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1090 assert_equal(status,True)
1091 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1092 assert_equal(status,False)
1093
1094 #pass
1095 def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
1096 status = self.verify_cluster_status(onos_instances=onos_instances)
1097 assert_equal(status, True)
1098 master, standbys = self.get_cluster_current_master_standbys()
1099 assert_equal(len(standbys), (onos_instances-1))
1100 onos_names_ips = self.get_cluster_container_names_ips()
1101 master_onos_name = onos_names_ips[master]
1102 self.igmp.setUp(controller=master)
1103 groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
1104 src_list = [self.igmp.randomsourceip()]
1105 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1106 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1107 iface = self.V_INF1, delay = 2)
1108 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1109 iface = self.V_INF1, delay = 2)
1110 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1111 assert_equal(status,True)
1112 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1113 assert_equal(status,False)
1114 log.info('Killing cluster master %s'%master)
1115 Container(master_onos_name,Onos.IMAGE).kill()
1116 time.sleep(60)
1117 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1118 assert_equal(status, True)
1119 log.info('Verifying multicast data traffic after cluster master down')
1120 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1121 assert_equal(status,True)
1122 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1123 assert_equal(status,False)
1124
1125 def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
1126 status = self.verify_cluster_status(onos_instances=onos_instances)
1127 assert_equal(status, True)
1128 master, standbys = self.get_cluster_current_master_standbys()
1129 assert_equal(len(standbys), (onos_instances-1))
1130 onos_names_ips = self.get_cluster_container_names_ips()
1131 master_onos_name = onos_names_ips[master]
1132 self.igmp.setUp(controller=master)
1133 groups = [self.igmp.random_mcast_ip()]
1134 src_list = [self.igmp.randomsourceip()]
1135 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1136 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1137 iface = self.V_INF1, delay = 2)
1138 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1139 assert_equal(status,True)
1140        log.info('Killing cluster master %s'%master)
1141 Container(master_onos_name,Onos.IMAGE).kill()
1142 count = 0
1143 for i in range(60):
1144 log.info('Verifying multicast data traffic after cluster master down')
1145 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1146 if status:
1147 break
1148 else:
1149 count += 1
1150 time.sleep(1)
1151 assert_equal(status, True)
1152        log.info('Time taken to recover traffic after cluster master down is %d seconds'%count)
1153
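    # Illustrative sketch, not part of the original suite: the recovery check above
    # polls once a second for up to 60 seconds after the master is killed. The same
    # poll as a reusable helper (name hypothetical), assuming verify_igmp_data_traffic
    # as used in this class:
    def wait_for_igmp_traffic_recovery(self, group, source, timeout = 60):
        # Returns (recovered, seconds_waited), polling the data plane once per second
        for elapsed in range(timeout):
            if self.verify_igmp_data_traffic(group, intf = self.V_INF1, source = source):
                return True, elapsed
            time.sleep(1)
        return False, timeout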
1154
1155 #pass
1156 def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
1157 status = self.verify_cluster_status(onos_instances=onos_instances)
1158 assert_equal(status, True)
1159 master, standbys = self.get_cluster_current_master_standbys()
1160 assert_equal(len(standbys), (onos_instances-1))
1161 self.igmp.setUp(controller=master)
1162 groups = [self.igmp.random_mcast_ip()]
1163 src_list = [self.igmp.randomsourceip()]
1164 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1165 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1166 iface = self.V_INF1, delay = 2)
1167 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1168 assert_equal(status,True)
1169 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1170 self.change_cluster_current_master(new_master=standbys[0])
1171 log.info('Verifying multicast traffic after cluster master change')
1172 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1173 assert_equal(status,True)
1174 log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
1175 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
1176 iface = self.V_INF1, delay = 1)
1177 time.sleep(10)
1178 status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
1179 assert_equal(status,False)
1180
1181 #pass
1182 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1183 status = self.verify_cluster_status(onos_instances=onos_instances)
1184 assert_equal(status, True)
1185 master,standbys = self.get_cluster_current_master_standbys()
1186 assert_equal(len(standbys), (onos_instances-1))
1187 self.igmp.setUp(controller=master)
1188 groups = [self.igmp.random_mcast_ip()]
1189 src_list = [self.igmp.randomsourceip()]
1190 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1191 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1192 self.change_cluster_current_master(new_master = standbys[0])
1193 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1194 iface = self.V_INF1, delay = 2)
1195 time.sleep(1)
1196 self.change_cluster_current_master(new_master = master)
1197 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1198 assert_equal(status,True)
1199
1200 #pass
ChetanGaonker2099d722016-10-07 15:16:58 -07001201 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001202 def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001203 status = self.verify_cluster_status(onos_instances=onos_instances)
1204 assert_equal(status, True)
1205 master, standbys = self.get_cluster_current_master_standbys()
1206 assert_equal(len(standbys), (onos_instances-1))
1207 self.tls.setUp(controller=master)
1208 df = defer.Deferred()
1209 def eap_tls_verify(df):
1210 tls = TLSAuthTest()
1211 tls.runTest()
1212 df.callback(0)
1213 reactor.callLater(0, eap_tls_verify, df)
1214 return df
1215
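    # Note on the deferred pattern used by the TLS tests in this class: @deferred(timeout)
    # from nose.twistedtools treats the returned Deferred as the test result, so the test
    # passes only if df.callback() fires before the timeout expires. A minimal sketch of
    # the same pattern, assuming TLSAuthTest behaves as used above:
    #
    #     df = defer.Deferred()
    #     def run_auth(df):
    #         TLSAuthTest().runTest()    # asserts internally on authentication failure
    #         df.callback(0)             # completes the deferred, i.e. the test
    #     reactor.callLater(0, run_auth, df)
    #     return df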
1216 @deferred(120)
ChetanGaonker689b3862016-10-17 16:25:01 -07001217 def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001218 master, standbys = self.get_cluster_current_master_standbys()
1219 assert_equal(len(standbys), (onos_instances-1))
1220 self.tls.setUp()
1221 df = defer.Deferred()
1222        def eap_tls_verify(df):
1223 tls = TLSAuthTest()
1224 tls.runTest()
1225 df.callback(0)
1226 for i in [0,1]:
1227 if i == 1:
1228 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1229 self.change_master_current_cluster(new_master=standbys[0])
1230 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1231 else:
1232 log.info('Verifying tls authentication before cluster master change')
1233 reactor.callLater(0, eap_tls_verify, df)
1234 return df
1235
1236 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001237 def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001238 status = self.verify_cluster_status(onos_instances=onos_instances)
1239 assert_equal(status, True)
1240 master, standbys = self.get_cluster_current_master_standbys()
1241 assert_equal(len(standbys), (onos_instances-1))
1242 onos_names_ips = self.get_cluster_container_names_ips()
1243 master_onos_name = onos_names_ips[master]
1244 self.tls.setUp()
1245 df = defer.Deferred()
1246 def eap_tls_verify(df):
1247 tls = TLSAuthTest()
1248 tls.runTest()
1249 df.callback(0)
1250 for i in [0,1]:
1251 if i == 1:
1252 log.info('Killing cluster current master %s'%master)
A R Karthick3b2e0372016-12-14 17:37:43 -08001253 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001254 time.sleep(20)
1255 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1256 assert_equal(status, True)
1257 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1258 log.info('Verifying tls authentication after killing cluster master')
1259 reactor.callLater(0, eap_tls_verify, df)
1260 return df
1261
1262 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001263 def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001264 status = self.verify_cluster_status(onos_instances=onos_instances)
1265 assert_equal(status, True)
1266 master, standbys = self.get_cluster_current_master_standbys()
1267 assert_equal(len(standbys), (onos_instances-1))
1268 onos_names_ips = self.get_cluster_container_names_ips()
1269 member_onos_name = onos_names_ips[standbys[0]]
1270 self.tls.setUp()
1271 df = defer.Deferred()
1272 def eap_tls_no_cert(df):
1273 def tls_no_cert_cb():
1274 log.info('TLS authentication failed with no certificate')
1275 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1276 tls.runTest()
1277 assert_equal(tls.failTest, True)
1278 df.callback(0)
1279 for i in [0,1]:
1280 if i == 1:
1281 log.info('Restart cluster member %s'%standbys[0])
1282 Container(member_onos_name,Onos.IMAGE).restart()
1283 time.sleep(20)
1284 status = self.verify_cluster_status(onos_instances=onos_instances)
1285 assert_equal(status, True)
1286 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1287 log.info('Verifying tls authentication after member restart')
1288 reactor.callLater(0, eap_tls_no_cert, df)
1289 return df
1290
ChetanGaonker689b3862016-10-17 16:25:01 -07001291 #pass
1292 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1293 status = self.verify_cluster_status(onos_instances=onos_instances)
1294 assert_equal(status,True)
1295 master,standbys = self.get_cluster_current_master_standbys()
1296 assert_equal(len(standbys),(onos_instances-1))
1297 self.proxyarp.setUpClass()
1298 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1299 ingress = hosts+1
1300 for hostip, hostmac in hosts_config:
1301 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1302 time.sleep(1)
1303 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1304 self.change_cluster_current_master(new_master=standbys[0])
1305 log.info('verifying proxyarp after master change')
1306 for hostip, hostmac in hosts_config:
1307 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1308 time.sleep(1)
1309 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1310 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1311 time.sleep(3)
1312 for hostip, hostmac in hosts_config:
1313 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1314 time.sleep(1)
1315 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1316 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1317 time.sleep(3)
1318 for hostip, hostmac in hosts_config:
1319 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1320 time.sleep(1)
ChetanGaonker2099d722016-10-07 15:16:58 -07001321
ChetanGaonker689b3862016-10-17 16:25:01 -07001322 #pass
1323 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001324 status = self.verify_cluster_status(onos_instances=onos_instances)
1325 assert_equal(status, True)
1326 master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001327 assert_equal(len(standbys), (onos_instances-1))
1328 onos_names_ips = self.get_cluster_container_names_ips()
1329 member_onos_name = onos_names_ips[standbys[1]]
1330 self.proxyarp.setUpClass()
1331 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1332 ingress = hosts+1
1333 for hostip, hostmac in hosts_config:
1334 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1335 time.sleep(1)
1336 log.info('killing cluster member %s'%standbys[1])
1337 Container(member_onos_name,Onos.IMAGE).kill()
1338 time.sleep(20)
1339 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1340 assert_equal(status, True)
1341 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1342 log.info('verifying proxy arp functionality after cluster member down')
1343 for hostip, hostmac in hosts_config:
1344 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1345 time.sleep(1)
1346
1347 #pass
1348 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1349 status = self.verify_cluster_status(onos_instances=onos_instances)
1350 assert_equal(status, True)
1351 self.proxyarp.setUpClass()
1352 master, standbys = self.get_cluster_current_master_standbys()
1353 assert_equal(len(standbys), (onos_instances-1))
1354 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1355 self.success = True
1356 ingress = hosts+1
1357 ports = range(ingress,ingress+10)
1358 hostmac = []
1359 hostip = []
1360 for ip,mac in hosts_config:
1361 hostmac.append(mac)
1362 hostip.append(ip)
1363 success_dir = {}
1364 def verify_proxyarp(*r):
1365 ingress, hostmac, hostip = r[0],r[1],r[2]
1366 def mac_recv_task():
1367 def recv_cb(pkt):
1368                    log.info('Arp Reply seen with source Mac %s' %(pkt[ARP].hwsrc))
1369 success_dir[current_thread().name] = True
1370 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1371 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1372 t = threading.Thread(target = mac_recv_task)
1373 t.start()
1374 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1375 log.info('Sending arp request for dest ip %s on interface %s' %
1376 (hostip,self.proxyarp.port_map[ingress]))
1377 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1378 t.join()
1379 t = []
1380 for i in range(10):
1381 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1382 for i in range(10):
1383 t[i].start()
1384 time.sleep(2)
1385 for i in range(10):
1386 t[i].join()
1387 if len(success_dir) != 10:
1388 self.success = False
1389 assert_equal(self.success, True)
1390
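    # The concurrent proxy ARP test above starts one sniffer thread per ingress port and
    # records each observed ARP reply in success_dir keyed by the sniffer thread's name,
    # so the final len(success_dir) == 10 check means every port saw a reply. A sketch of
    # the same start/join/aggregate shape (illustrative only):
    #
    #     threads = [threading.Thread(target = verify_proxyarp, args = [ports[i], hostmac[i], hostip[i]])
    #                for i in range(len(ports))]
    #     for t in threads: t.start()
    #     for t in threads: t.join()
    #     assert_equal(len(success_dir), len(threads))   # one recorded reply per sniffer thread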
1391 #pass
1392 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1393 status = self.verify_cluster_status(onos_instances=onos_instances)
1394 assert_equal(status, True)
1395 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001396 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001397 self.acl.setUp()
1398 acl_rule = ACLTest()
1399 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1400 if status is False:
1401 log.info('JSON request returned status %d' %code)
1402 assert_equal(status, True)
1403 result = acl_rule.get_acl_rules(controller=master)
1404 aclRules1 = result.json()['aclRules']
1405        log.info('Added acl rules are %s'%aclRules1)
1406 acl_Id = map(lambda d: d['id'], aclRules1)
1407 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1408 self.change_cluster_current_master(new_master=standbys[0])
1409 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1410 if status is False:
1411 log.info('JSON request returned status %d' %code)
1412 assert_equal(status, True)
1413
1414 #pass
1415 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1416 status = self.verify_cluster_status(onos_instances=onos_instances)
1417 assert_equal(status, True)
1418 master,standbys = self.get_cluster_current_master_standbys()
1419 assert_equal(len(standbys),(onos_instances-1))
1420 onos_names_ips = self.get_cluster_container_names_ips()
1421 master_onos_name = onos_names_ips[master]
1422 self.acl.setUp()
1423 acl_rule = ACLTest()
1424 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1425 if status is False:
1426 log.info('JSON request returned status %d' %code)
1427 assert_equal(status, True)
1428 result1 = acl_rule.get_acl_rules(controller=master)
1429 aclRules1 = result1.json()['aclRules']
1430        log.info('Added acl rules are %s'%aclRules1)
1431 acl_Id1 = map(lambda d: d['id'], aclRules1)
1432 log.info('Killing cluster current master %s'%master)
1433 Container(master_onos_name,Onos.IMAGE).kill()
1434 time.sleep(45)
1435        status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1436 assert_equal(status, True)
1437 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1438 assert_equal(len(standbys),(onos_instances-2))
1439 assert_not_equal(new_master,master)
1440 result2 = acl_rule.get_acl_rules(controller=new_master)
1441 aclRules2 = result2.json()['aclRules']
1442 acl_Id2 = map(lambda d: d['id'], aclRules2)
1443 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1444 assert_equal(acl_Id2,acl_Id1)
1445
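    # Illustrative sketch, not part of the original suite: both ACL tests above compare
    # the rule ids reported by different controllers. The fetch they repeat could be a
    # small helper (name hypothetical), using the same ACLTest REST wrapper:
    def get_acl_rule_ids(self, acl_rule, controller = None):
        # Returns the ids of the ACL rules currently installed, as seen by 'controller'
        result = acl_rule.get_acl_rules(controller = controller)
        return map(lambda d: d['id'], result.json()['aclRules'])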
1446 #acl traffic scenario not working as acl rule is not getting added to onos
1447 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1448 status = self.verify_cluster_status(onos_instances=onos_instances)
1449 assert_equal(status, True)
1450 master,standbys = self.get_cluster_current_master_standbys()
1451 assert_equal(len(standbys),(onos_instances-1))
1452 onos_names_ips = self.get_cluster_container_names_ips()
1453 member1_onos_name = onos_names_ips[standbys[0]]
1454 member2_onos_name = onos_names_ips[standbys[1]]
1455 ingress = self.acl.ingress_iface
1456 egress = self.acl.CURRENT_PORT_NUM
1457 acl_rule = ACLTest()
1458 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1459 self.acl.CURRENT_PORT_NUM += 1
1460 time.sleep(5)
1461 if status is False:
1462 log.info('JSON request returned status %d' %code)
1463 assert_equal(status, True)
1464 srcMac = '00:00:00:00:00:11'
1465 dstMac = host_ip_mac[0][1]
1466 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1467 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1468 time.sleep(10)
1469 if status is False:
1470 log.info('JSON request returned status %d' %code)
1471 assert_equal(status, True)
1472 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1473 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1474 Container(member1_onos_name, Onos.IMAGE).kill()
1475 Container(member2_onos_name, Onos.IMAGE).kill()
1476 time.sleep(40)
1477 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1478 assert_equal(status, True)
1479 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1480 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1481
1482 #pass
1483 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1484 status = self.verify_cluster_status(onos_instances=onos_instances)
1485 assert_equal(status, True)
1486 master,standbys = self.get_cluster_current_master_standbys()
1487 assert_equal(len(standbys),(onos_instances-1))
1488 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001489 mac = self.dhcprelay.get_mac(iface)
1490 self.dhcprelay.host_load(iface)
1491 ##we use the defaults for this test that serves as an example for others
1492 ##You don't need to restart dhcpd server if retaining default config
1493 config = self.dhcprelay.default_config
1494 options = self.dhcprelay.default_options
1495 subnet = self.dhcprelay.default_subnet_config
1496 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1497 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1498 config = config,
1499 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001500 subnet = subnet,
1501 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001502 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1503 cip, sip = self.dhcprelay.send_recv(mac)
1504 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1505 self.change_master_current_cluster(new_master=standbys[0])
1506 log.info('Releasing ip %s to server %s' %(cip, sip))
1507 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1508 log.info('Triggering DHCP discover again after release')
1509 cip2, sip2 = self.dhcprelay.send_recv(mac)
1510 log.info('Verifying released IP was given back on rediscover')
1511 assert_equal(cip, cip2)
1512 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1513 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001514 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001515
ChetanGaonker689b3862016-10-17 16:25:01 -07001516
1517 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1518 status = self.verify_cluster_status(onos_instances=onos_instances)
1519 assert_equal(status, True)
1520 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001521 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001522 onos_names_ips = self.get_cluster_container_names_ips()
1523 master_onos_name = onos_names_ips[master]
1524 self.dhcprelay.setUpClass(controller=master)
1525 mac = self.dhcprelay.get_mac(iface)
1526 self.dhcprelay.host_load(iface)
1527 ##we use the defaults for this test that serves as an example for others
1528 ##You don't need to restart dhcpd server if retaining default config
1529 config = self.dhcprelay.default_config
1530 options = self.dhcprelay.default_options
1531 subnet = self.dhcprelay.default_subnet_config
1532 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1533 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1534 config = config,
1535 options = options,
1536 subnet = subnet,
1537 controller=master)
1538 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1539 log.info('Initiating dhcp process from client %s'%mac)
1540 cip, sip = self.dhcprelay.send_recv(mac)
1541 log.info('Killing cluster current master %s'%master)
1542 Container(master_onos_name, Onos.IMAGE).kill()
1543 time.sleep(60)
1544 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1545 assert_equal(status, True)
1546 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1547        log.info("Verifying dhcp client gets same IP after cluster master is down")
1548 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1549 assert_equal(new_cip, cip)
1550 self.dhcprelay.tearDownClass(controller=standbys[0])
1551
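    # The DHCP relay test above checks that an address leased through the relay before the
    # master goes down is still acknowledged afterwards. The essential check, as a hedged
    # sketch using the DHCPTest helper already used in this class:
    #
    #     cip, sip = self.dhcprelay.send_recv(mac)            # lease while the master is up
    #     # ... kill the current master and wait for the cluster to settle ...
    #     new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
    #     assert_equal(new_cip, cip)                          # same address re-acknowledged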
1552 #pass
1553 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1554 status = self.verify_cluster_status(onos_instances=onos_instances)
1555 assert_equal(status, True)
1556 master,standbys = self.get_cluster_current_master_standbys()
1557 assert_equal(len(standbys),(onos_instances-1))
1558 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001559 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1560 self.dhcprelay.host_load(iface)
1561 ##we use the defaults for this test that serves as an example for others
1562 ##You don't need to restart dhcpd server if retaining default config
1563 config = self.dhcprelay.default_config
1564 options = self.dhcprelay.default_options
1565 subnet = self.dhcprelay.default_subnet_config
1566 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1567 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1568 config = config,
1569 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001570 subnet = subnet,
1571 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001572 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1573 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1574 assert_not_equal(cip1,None)
1575 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1576 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1577 self.change_master_current_cluster(new_master=standbys[0])
1578 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1579 assert_not_equal(cip2,None)
1580 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1581        log.info('Changing cluster master from %s to %s'%(standbys[0],master))
1582        self.change_master_current_cluster(new_master=master)
1583 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1584 assert_not_equal(cip3,None)
1585        log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
ChetanGaonker689b3862016-10-17 16:25:01 -07001586 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001587
ChetanGaonker689b3862016-10-17 16:25:01 -07001588 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001589 status = self.verify_cluster_status(onos_instances=onos_instances)
1590 assert_equal(status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001591        master, standbys = self.get_cluster_current_master_standbys()
        self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001592 self.subscriber.num_subscribers = 5
1593 self.subscriber.num_channels = 10
1594 for i in [0,1]:
1595 if i == 1:
1596 cord_test_onos_restart()
1597 time.sleep(45)
1598 status = self.verify_cluster_status(onos_instances=onos_instances)
1599 assert_equal(status, True)
1600 log.info('Verifying cord subscriber functionality after cluster restart')
1601 else:
1602 log.info('Verifying cord subscriber functionality before cluster restart')
1603 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1604 num_channels = self.subscriber.num_channels,
1605 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1606 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1607 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1608 self.subscriber.num_channels))
1609 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001610 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001611
ChetanGaonker689b3862016-10-17 16:25:01 -07001612 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1613 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1614 status = self.verify_cluster_status(onos_instances=onos_instances)
1615 assert_equal(status, True)
1616 master,standbys = self.get_cluster_current_master_standbys()
1617 assert_equal(len(standbys),(onos_instances-1))
1618 self.subscriber.setUpClass(controller=master)
1619 self.subscriber.num_subscribers = 5
1620 self.subscriber.num_channels = 10
1621 for i in [0,1]:
1622 if i == 1:
1623 status=self.withdraw_cluster_current_mastership(master_ip=master)
1624                assert_equal(status, True)
1625 master,standbys = self.get_cluster_current_master_standbys()
1626                log.info('verifying cord subscriber functionality after cluster current master withdraws mastership')
1627 else:
1628                log.info('verifying cord subscriber functionality before cluster master withdraws mastership')
1629 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1630 num_channels = self.subscriber.num_channels,
1631 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1632 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1633 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1634 self.subscriber.num_channels),controller=master)
1635 assert_equal(test_status, True)
1636 self.subscriber.tearDownClass(controller=master)
1637
1638 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1639 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001640 status = self.verify_cluster_status(onos_instances=onos_instances)
1641 assert_equal(status, True)
1642 master, standbys = self.get_cluster_current_master_standbys()
1643 assert_equal(len(standbys),(onos_instances-1))
1644 onos_names_ips = self.get_cluster_container_names_ips()
1645 member_onos_name = onos_names_ips[standbys[0]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001646 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001647 num_subscribers = 1
1648 num_channels = 10
1649 for i in [0,1]:
1650 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001651 cord_test_onos_shutdown(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001652 time.sleep(30)
ChetanGaonker689b3862016-10-17 16:25:01 -07001653 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001654 assert_equal(status, True)
1655 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1656 else:
1657 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1658 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1659 num_channels = num_channels,
1660 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1661 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1662 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
ChetanGaonker689b3862016-10-17 16:25:01 -07001663 negative_subscriber_auth = 'all',controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001664 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001665 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001666
ChetanGaonker689b3862016-10-17 16:25:01 -07001667 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001668 status = self.verify_cluster_status(onos_instances=onos_instances)
1669 assert_equal(status, True)
1670 master, standbys = self.get_cluster_current_master_standbys()
1671 assert_equal(len(standbys),(onos_instances-1))
1672 onos_names_ips = self.get_cluster_container_names_ips()
1673 member1_onos_name = onos_names_ips[standbys[0]]
1674 member2_onos_name = onos_names_ips[standbys[1]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001675 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001676 num_subscribers = 1
1677 num_channels = 10
1678 for i in [0,1]:
1679 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001680 cord_test_onos_shutdown(node = standbys[0])
1681 cord_test_onos_shutdown(node = standbys[1])
ChetanGaonker2099d722016-10-07 15:16:58 -07001682 time.sleep(60)
1683 status = self.verify_cluster_status(onos_instances=onos_instances-2)
1684 assert_equal(status, True)
1685                log.info('Verifying cord subscriber functionality after two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1686 else:
1687                log.info('Verifying cord subscriber functionality before two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1688 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1689 num_channels = num_channels,
1690 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1691 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1692 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1693 negative_subscriber_auth = 'all')
1694 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001695 self.subscriber.tearDownClass(controller=master)
1696
1697 #pass
1698 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1699 status = self.verify_cluster_status(onos_instances=onos_instances)
1700 assert_equal(status, True)
1701 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1702 for device in device_dict.keys():
1703 log.info("Device is %s"%device_dict[device])
1704 assert_not_equal(device_dict[device]['master'],'none')
1705 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1706 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1707
1708 #pass
1709 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1710 status = self.verify_cluster_status(onos_instances=onos_instances)
1711 assert_equal(status, True)
1712 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1713 cluster_ips = self.get_cluster_current_member_ips()
1714 for ip in cluster_ips:
1715 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1716 assert_equal(len(device_dict.keys()),onos_instances)
1717 for device in device_dict.keys():
1718 log.info("Device is %s"%device_dict[device])
1719 assert_not_equal(device_dict[device]['master'],'none')
1720 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1721 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1722
1723 #pass
1724 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1725 status = self.verify_cluster_status(onos_instances=onos_instances)
1726 assert_equal(status, True)
1727 onos_names_ips = self.get_cluster_container_names_ips()
1728 master_count = self.get_number_of_devices_of_master()
1729 log.info('Master count information is %s'%master_count)
1730 total_devices = 0
1731 for master in master_count.keys():
1732 total_devices += master_count[master]['size']
1733 if master_count[master]['size'] != 0:
1734 restart_ip = master
1735 assert_equal(total_devices,onos_instances)
1736 member_onos_name = onos_names_ips[restart_ip]
1737 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1738 Container(member_onos_name, Onos.IMAGE).restart()
1739 time.sleep(40)
1740 master_count = self.get_number_of_devices_of_master()
1741 log.info('Master count information after restart is %s'%master_count)
1742 total_devices = 0
1743 for master in master_count.keys():
1744 total_devices += master_count[master]['size']
1745 if master == restart_ip:
1746 assert_equal(master_count[master]['size'], 0)
1747 assert_equal(total_devices,onos_instances)
1748
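    # Illustrative sketch, not part of the original suite: the mastership tests above and
    # below repeatedly sum the per-controller 'size' fields returned by
    # get_number_of_devices_of_master(). That sum as a small helper (name hypothetical):
    def total_mastered_devices(self, master_count):
        # master_count maps controller ip -> {'size': <device count>, 'devices': [...]}
        return sum(int(master_count[m]['size']) for m in master_count.keys())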
1749 #pass
1750 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1751 status = self.verify_cluster_status(onos_instances=onos_instances)
1752 assert_equal(status, True)
1753 onos_names_ips = self.get_cluster_container_names_ips()
1754 master_count = self.get_number_of_devices_of_master()
1755 log.info('Master count information is %s'%master_count)
1756 total_devices = 0
1757 for master in master_count.keys():
1758 total_devices += master_count[master]['size']
1759 if master_count[master]['size'] != 0:
1760 restart_ip = master
1761 assert_equal(total_devices,onos_instances)
1762 master_onos_name = onos_names_ips[restart_ip]
1763 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1764 Container(master_onos_name, Onos.IMAGE).kill()
1765 time.sleep(40)
1766 for ip in onos_names_ips.keys():
1767 if ip != restart_ip:
1768 controller_ip = ip
1769 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1770 assert_equal(status, True)
1771 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1772 log.info('Master count information after restart is %s'%master_count)
1773 total_devices = 0
1774 for master in master_count.keys():
1775 total_devices += master_count[master]['size']
1776 if master == restart_ip:
1777 assert_equal(master_count[master]['size'], 0)
1778 assert_equal(total_devices,onos_instances)
1779
1780 #pass
1781 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1782 status = self.verify_cluster_status(onos_instances=onos_instances)
1783 assert_equal(status, True)
1784 master_count = self.get_number_of_devices_of_master()
1785 log.info('Master count information is %s'%master_count)
1786 total_devices = 0
1787 for master in master_count.keys():
1788 total_devices += int(master_count[master]['size'])
1789 if master_count[master]['size'] != 0:
1790 master_ip = master
1791                log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
1792 device_id = str(master_count[master]['devices'][0])
1793 device_count = master_count[master]['size']
1794 assert_equal(total_devices,onos_instances)
1795 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1796 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1797 assert_equal(status, True)
1798 master_count = self.get_number_of_devices_of_master()
1799 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1800 total_devices = 0
1801 for master in master_count.keys():
1802 total_devices += int(master_count[master]['size'])
1803 if master == master_ip:
1804 assert_equal(master_count[master]['size'], device_count-1)
1805 assert_equal(total_devices,onos_instances)
1806
1807 #pass
1808 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1809 status = self.verify_cluster_status(onos_instances=onos_instances)
1810 assert_equal(status, True)
1811 master_count = self.get_number_of_devices_of_master()
1812 log.info('Master count information is %s'%master_count)
1813 total_devices = 0
1814 for master in master_count.keys():
1815 total_devices += master_count[master]['size']
1816 assert_equal(total_devices,onos_instances)
1817 log.info('Restarting cluster')
1818 cord_test_onos_restart()
1819 time.sleep(60)
1820 master_count = self.get_number_of_devices_of_master()
1821 log.info('Master count information after restart is %s'%master_count)
1822 total_devices = 0
1823 for master in master_count.keys():
1824 total_devices += master_count[master]['size']
1825 assert_equal(total_devices,onos_instances)