# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl, get_mac
from OltConfig import OltConfig
from socket import socket
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart, cord_test_restart_cluster
from portmaps import g_subscriber_port_map
from scapy.all import *
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
from OnosLog import OnosLog
from CordLogger import CordLogger
import os
import json
import random
import collections
log.setLevel('INFO')

class cluster_exchange(CordLogger):
    test_path = os.path.dirname(os.path.realpath(__file__))
    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
    mac = RandMAC()._fix()
    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
    igmp_ip = IP(dst = '224.0.0.22')
    ONOS_INSTANCES = 3
    V_INF1 = 'veth0'
    TLS_TIMEOUT = 100
    device_id = 'of:' + get_mac()
    igmp = cluster_igmp()
    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10',end_ip = '224.1.10.49')
    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35',end_ip='38.24.35.56')
    tls = cluster_tls()
    flows = cluster_flows()
    proxyarp = cluster_proxyarp()
    vrouter = cluster_vrouter()
    acl = cluster_acl()
    dhcprelay = cluster_dhcprelay()
    subscriber = cluster_subscriber()
    testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_graceful_controller_restarts',
                       'test_cluster_single_controller_restarts', 'test_cluster_restarts')

    def setUp(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).setUp()

    def tearDown(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).tearDown()

    def get_controller(self):
        controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
        controller = controller.split(',')[0]
        return controller

    @classmethod
    def get_controllers(cls):
        controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
        return controllers.split(',')

    def cliEnter(self, controller = None):
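        '''Open an ONOS CLI session to the given controller, retrying up to 30 times until a CLI handle is available.'''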
        retries = 0
        while retries < 30:
            self.cli = OnosCliDriver(controller = controller, connect = True)
            if self.cli.handle:
                break
            else:
                retries += 1
                time.sleep(2)

    def cliExit(self):
        self.cli.disconnect()

    def get_leader(self, controller = None):
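        '''Run the 'leaders' command on the given controller through the ONOS CLI and return the parsed JSON output, or None if the command fails.'''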
        self.cliEnter(controller = controller)
        try:
            result = json.loads(self.cli.leaders(jsonFormat = True))
        except:
            result = None

        if result is None:
            log.info('Leaders command failure for controller %s' %controller)
        else:
            log.info('Leaders returned: %s' %result)
        self.cliExit()
        return result

    def onos_shutdown(self, controller = None):
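        '''Issue a graceful ONOS 'shutdown' through the CLI of the given controller; return True on success, False otherwise.'''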
        status = True
        self.cliEnter(controller = controller)
        try:
            self.cli.shutdown(timeout = 10)
        except:
            log.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
            status = False

        self.cliExit()
        return status

    def log_set(self, level = None, app = 'org.onosproject', controllers = None):
        CordLogger.logSet(level = level, app = app, controllers = controllers, forced = True)

    def get_leaders(self, controller = None):
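        '''Collect the 'leaders' output for a single controller or a list/tuple of controllers and return a map keyed by controller.'''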
        result_map = {}
        if controller is None:
            controller = self.get_controller()
        if type(controller) in [ list, tuple ]:
            for c in controller:
                leaders = self.get_leader(controller = c)
                result_map[c] = leaders
        else:
            leaders = self.get_leader(controller = controller)
            result_map[controller] = leaders
        return result_map

    def verify_leaders(self, controller = None):
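        '''Return the list of controllers for which the 'leaders' command returned no output.'''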
        leaders_map = self.get_leaders(controller = controller)
        failed = [ k for k,v in leaders_map.items() if v == None ]
        return failed

    def verify_cluster_status(self,controller = None,onos_instances=ONOS_INSTANCES,verify=False):
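        '''Poll the ONOS 'summary' output until the node count equals (verify=True) or is at least (verify=False) onos_instances; return True on success, False otherwise.'''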
        tries = 0
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
                if cluster_summary:
                    log.info("cluster 'summary' command output is %s"%cluster_summary)
                    nodes = cluster_summary['nodes']
                    if verify:
                        if nodes == onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                    else:
                        if nodes >= onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return False
        except:
            raise Exception('Failed to get cluster members')
            return False

    def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
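        '''Return the cluster member IPs reported by the ONOS 'nodes' command, optionally filtered by nodes_filter and sorted by the last address octet.'''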
        tries = 0
        cluster_ips = []
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
                if cluster_nodes:
                    log.info("cluster 'nodes' output is %s"%cluster_nodes)
                    if nodes_filter:
                        cluster_nodes = nodes_filter(cluster_nodes)
                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
                    self.cliExit()
                    cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
                    return cluster_ips
                else:
                    tries += 1
            self.cliExit()
            return cluster_ips
        except:
            raise Exception('Failed to get cluster members')
            return cluster_ips

    def get_cluster_container_names_ips(self,controller=None):
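        '''Build a bidirectional map between controller IPs and ONOS container names (Onos.NAME for the first controller, Onos.NAME-2, Onos.NAME-3, ... for the rest).'''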
        onos_names_ips = {}
        controllers = self.get_controllers()
        i = 0
        for controller in controllers:
            if i == 0:
                name = Onos.NAME
            else:
                name = '{}-{}'.format(Onos.NAME, i+1)
            onos_names_ips[controller] = name
            onos_names_ips[name] = controller
            i += 1
        return onos_names_ips
        # onos_ips = self.get_cluster_current_member_ips(controller=controller)
        # onos_names_ips[onos_ips[0]] = Onos.NAME
        # onos_names_ips[Onos.NAME] = onos_ips[0]
        # for i in range(1,len(onos_ips)):
        #     name = '{0}-{1}'.format(Onos.NAME,i+1)
        #     onos_names_ips[onos_ips[i]] = name
        #     onos_names_ips[name] = onos_ips[i]

        # return onos_names_ips

    #identifying current master of a connected device, not tested
    def get_cluster_current_master_standbys(self,controller=None,device_id=device_id):
226 standbys = []
227 tries = 0
228 try:
229 cli = self.cliEnter(controller = controller)
230 while tries <= 10:
231 roles = json.loads(self.cli.roles(jsonFormat = True))
232 log.info("cluster 'roles' command output is %s"%roles)
233 if roles:
234 for device in roles:
235 log.info('Verifying device info in line %s'%device)
236 if device['id'] == device_id:
237 master = str(device['master'])
238 standbys = map(lambda d: str(d), device['standbys'])
239 log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
240 self.cliExit()
241 return master, standbys
242 self.cliExit()
243 return master, standbys
244 else:
245 tries += 1
ChetanGaonker689b3862016-10-17 16:25:01 -0700246 time.sleep(1)
247 self.cliExit()
248 return master,standbys
249 except:
250 raise Exception('Failed to get cluster members')
251 return master,standbys
252
253 def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
254 ''' returns master and standbys of all the connected devices to ONOS cluster instance'''
255 device_dict = {}
256 tries = 0
257 try:
258 cli = self.cliEnter(controller = controller)
259 while tries <= 10:
260 device_dict = {}
261 roles = json.loads(self.cli.roles(jsonFormat = True))
262 log.info("cluster 'roles' command output is %s"%roles)
263 if roles:
264 for device in roles:
265 device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
266 for i in range(len(device_dict[device['id']]['standbys'])):
267 device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
268 log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
269 self.cliExit()
270 return device_dict
271 else:
272 tries += 1
ChetanGaonker2099d722016-10-07 15:16:58 -0700273 time.sleep(1)
274 self.cliExit()
ChetanGaonker689b3862016-10-17 16:25:01 -0700275 return device_dict
276 except:
277 raise Exception('Failed to get cluster members')
278 return device_dict
279
280 #identify current master of a connected device, not tested
281 def get_cluster_connected_devices(self,controller=None):
282 '''returns all the devices connected to ONOS cluster'''
283 device_list = []
284 tries = 0
285 try:
286 cli = self.cliEnter(controller = controller)
287 while tries <= 10:
288 device_list = []
289 devices = json.loads(self.cli.devices(jsonFormat = True))
290 log.info("cluster 'devices' command output is %s"%devices)
291 if devices:
292 for device in devices:
293 log.info('device id is %s'%device['id'])
294 device_list.append(str(device['id']))
295 self.cliExit()
296 return device_list
297 else:
298 tries += 1
299 time.sleep(1)
300 self.cliExit()
301 return device_list
302 except:
303 raise Exception('Failed to get cluster members')
304 return device_list
305
306 def get_number_of_devices_of_master(self,controller=None):
307 '''returns master-device pairs, which master having what devices'''
308 master_count = {}
309 try:
310 cli = self.cliEnter(controller = controller)
311 masters = json.loads(self.cli.masters(jsonFormat = True))
312 if masters:
313 for master in masters:
314 master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
315 return master_count
316 else:
317 return master_count
ChetanGaonker2099d722016-10-07 15:16:58 -0700318 except:
ChetanGaonker689b3862016-10-17 16:25:01 -0700319 raise Exception('Failed to get cluster members')
320 return master_count
ChetanGaonker2099d722016-10-07 15:16:58 -0700321
322 def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
        if new_master is None: return False
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_equal(master,new_master)
        log.info('Cluster master changed to %s successfully'%new_master)

    def withdraw_cluster_current_mastership(self,master_ip=None,device_id=device_id,controller=None):
        '''the current master loses its mastership and hence a new master will be elected'''
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_not_equal(new_master_ip,master_ip)
        log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
        log.info('Cluster new master is %s'%new_master_ip)
        return True

    def cluster_controller_restarts(self, graceful = False):
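        '''Restart each ONOS controller in turn (shutting it down gracefully first if graceful is True); after every restart, archive the logs and check all nodes for storage exceptions and leaders command failures.'''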
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(controller = None):
            adjacent_controller = None
            adjacent_controllers = None
            if controller:
                adjacent_controllers = list(set(controllers) - set([controller]))
                adjacent_controller = adjacent_controllers[0]
            for node in controllers:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    failed = self.verify_leaders(controllers)
                    if failed:
                        log.info('Leaders command failed on nodes: %s' %failed)
                    assert_equal(len(failed), 0)
                    return controller

            try:
                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
                log.info('ONOS cluster formed with controllers: %s' %ips)
                st = True
            except:
                st = False

            failed = self.verify_leaders(controllers)
            assert_equal(len(failed), 0)
            if st is False:
                log.info('No storage exception and ONOS cluster was not formed successfully')
            else:
                controller = None

            return controller

        next_controller = None
        tries = 10
        for num in range(tries):
            index = num % ctlr_len
            #index = random.randrange(0, ctlr_len)
            controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
            controller = onos_map[controller_name]
            log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
            try:
                #enable debug log for the other controllers before restarting this controller
                adjacent_controllers = list( set(controllers) - set([controller]) )
                self.log_set(controllers = adjacent_controllers)
                self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
                if graceful is True:
                    log.info('Gracefully shutting down controller: %s' %controller)
                    self.onos_shutdown(controller)
                cord_test_onos_restart(node = controller, timeout = 0)
                self.log_set(controllers = controller)
                self.log_set(app = 'io.atomix', controllers = controller)
                time.sleep(60)
            except:
                time.sleep(5)
                continue

            #first archive the test case logs for this run
            CordLogger.archive_results(self._testMethodName,
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            next_controller = check_exception(controller = controller)

    def test_cluster_controller_restarts(self):
        '''Test the cluster by repeatedly killing the controllers'''
        self.cluster_controller_restarts()

    def test_cluster_graceful_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the controllers gracefully'''
        self.cluster_controller_restarts(graceful = True)

    def test_cluster_single_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the same controller'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(controller, inclusive = False):
            adjacent_controllers = list(set(controllers) - set([controller]))
            adjacent_controller = adjacent_controllers[0]
            controller_list = adjacent_controllers if inclusive == False else controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                assert_equal(len(failed), 0)
                return controller

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if controller in ips and inclusive is False:
                    log.info('Controller %s still ACTIVE on Node %s after it was shutdown' %(controller, ctlr))
                if controller not in ips and inclusive is True:
                    log.info('Controller %s still INACTIVE on Node %s after it was restarted' %(controller, ctlr))

            return controller

        tries = 10
        #choose a random controller for shutdown/restarts
        controller = controllers[random.randrange(0, ctlr_len)]
        controller_name = onos_map[controller]
        ##enable the log level for the controllers
        self.log_set(controllers = controllers)
        self.log_set(app = 'io.atomix', controllers = controllers)
        for num in range(tries):
            log.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
            try:
                cord_test_onos_shutdown(node = controller)
                time.sleep(20)
            except:
                time.sleep(5)
                continue
            #check for exceptions on the adjacent nodes
            check_exception(controller)
            #Now restart the controller back
            log.info('Restarting back the controller %s' %controller_name)
            cord_test_onos_restart(node = controller)
            self.log_set(controllers = controller)
            self.log_set(app = 'io.atomix', controllers = controller)
            time.sleep(60)
            #archive the logs for this run
            CordLogger.archive_results('test_cluster_single_controller_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            check_exception(controller, inclusive = True)

    def test_cluster_restarts(self):
        '''Test the cluster by repeatedly restarting the entire cluster'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception():
            controller_list = controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                assert_equal(len(failed), 0)
                return

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                assert_equal(len(ips), len(controllers))

        tries = 10
        for num in range(tries):
            log.info('ITERATION: %d. Restarting cluster with controllers at %s' %(num+1, controllers))
            try:
                cord_test_restart_cluster()
                self.log_set(controllers = controllers)
                self.log_set(app = 'io.atomix', controllers = controllers)
                log.info('Delaying before verifying cluster status')
                time.sleep(60)
            except:
                time.sleep(10)
                continue

            #archive the logs for this run before verification
            CordLogger.archive_results('test_cluster_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            #check for exceptions on the adjacent nodes
            check_exception()

    #pass
    def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster exists with %d ONOS instances'%onos_instances)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d nodes to the ONOS cluster' %add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Removing cluster current master %s'%(master))
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
        assert_equal(status, True)
        new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
        assert_not_equal(master,new_master)
        log.info('Successfully removed cluster master instance')

    def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = standbys[0])
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
        assert_equal(status, True)

    def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = standbys[0])
        log.info('Removing cluster member %s'%standbys[1])
        cord_test_onos_shutdown(node = standbys[1])
        time.sleep(60)
        onos_instances = onos_instances - 2
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
        assert_equal(status, True)

    def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(remove):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Removing onos container with name %s'%standbys[i])
            cord_test_onos_shutdown(node = standbys[i])
        time.sleep(60)
        onos_instances = onos_instances - remove
        status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
        assert_equal(status, True)

    #nottest test cluster not coming up properly if member goes down
    def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, onos_instances - i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        onos_instances = onos_instances-remove
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = onos_instances-remove
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        onos_instances = onos_instances+add
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Restarting cluster')
        cord_test_onos_restart()
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Restarting cluster master %s'%master)
        cord_test_onos_restart(node = master)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up after master restart as expected')

    #test fails. master changes after restart. Need to check correct behavior.
    def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master1, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master1]
        log.info('Restarting cluster master %s'%master1)
        cord_test_onos_restart(node = master1)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master2, standbys = self.get_cluster_current_master_standbys()
        assert_equal(master1,master2)
        log.info('Cluster master is same before and after cluster master restart as expected')

    def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Restarting cluster member %s'%standbys[0])
        cord_test_onos_restart(node = standbys[0])
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting one member')

    def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
        cord_test_onos_restart(node = standbys[0])
        cord_test_onos_restart(node = standbys[1])
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting two members')

    def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status,True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(members):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Restarting cluster member %s'%standbys[i])
            cord_test_onos_restart(node = standbys[i])

        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting %d members'%members)

    def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        log.info('Cluster current master of devices is %s'%master)
        self.change_master_current_cluster(new_master=standbys[0])
        log.info('Cluster master changed successfully')

    #tested on single onos setup.
    def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        for onos_ip in onos_ips:
            tries = 0
            flag = False
            try:
                self.cliEnter(controller = onos_ip)
                while tries <= 5:
                    routes = json.loads(self.cli.routes(jsonFormat = True))
                    if routes:
                        assert_equal(len(routes['routes4']), networks)
                        self.cliExit()
                        flag = True
                        break
                    else:
                        tries += 1
                        time.sleep(1)
                assert_equal(flag, True)
            except:
                log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
                raise

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res,True)
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        log.info('Verifying vrouter traffic after cluster master is down')
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        cord_test_onos_restart()
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        time.sleep(15)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before master down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        log.info('Verifying vrouter traffic after app deactivated')
        time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        log.info('Verifying vrouter traffic after master down')
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before cluster member down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        log.info('Verifying vrouter after cluster member down')
        cord_test_onos_shutdown(node = standbys[0])
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster goes down

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[1]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster member restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart(node = standbys[1])
        log.info('Verifying vrouter traffic after cluster member restart')
        self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart()
        log.info('Verifying vrouter traffic after cluster restart')
        self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts


    #test fails because flow state is in pending_add in onos
    def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
        ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            udpSrc = ingress_map['udp_port'],
                            udpDst = egress_map['udp_port'],
                            controller=master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
                self.success = True
            sniff(timeout=2,
                  lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
                  and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])

        for i in [0,1]:
            if i == 1:
                cord_test_onos_shutdown(node = master)
                log.info('Verifying flows traffic after master killed')
                time.sleep(45)
            else:
                log.info('Verifying flows traffic before master killed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
            L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
            pkt = L2/L3/L4
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)
    def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1' }
        ingress_map = { 'ip': '192.168.40.1' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ecn = 1,
                            controller=master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
                self.success = True
            sniff(count=2, timeout=5,
                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
                  and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
                  iface = self.flows.port_map[egress])
        for i in [0,1]:
            if i == 1:
                log.info('Changing cluster master to %s'%standbys[0])
                self.change_master_current_cluster(new_master=standbys[0])
                log.info('Verifying flow traffic after cluster master changed')
            else:
                log.info('Verifying flow traffic before cluster master changed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
            pkt = L2/L3
            log.info('Sending a packet to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    #pass
    def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master,standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
        ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ipv6_extension = 0,
                            controller=master
                            )

        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
                self.success = True
            sniff(timeout=2,count=5,
                  lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
        for i in [0,1]:
            if i == 1:
                log.info('Restart cluster current master %s'%master)
                Container(master_onos_name,Onos.IMAGE).restart()
                time.sleep(45)
                log.info('Verifying flow traffic after master restart')
            else:
                log.info('Verifying flow traffic before master restart')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth
            L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
            pkt = L2/L3
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
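        '''Send 20 multicast data packets for the given group from the given source IP out of intf, addressed to the group's multicast MAC.'''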
        dst_mac = self.igmp.iptomac(group)
        eth = Ether(dst= dst_mac)
        ip = IP(dst=group,src=source)
        data = repr(monotonic.monotonic())
        sendp(eth/ip/data,count=20, iface = intf)
        pkt = (eth/ip/data)
        log.info('multicast traffic packet %s'%pkt.show())

    def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
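        '''Listen on veth0 for multicast data destined to the group from the given source while sending the traffic; return True if the data is received.'''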
        log.info('verifying multicast traffic for group %s from source %s'%(group,source))
        self.success = False
        def recv_task():
            def igmp_recv_cb(pkt):
                log.info('multicast data received for group %s from source %s'%(group,source))
                self.success = True
            sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
        t = threading.Thread(target = recv_task)
        t.start()
        self.send_multicast_data_traffic(group,source=source)
        t.join()
        return self.success

    #pass
    def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.igmp.setUp(controller=master)
        groups = ['224.2.3.4','230.5.6.7']
        src_list = ['2.2.2.2','3.3.3.3']
        self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
        assert_equal(status,True)
        status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
        assert_equal(status,False)
        log.info('restarting cluster master %s'%master)
        Container(master_onos_name,Onos.IMAGE).restart()
        time.sleep(60)
        log.info('verifying multicast data traffic after master restart')
        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
        assert_equal(status,True)
        status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
        assert_equal(status,False)
1105
1106 #pass
1107 def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
1108 status = self.verify_cluster_status(onos_instances=onos_instances)
1109 assert_equal(status, True)
1110 master, standbys = self.get_cluster_current_master_standbys()
1111 assert_equal(len(standbys), (onos_instances-1))
1112 onos_names_ips = self.get_cluster_container_names_ips()
1113 master_onos_name = onos_names_ips[master]
1114 self.igmp.setUp(controller=master)
1115 groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
1116 src_list = [self.igmp.randomsourceip()]
1117 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1118 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1119 iface = self.V_INF1, delay = 2)
1120 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1121 iface = self.V_INF1, delay = 2)
1122 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1123 assert_equal(status,True)
1124 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1125 assert_equal(status,False)
1126 log.info('Killing cluster master %s'%master)
1127 Container(master_onos_name,Onos.IMAGE).kill()
1128 time.sleep(60)
1129 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1130 assert_equal(status, True)
1131 log.info('Verifying multicast data traffic after cluster master down')
1132 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1133 assert_equal(status,True)
1134 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1135 assert_equal(status,False)
1136
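    # Measures traffic recovery time: after the master is killed, multicast reception is polled
    # once per second for up to 60 seconds and the elapsed count is logged as the recovery time.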
1137 def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
1138 status = self.verify_cluster_status(onos_instances=onos_instances)
1139 assert_equal(status, True)
1140 master, standbys = self.get_cluster_current_master_standbys()
1141 assert_equal(len(standbys), (onos_instances-1))
1142 onos_names_ips = self.get_cluster_container_names_ips()
1143 master_onos_name = onos_names_ips[master]
1144 self.igmp.setUp(controller=master)
1145 groups = [self.igmp.random_mcast_ip()]
1146 src_list = [self.igmp.randomsourceip()]
1147 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1148 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1149 iface = self.V_INF1, delay = 2)
1150 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1151 assert_equal(status,True)
1152 log.info('Killing cluster master %s'%master)
1153 Container(master_onos_name,Onos.IMAGE).kill()
1154 count = 0
1155 for i in range(60):
1156 log.info('Verifying multicast data traffic after cluster master down')
1157 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1158 if status:
1159 break
1160 else:
1161 count += 1
1162 time.sleep(1)
1163 assert_equal(status, True)
1164 log.info('Time taken to recover traffic after cluster master went down is %d seconds'%count)
1165
1166
1167 #pass
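    # Joins in INCLUDE mode, verifies traffic, hands mastership to a standby, re-verifies,
    # then sends a TO_EXCLUDE record to leave the group and expects traffic to stop.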
1168 def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
1169 status = self.verify_cluster_status(onos_instances=onos_instances)
1170 assert_equal(status, True)
1171 master, standbys = self.get_cluster_current_master_standbys()
1172 assert_equal(len(standbys), (onos_instances-1))
1173 self.igmp.setUp(controller=master)
1174 groups = [self.igmp.random_mcast_ip()]
1175 src_list = [self.igmp.randomsourceip()]
1176 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1177 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1178 iface = self.V_INF1, delay = 2)
1179 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1180 assert_equal(status,True)
1181 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1182 self.change_cluster_current_master(new_master=standbys[0])
1183 log.info('Verifying multicast traffic after cluster master change')
1184 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1185 assert_equal(status,True)
1186 log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
1187 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
1188 iface = self.V_INF1, delay = 1)
1189 time.sleep(10)
1190 status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
1191 assert_equal(status,False)
1192
1193 #pass
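    # The SSM table is loaded through the original master, the join is sent while a standby
    # holds mastership, and traffic is verified after mastership is handed back to the
    # original master.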
1194 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1195 status = self.verify_cluster_status(onos_instances=onos_instances)
1196 assert_equal(status, True)
1197 master,standbys = self.get_cluster_current_master_standbys()
1198 assert_equal(len(standbys), (onos_instances-1))
1199 self.igmp.setUp(controller=master)
1200 groups = [self.igmp.random_mcast_ip()]
1201 src_list = [self.igmp.randomsourceip()]
1202 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1203 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1204 self.change_cluster_current_master(new_master = standbys[0])
1205 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1206 iface = self.V_INF1, delay = 2)
1207 time.sleep(1)
1208 self.change_cluster_current_master(new_master = master)
1209 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1210 assert_equal(status,True)
1211
1212 #pass
ChetanGaonker2099d722016-10-07 15:16:58 -07001213 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001214 def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001215 status = self.verify_cluster_status(onos_instances=onos_instances)
1216 assert_equal(status, True)
1217 master, standbys = self.get_cluster_current_master_standbys()
1218 assert_equal(len(standbys), (onos_instances-1))
1219 self.tls.setUp(controller=master)
1220 df = defer.Deferred()
1221 def eap_tls_verify(df):
1222 tls = TLSAuthTest()
1223 tls.runTest()
1224 df.callback(0)
1225 reactor.callLater(0, eap_tls_verify, df)
1226 return df
1227
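    # TLS authentication is exercised twice via a twisted deferred: once with the original
    # master in place and once after mastership is moved to a standby.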
1228 @deferred(120)
ChetanGaonker689b3862016-10-17 16:25:01 -07001229 def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001230 master, standbys = self.get_cluster_current_master_standbys()
1231 assert_equal(len(standbys), (onos_instances-1))
1232 self.tls.setUp()
1233 df = defer.Deferred()
1234 def eap_tls_verify(df):
1235 tls = TLSAuthTest()
1236 tls.runTest()
1237 df.callback(0)
1238 for i in [0,1]:
1239 if i == 1:
1240 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1241 self.change_master_current_cluster(new_master=standbys[0])
1242 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1243 else:
1244 log.info('Verifying tls authentication before cluster master change')
1245 reactor.callLater(0, eap_tls_verify, df)
1246 return df
1247
1248 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001249 def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001250 status = self.verify_cluster_status(onos_instances=onos_instances)
1251 assert_equal(status, True)
1252 master, standbys = self.get_cluster_current_master_standbys()
1253 assert_equal(len(standbys), (onos_instances-1))
1254 onos_names_ips = self.get_cluster_container_names_ips()
1255 master_onos_name = onos_names_ips[master]
1256 self.tls.setUp()
1257 df = defer.Deferred()
1258 def eap_tls_verify(df):
1259 tls = TLSAuthTest()
1260 tls.runTest()
1261 df.callback(0)
1262 for i in [0,1]:
1263 if i == 1:
1264 log.info('Killing cluster current master %s'%master)
A R Karthick3b2e0372016-12-14 17:37:43 -08001265 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001266 time.sleep(20)
1267 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1268 assert_equal(status, True)
1269 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1270 log.info('Verifying tls authentication after killing cluster master')
1271 reactor.callLater(0, eap_tls_verify, df)
1272 return df
1273
1274 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001275 def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001276 status = self.verify_cluster_status(onos_instances=onos_instances)
1277 assert_equal(status, True)
1278 master, standbys = self.get_cluster_current_master_standbys()
1279 assert_equal(len(standbys), (onos_instances-1))
1280 onos_names_ips = self.get_cluster_container_names_ips()
1281 member_onos_name = onos_names_ips[standbys[0]]
1282 self.tls.setUp()
1283 df = defer.Deferred()
1284 def eap_tls_no_cert(df):
1285 def tls_no_cert_cb():
1286 log.info('TLS authentication failed with no certificate')
1287 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1288 tls.runTest()
1289 assert_equal(tls.failTest, True)
1290 df.callback(0)
1291 for i in [0,1]:
1292 if i == 1:
1293 log.info('Restart cluster member %s'%standbys[0])
1294 Container(member_onos_name,Onos.IMAGE).restart()
1295 time.sleep(20)
1296 status = self.verify_cluster_status(onos_instances=onos_instances)
1297 assert_equal(status, True)
1298 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1299 log.info('Verifying tls authentication after member restart')
1300 reactor.callLater(0, eap_tls_no_cert, df)
1301 return df
1302
ChetanGaonker689b3862016-10-17 16:25:01 -07001303 #pass
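    # Proxy ARP replies are verified for each configured host, re-verified after a master
    # change, then the proxyarp app is deactivated (expecting no replies) and reactivated
    # (expecting replies again) through the new master.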
1304 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1305 status = self.verify_cluster_status(onos_instances=onos_instances)
1306 assert_equal(status,True)
1307 master,standbys = self.get_cluster_current_master_standbys()
1308 assert_equal(len(standbys),(onos_instances-1))
1309 self.proxyarp.setUpClass()
1310 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1311 ingress = hosts+1
1312 for hostip, hostmac in hosts_config:
1313 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1314 time.sleep(1)
1315 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1316 self.change_cluster_current_master(new_master=standbys[0])
1317 log.info('verifying proxyarp after master change')
1318 for hostip, hostmac in hosts_config:
1319 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1320 time.sleep(1)
1321 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1322 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1323 time.sleep(3)
1324 for hostip, hostmac in hosts_config:
1325 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1326 time.sleep(1)
1327 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1328 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1329 time.sleep(3)
1330 for hostip, hostmac in hosts_config:
1331 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1332 time.sleep(1)
ChetanGaonker2099d722016-10-07 15:16:58 -07001333
ChetanGaonker689b3862016-10-17 16:25:01 -07001334 #pass
1335 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001336 status = self.verify_cluster_status(onos_instances=onos_instances)
1337 assert_equal(status, True)
1338 master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001339 assert_equal(len(standbys), (onos_instances-1))
1340 onos_names_ips = self.get_cluster_container_names_ips()
1341 member_onos_name = onos_names_ips[standbys[1]]
1342 self.proxyarp.setUpClass()
1343 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1344 ingress = hosts+1
1345 for hostip, hostmac in hosts_config:
1346 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1347 time.sleep(1)
1348 log.info('killing cluster member %s'%standbys[1])
1349 Container(member_onos_name,Onos.IMAGE).kill()
1350 time.sleep(20)
1351 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1352 assert_equal(status, True)
1353 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1354 log.info('verifying proxy arp functionality after cluster member down')
1355 for hostip, hostmac in hosts_config:
1356 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1357 time.sleep(1)
1358
1359 #pass
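    # Ten threads run in parallel, one per ingress port: each sniffs for an ARP reply from its
    # host MAC while ARP requests are sent, recording per-thread success in success_dir; the
    # test passes only if all ten threads see a reply.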
1360 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1361 status = self.verify_cluster_status(onos_instances=onos_instances)
1362 assert_equal(status, True)
1363 self.proxyarp.setUpClass()
1364 master, standbys = self.get_cluster_current_master_standbys()
1365 assert_equal(len(standbys), (onos_instances-1))
1366 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1367 self.success = True
1368 ingress = hosts+1
1369 ports = range(ingress,ingress+10)
1370 hostmac = []
1371 hostip = []
1372 for ip,mac in hosts_config:
1373 hostmac.append(mac)
1374 hostip.append(ip)
1375 success_dir = {}
1376 def verify_proxyarp(*r):
1377 ingress, hostmac, hostip = r[0],r[1],r[2]
1378 def mac_recv_task():
1379 def recv_cb(pkt):
1380 log.info('ARP reply seen with source MAC %s' %(pkt[ARP].hwsrc))
1381 success_dir[current_thread().name] = True
1382 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1383 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1384 t = threading.Thread(target = mac_recv_task)
1385 t.start()
1386 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1387 log.info('Sending arp request for dest ip %s on interface %s' %
1388 (hostip,self.proxyarp.port_map[ingress]))
1389 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1390 t.join()
1391 t = []
1392 for i in range(10):
1393 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1394 for i in range(10):
1395 t[i].start()
1396 time.sleep(2)
1397 for i in range(10):
1398 t[i].join()
1399 if len(success_dir) != 10:
1400 self.success = False
1401 assert_equal(self.success, True)
1402
1403 #pass
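    # An ACL allow rule is added through the current master, mastership is moved to a standby,
    # and the same rule is removed through the new master to confirm the state is shared.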
1404 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1405 status = self.verify_cluster_status(onos_instances=onos_instances)
1406 assert_equal(status, True)
1407 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001408 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001409 self.acl.setUp()
1410 acl_rule = ACLTest()
1411 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1412 if status is False:
1413 log.info('JSON request returned status %d' %code)
1414 assert_equal(status, True)
1415 result = acl_rule.get_acl_rules(controller=master)
1416 aclRules1 = result.json()['aclRules']
1417 log.info('Added acl rules is %s'%aclRules1)
1418 acl_Id = map(lambda d: d['id'], aclRules1)
1419 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1420 self.change_cluster_current_master(new_master=standbys[0])
1421 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1422 if status is False:
1423 log.info('JSON request returned status %d' %code)
1424 assert_equal(status, True)
1425
1426 #pass
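    # An ACL rule added via the master should survive a master failure: the rule ids fetched
    # from the newly elected master are compared against the ids recorded before the kill.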
1427 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1428 status = self.verify_cluster_status(onos_instances=onos_instances)
1429 assert_equal(status, True)
1430 master,standbys = self.get_cluster_current_master_standbys()
1431 assert_equal(len(standbys),(onos_instances-1))
1432 onos_names_ips = self.get_cluster_container_names_ips()
1433 master_onos_name = onos_names_ips[master]
1434 self.acl.setUp()
1435 acl_rule = ACLTest()
1436 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1437 if status is False:
1438 log.info('JSON request returned status %d' %code)
1439 assert_equal(status, True)
1440 result1 = acl_rule.get_acl_rules(controller=master)
1441 aclRules1 = result1.json()['aclRules']
1442 log.info('Added acl rules is %s'%aclRules1)
1443 acl_Id1 = map(lambda d: d['id'], aclRules1)
1444 log.info('Killing cluster current master %s'%master)
1445 Container(master_onos_name,Onos.IMAGE).kill()
1446 time.sleep(45)
1447 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1448 assert_equal(status, True)
1449 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1450 assert_equal(len(standbys),(onos_instances-2))
1451 assert_not_equal(new_master,master)
1452 result2 = acl_rule.get_acl_rules(controller=new_master)
1453 aclRules2 = result2.json()['aclRules']
1454 acl_Id2 = map(lambda d: d['id'], aclRules2)
1455 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1456 assert_equal(acl_Id2,acl_Id1)
1457
1458 # ACL traffic scenario not working as the ACL rule is not getting added to ONOS
1459 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1460 status = self.verify_cluster_status(onos_instances=onos_instances)
1461 assert_equal(status, True)
1462 master,standbys = self.get_cluster_current_master_standbys()
1463 assert_equal(len(standbys),(onos_instances-1))
1464 onos_names_ips = self.get_cluster_container_names_ips()
1465 member1_onos_name = onos_names_ips[standbys[0]]
1466 member2_onos_name = onos_names_ips[standbys[1]]
1467 ingress = self.acl.ingress_iface
1468 egress = self.acl.CURRENT_PORT_NUM
1469 acl_rule = ACLTest()
1470 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1471 self.acl.CURRENT_PORT_NUM += 1
1472 time.sleep(5)
1473 if status is False:
1474 log.info('JSON request returned status %d' %code)
1475 assert_equal(status, True)
1476 srcMac = '00:00:00:00:00:11'
1477 dstMac = host_ip_mac[0][1]
1478 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1479 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1480 time.sleep(10)
1481 if status is False:
1482 log.info('JSON request returned status %d' %code)
1483 assert_equal(status, True)
1484 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1485 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1486 Container(member1_onos_name, Onos.IMAGE).kill()
1487 Container(member2_onos_name, Onos.IMAGE).kill()
1488 time.sleep(40)
1489 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1490 assert_equal(status, True)
1491 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1492 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1493
1494 #pass
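    # A DHCP lease is obtained through the relay, mastership is changed, the lease is released,
    # and a rediscover is expected to hand back the same address via the new master.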
1495 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1496 status = self.verify_cluster_status(onos_instances=onos_instances)
1497 assert_equal(status, True)
1498 master,standbys = self.get_cluster_current_master_standbys()
1499 assert_equal(len(standbys),(onos_instances-1))
1500 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001501 mac = self.dhcprelay.get_mac(iface)
1502 self.dhcprelay.host_load(iface)
1503 ##we use the defaults for this test that serves as an example for others
1504 ##You don't need to restart dhcpd server if retaining default config
1505 config = self.dhcprelay.default_config
1506 options = self.dhcprelay.default_options
1507 subnet = self.dhcprelay.default_subnet_config
1508 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1509 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1510 config = config,
1511 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001512 subnet = subnet,
1513 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001514 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1515 cip, sip = self.dhcprelay.send_recv(mac)
1516 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1517 self.change_master_current_cluster(new_master=standbys[0])
1518 log.info('Releasing ip %s to server %s' %(cip, sip))
1519 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1520 log.info('Triggering DHCP discover again after release')
1521 cip2, sip2 = self.dhcprelay.send_recv(mac)
1522 log.info('Verifying released IP was given back on rediscover')
1523 assert_equal(cip, cip2)
1524 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1525 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001526 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001527
ChetanGaonker689b3862016-10-17 16:25:01 -07001528
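    # After the client gets a lease, the master container is killed; a DHCP request for the
    # same address is then expected to succeed through the surviving instances.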
1529 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1530 status = self.verify_cluster_status(onos_instances=onos_instances)
1531 assert_equal(status, True)
1532 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001533 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001534 onos_names_ips = self.get_cluster_container_names_ips()
1535 master_onos_name = onos_names_ips[master]
1536 self.dhcprelay.setUpClass(controller=master)
1537 mac = self.dhcprelay.get_mac(iface)
1538 self.dhcprelay.host_load(iface)
1539 ##we use the defaults for this test that serves as an example for others
1540 ##You don't need to restart dhcpd server if retaining default config
1541 config = self.dhcprelay.default_config
1542 options = self.dhcprelay.default_options
1543 subnet = self.dhcprelay.default_subnet_config
1544 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1545 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1546 config = config,
1547 options = options,
1548 subnet = subnet,
1549 controller=master)
1550 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1551 log.info('Initiating dhcp process from client %s'%mac)
1552 cip, sip = self.dhcprelay.send_recv(mac)
1553 log.info('Killing cluster current master %s'%master)
1554 Container(master_onos_name, Onos.IMAGE).kill()
1555 time.sleep(60)
1556 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1557 assert_equal(status, True)
1558 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1559 log.info("Verifying dhcp clients gets same IP after cluster master restarts")
1560 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1561 assert_equal(new_cip, cip)
1562 self.dhcprelay.tearDownClass(controller=standbys[0])
1563
1564 #pass
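    # Three clients request leases while mastership is toggled between the original master and
    # a standby, checking that the relay keeps serving addresses across each change.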
1565 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1566 status = self.verify_cluster_status(onos_instances=onos_instances)
1567 assert_equal(status, True)
1568 master,standbys = self.get_cluster_current_master_standbys()
1569 assert_equal(len(standbys),(onos_instances-1))
1570 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001571 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1572 self.dhcprelay.host_load(iface)
1573 ##we use the defaults for this test that serves as an example for others
1574 ##You don't need to restart dhcpd server if retaining default config
1575 config = self.dhcprelay.default_config
1576 options = self.dhcprelay.default_options
1577 subnet = self.dhcprelay.default_subnet_config
1578 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1579 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1580 config = config,
1581 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001582 subnet = subnet,
1583 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001584 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1585 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1586 assert_not_equal(cip1,None)
1587 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1588 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1589 self.change_master_current_cluster(new_master=standbys[0])
1590 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1591 assert_not_equal(cip2,None)
1592 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1593 self.change_master_current_cluster(new_master=master)
1594 log.info('Changed cluster master from %s back to %s'%(standbys[0],master))
1595 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1596 assert_not_equal(cip3,None)
1597 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
ChetanGaonker689b3862016-10-17 16:25:01 -07001598 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001599
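    # Subscriber join/verify (TLS, DHCP, IGMP and traffic callbacks) is run before and after a
    # full cluster restart triggered through cord_test_onos_restart.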
ChetanGaonker689b3862016-10-17 16:25:01 -07001600 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001601 status = self.verify_cluster_status(onos_instances=onos_instances)
1602 assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001603 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001604 self.subscriber.num_subscribers = 5
1605 self.subscriber.num_channels = 10
1606 for i in [0,1]:
1607 if i == 1:
1608 cord_test_onos_restart()
1609 time.sleep(45)
1610 status = self.verify_cluster_status(onos_instances=onos_instances)
1611 assert_equal(status, True)
1612 log.info('Verifying cord subscriber functionality after cluster restart')
1613 else:
1614 log.info('Verifying cord subscriber functionality before cluster restart')
1615 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1616 num_channels = self.subscriber.num_channels,
1617 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1618 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1619 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1620 self.subscriber.num_channels))
1621 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001622 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001623
ChetanGaonker689b3862016-10-17 16:25:01 -07001624 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1625 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1626 status = self.verify_cluster_status(onos_instances=onos_instances)
1627 assert_equal(status, True)
1628 master,standbys = self.get_cluster_current_master_standbys()
1629 assert_equal(len(standbys),(onos_instances-1))
1630 self.subscriber.setUpClass(controller=master)
1631 self.subscriber.num_subscribers = 5
1632 self.subscriber.num_channels = 10
1633 for i in [0,1]:
1634 if i == 1:
1635 status=self.withdraw_cluster_current_mastership(master_ip=master)
1636 assert_equal(status, True)
1637 master,standbys = self.get_cluster_current_master_standbys()
1638 log.info('verifying cord subscriber functionality after cluster current master withdraw mastership')
1639 else:
1640 log.info('verifying cord subscriber functionality before cluster master withdraw mastership')
1641 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1642 num_channels = self.subscriber.num_channels,
1643 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1644 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1645 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1646 self.subscriber.num_channels),controller=master)
1647 assert_equal(test_status, True)
1648 self.subscriber.tearDownClass(controller=master)
1649
1650 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1651 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001652 status = self.verify_cluster_status(onos_instances=onos_instances)
1653 assert_equal(status, True)
1654 master, standbys = self.get_cluster_current_master_standbys()
1655 assert_equal(len(standbys),(onos_instances-1))
1656 onos_names_ips = self.get_cluster_container_names_ips()
1657 member_onos_name = onos_names_ips[standbys[0]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001658 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001659 num_subscribers = 1
1660 num_channels = 10
1661 for i in [0,1]:
1662 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001663 cord_test_onos_shutdown(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001664 time.sleep(30)
ChetanGaonker689b3862016-10-17 16:25:01 -07001665 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001666 assert_equal(status, True)
1667 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1668 else:
1669 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1670 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1671 num_channels = num_channels,
1672 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1673 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1674 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
ChetanGaonker689b3862016-10-17 16:25:01 -07001675 negative_subscriber_auth = 'all',controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001676 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001677 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001678
ChetanGaonker689b3862016-10-17 16:25:01 -07001679 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001680 status = self.verify_cluster_status(onos_instances=onos_instances)
1681 assert_equal(status, True)
1682 master, standbys = self.get_cluster_current_master_standbys()
1683 assert_equal(len(standbys),(onos_instances-1))
1684 onos_names_ips = self.get_cluster_container_names_ips()
1685 member1_onos_name = onos_names_ips[standbys[0]]
1686 member2_onos_name = onos_names_ips[standbys[1]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001687 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001688 num_subscribers = 1
1689 num_channels = 10
1690 for i in [0,1]:
1691 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001692 cord_test_onos_shutdown(node = standbys[0])
1693 cord_test_onos_shutdown(node = standbys[1])
ChetanGaonker2099d722016-10-07 15:16:58 -07001694 time.sleep(60)
1695 status = self.verify_cluster_status(onos_instances=onos_instances-2)
1696 assert_equal(status, True)
1697 log.info('Verifying cord subscriber functionality after cluster members %s and %s are down'%(standbys[0],standbys[1]))
1698 else:
1699 log.info('Verifying cord subscriber functionality before cluster members %s and %s are down'%(standbys[0],standbys[1]))
1700 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1701 num_channels = num_channels,
1702 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1703 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1704 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1705 negative_subscriber_auth = 'all')
1706 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001707 self.subscriber.tearDownClass(controller=master)
1708
1709 #pass
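    # For every connected device the test expects a non-empty master and exactly
    # onos_instances-1 standbys, i.e. each switch role is fully replicated across the cluster.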
1710 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1711 status = self.verify_cluster_status(onos_instances=onos_instances)
1712 assert_equal(status, True)
1713 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1714 for device in device_dict.keys():
1715 log.info("Device is %s"%device_dict[device])
1716 assert_not_equal(device_dict[device]['master'],'none')
1717 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1718 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1719
1720 #pass
1721 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1722 status = self.verify_cluster_status(onos_instances=onos_instances)
1723 assert_equal(status, True)
1724 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1725 cluster_ips = self.get_cluster_current_member_ips()
1726 for ip in cluster_ips:
1727 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1728 assert_equal(len(device_dict.keys()),onos_instances)
1729 for device in device_dict.keys():
1730 log.info("Device is %s"%device_dict[device])
1731 assert_not_equal(device_dict[device]['master'],'none')
1732 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1733 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1734
1735 #pass
1736 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1737 status = self.verify_cluster_status(onos_instances=onos_instances)
1738 assert_equal(status, True)
1739 onos_names_ips = self.get_cluster_container_names_ips()
1740 master_count = self.get_number_of_devices_of_master()
1741 log.info('Master count information is %s'%master_count)
1742 total_devices = 0
1743 for master in master_count.keys():
1744 total_devices += master_count[master]['size']
1745 if master_count[master]['size'] != 0:
1746 restart_ip = master
1747 assert_equal(total_devices,onos_instances)
1748 member_onos_name = onos_names_ips[restart_ip]
1749 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1750 Container(member_onos_name, Onos.IMAGE).restart()
1751 time.sleep(40)
1752 master_count = self.get_number_of_devices_of_master()
1753 log.info('Master count information after restart is %s'%master_count)
1754 total_devices = 0
1755 for master in master_count.keys():
1756 total_devices += master_count[master]['size']
1757 if master == restart_ip:
1758 assert_equal(master_count[master]['size'], 0)
1759 assert_equal(total_devices,onos_instances)
1760
1761 #pass
1762 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1763 status = self.verify_cluster_status(onos_instances=onos_instances)
1764 assert_equal(status, True)
1765 onos_names_ips = self.get_cluster_container_names_ips()
1766 master_count = self.get_number_of_devices_of_master()
1767 log.info('Master count information is %s'%master_count)
1768 total_devices = 0
1769 for master in master_count.keys():
1770 total_devices += master_count[master]['size']
1771 if master_count[master]['size'] != 0:
1772 restart_ip = master
1773 assert_equal(total_devices,onos_instances)
1774 master_onos_name = onos_names_ips[restart_ip]
1775 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1776 Container(master_onos_name, Onos.IMAGE).kill()
1777 time.sleep(40)
1778 for ip in onos_names_ips.keys():
1779 if ip != restart_ip:
1780 controller_ip = ip
1781 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1782 assert_equal(status, True)
1783 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1784 log.info('Master count information after master down is %s'%master_count)
1785 total_devices = 0
1786 for master in master_count.keys():
1787 total_devices += master_count[master]['size']
1788 if master == restart_ip:
1789 assert_equal(master_count[master]['size'], 0)
1790 assert_equal(total_devices,onos_instances)
1791
1792 #pass
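    # Mastership for one device is withdrawn from its current master via
    # withdraw_cluster_current_mastership, and the per-master device counts are re-read to
    # confirm that master now owns one device less.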
1793 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1794 status = self.verify_cluster_status(onos_instances=onos_instances)
1795 assert_equal(status, True)
1796 master_count = self.get_number_of_devices_of_master()
1797 log.info('Master count information is %s'%master_count)
1798 total_devices = 0
1799 for master in master_count.keys():
1800 total_devices += int(master_count[master]['size'])
1801 if master_count[master]['size'] != 0:
1802 master_ip = master
1803 log.info('Devices of master %s are %s'%(master_count[master]['devices'],master))
1804 device_id = str(master_count[master]['devices'][0])
1805 device_count = master_count[master]['size']
1806 assert_equal(total_devices,onos_instances)
1807 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1808 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1809 assert_equal(status, True)
1810 master_count = self.get_number_of_devices_of_master()
1811 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1812 total_devices = 0
1813 for master in master_count.keys():
1814 total_devices += int(master_count[master]['size'])
1815 if master == master_ip:
1816 assert_equal(master_count[master]['size'], device_count-1)
1817 assert_equal(total_devices,onos_instances)
1818
1819 #pass
1820 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1821 status = self.verify_cluster_status(onos_instances=onos_instances)
1822 assert_equal(status, True)
1823 master_count = self.get_number_of_devices_of_master()
1824 log.info('Master count information is %s'%master_count)
1825 total_devices = 0
1826 for master in master_count.keys():
1827 total_devices += master_count[master]['size']
1828 assert_equal(total_devices,onos_instances)
1829 log.info('Restarting cluster')
1830 cord_test_onos_restart()
1831 time.sleep(60)
1832 master_count = self.get_number_of_devices_of_master()
1833 log.info('Master count information after restart is %s'%master_count)
1834 total_devices = 0
1835 for master in master_count.keys():
1836 total_devices += master_count[master]['size']
1837 assert_equal(total_devices,onos_instances)