# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl, get_mac
from OltConfig import OltConfig
from socket import socket
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart, cord_test_restart_cluster
from portmaps import g_subscriber_port_map
from scapy.all import *
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
from OnosLog import OnosLog
from CordLogger import CordLogger
import os
import json
import random
import collections
log.setLevel('INFO')

class cluster_exchange(CordLogger):
    test_path = os.path.dirname(os.path.realpath(__file__))
    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
    mac = RandMAC()._fix()
    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
    igmp_ip = IP(dst = '224.0.0.22')
    ONOS_INSTANCES = 3
    V_INF1 = 'veth0'
    TLS_TIMEOUT = 100
    device_id = 'of:' + get_mac()
    igmp = cluster_igmp()
    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10', end_ip = '224.1.10.49')
    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35', end_ip = '38.24.35.56')
    tls = cluster_tls()
    flows = cluster_flows()
    proxyarp = cluster_proxyarp()
    vrouter = cluster_vrouter()
    acl = cluster_acl()
    dhcprelay = cluster_dhcprelay()
    subscriber = cluster_subscriber()
    testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_graceful_controller_restarts',
                       'test_cluster_single_controller_restarts', 'test_cluster_restarts')

    def setUp(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).setUp()

    def tearDown(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).tearDown()

    def get_controller(self):
        controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
        controller = controller.split(',')[0]
        return controller

    @classmethod
    def get_controllers(cls):
        controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
        return controllers.split(',')

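    # A hypothetical example of the expected environment: with
    # ONOS_CONTROLLER_IP='172.17.0.2,172.17.0.3,172.17.0.4', get_controller()
    # returns '172.17.0.2' while get_controllers() returns all three addresses.
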
    def cliEnter(self, controller = None):
        retries = 0
        while retries < 30:
            self.cli = OnosCliDriver(controller = controller, connect = True)
            if self.cli.handle:
                break
            else:
                retries += 1
                time.sleep(2)

    def cliExit(self):
        self.cli.disconnect()

    def get_leader(self, controller = None):
        self.cliEnter(controller = controller)
        try:
            result = json.loads(self.cli.leaders(jsonFormat = True))
        except:
            result = None

        if result is None:
            log.info('Leaders command failure for controller %s' %controller)
        else:
            log.info('Leaders returned: %s' %result)
        self.cliExit()
        return result

    def onos_shutdown(self, controller = None):
        status = True
        self.cliEnter(controller = controller)
        try:
            self.cli.shutdown(timeout = 10)
        except:
            log.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
            status = False

        self.cliExit()
        return status

    def log_set(self, level = None, app = 'org.onosproject', controllers = None):
        CordLogger.logSet(level = level, app = app, controllers = controllers, forced = True)

    def get_leaders(self, controller = None):
        result_map = {}
        if controller is None:
            controller = self.get_controller()
        if type(controller) in [ list, tuple ]:
            for c in controller:
                leaders = self.get_leader(controller = c)
                result_map[c] = leaders
        else:
            leaders = self.get_leader(controller = controller)
            result_map[controller] = leaders
        return result_map

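    # Sketch of the structure get_leaders() returns (values are illustrative):
    #   { '172.17.0.2': [ ...parsed 'leaders' JSON... ], '172.17.0.3': None }
    # where a None value means the 'leaders' CLI command failed on that node.
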
    def verify_leaders(self, controller = None):
        leaders_map = self.get_leaders(controller = controller)
        failed = [ k for k,v in leaders_map.items() if v == None ]
        return failed

    def verify_cluster_status(self,controller = None,onos_instances=ONOS_INSTANCES,verify=False):
        tries = 0
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
                if cluster_summary:
                    log.info("cluster 'summary' command output is %s"%cluster_summary)
                    nodes = cluster_summary['nodes']
                    if verify:
                        if nodes == onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                    else:
                        if nodes >= onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return False
        except:
            raise Exception('Failed to get cluster members')
            return False

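    # Minimal usage sketch (mirrors how the tests below call it): wait for the
    # cluster to report at least ONOS_INSTANCES nodes before proceeding, e.g.
    #   status = self.verify_cluster_status(onos_instances = ONOS_INSTANCES)
    #   assert_equal(status, True)
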
    def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
        tries = 0
        cluster_ips = []
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
                if cluster_nodes:
                    log.info("cluster 'nodes' output is %s"%cluster_nodes)
                    if nodes_filter:
                        cluster_nodes = nodes_filter(cluster_nodes)
                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
                    self.cliExit()
                    cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
                    return cluster_ips
                else:
                    tries += 1
            self.cliExit()
            return cluster_ips
        except:
            raise Exception('Failed to get cluster members')
            return cluster_ips

    def get_cluster_container_names_ips(self, controller = None):
        onos_names_ips = {}
        controllers = self.get_controllers()
        i = 0
        for controller in controllers:
            if i == 0:
                name = Onos.NAME
            else:
                name = '{}-{}'.format(Onos.NAME, i+1)
            onos_names_ips[controller] = name
            onos_names_ips[name] = controller
            i += 1
        return onos_names_ips
        # onos_ips = self.get_cluster_current_member_ips(controller=controller)
        # onos_names_ips[onos_ips[0]] = Onos.NAME
        # onos_names_ips[Onos.NAME] = onos_ips[0]
        # for i in range(1,len(onos_ips)):
        #     name = '{0}-{1}'.format(Onos.NAME,i+1)
        #     onos_names_ips[onos_ips[i]] = name
        #     onos_names_ips[name] = onos_ips[i]

        # return onos_names_ips

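    # Name/IP mapping sketch (illustrative, assuming Onos.NAME == 'onos' and a
    # three-node cluster at 172.17.0.2-4): the dict maps both directions, e.g.
    #   {'172.17.0.2': 'onos', 'onos': '172.17.0.2', '172.17.0.3': 'onos-2', ...}
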
    #identifying current master of a connected device, not tested
    def get_cluster_current_master_standbys(self,controller=None,device_id=device_id):
        master = None
        standbys = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        log.info('Verifying device info in line %s'%device)
                        if device['id'] == device_id:
                            master = str(device['master'])
                            standbys = map(lambda d: str(d), device['standbys'])
                            log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
                            self.cliExit()
                            return master, standbys
                    self.cliExit()
                    return master, standbys
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return master,standbys
        except:
            raise Exception('Failed to get cluster members')
            return master,standbys

    def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
        '''Returns the master and standbys of all devices connected to the ONOS cluster instance'''
        device_dict = {}
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_dict = {}
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
                        for i in range(len(device_dict[device['id']]['standbys'])):
                            device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
                        log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
                    self.cliExit()
                    return device_dict
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_dict
        except:
            raise Exception('Failed to get cluster members')
            return device_dict

    #lists the devices connected to the ONOS cluster, not tested
    def get_cluster_connected_devices(self,controller=None):
        '''Returns all the devices connected to the ONOS cluster'''
        device_list = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_list = []
                devices = json.loads(self.cli.devices(jsonFormat = True))
                log.info("cluster 'devices' command output is %s"%devices)
                if devices:
                    for device in devices:
                        log.info('device id is %s'%device['id'])
                        device_list.append(str(device['id']))
                    self.cliExit()
                    return device_list
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_list
        except:
            raise Exception('Failed to get cluster members')
            return device_list

    def get_number_of_devices_of_master(self,controller=None):
        '''Returns a mapping of each master to the devices it controls'''
        master_count = {}
        try:
            cli = self.cliEnter(controller = controller)
            masters = json.loads(self.cli.masters(jsonFormat = True))
            if masters:
                for master in masters:
                    master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
                return master_count
            else:
                return master_count
        except:
            raise Exception('Failed to get cluster members')
            return master_count

    def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
        if new_master is None: return False
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_equal(master,new_master)
        log.info('Cluster master changed to %s successfully'%new_master)

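    # The change/withdraw helpers drive mastership through the ONOS 'device-role'
    # CLI; for a hypothetical device 'of:0000000000000001' and node 172.17.0.2 the
    # command built above would read: device-role of:0000000000000001 172.17.0.2 master
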
    def withdraw_cluster_current_mastership(self,master_ip=None,device_id=device_id,controller=None):
        '''The current master loses its mastership, and hence a new master will be elected'''
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_not_equal(new_master_ip,master_ip)
        log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
        log.info('Cluster new master is %s'%new_master_ip)
        return True

    def cluster_controller_restarts(self, graceful = False):
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration, controller = None):
            adjacent_controller = None
            adjacent_controllers = None
            if controller:
                adjacent_controllers = list(set(controllers) - set([controller]))
                adjacent_controller = adjacent_controllers[0]
            for node in controllers:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    failed = self.verify_leaders(controllers)
                    if failed:
                        log.info('Leaders command failed on nodes: %s' %failed)
                        log.error('Test failed on ITERATION %d' %iteration)
                        CordLogger.archive_results(self._testMethodName,
                                                   controllers = controllers,
                                                   iteration = 'FAILED')
                        assert_equal(len(failed), 0)
                    return controller

            try:
                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
                log.info('ONOS cluster formed with controllers: %s' %ips)
                st = True
            except:
                st = False

            failed = self.verify_leaders(controllers)
            if failed:
                log.error('Test failed on ITERATION %d' %iteration)
                assert_equal(len(failed), 0)
            if st is False:
                log.info('No storage exception and ONOS cluster was not formed successfully')
            else:
                controller = None

            return controller

        next_controller = None
        tries = 10
        for num in range(tries):
            index = num % ctlr_len
            #index = random.randrange(0, ctlr_len)
            controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
            controller = onos_map[controller_name]
            log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
            try:
                #enable debug log for the other controllers before restarting this controller
                adjacent_controllers = list( set(controllers) - set([controller]) )
                self.log_set(controllers = adjacent_controllers)
                self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
                if graceful is True:
                    log.info('Gracefully shutting down controller: %s' %controller)
                    self.onos_shutdown(controller)
                cord_test_onos_restart(node = controller, timeout = 0)
                self.log_set(controllers = controller)
                self.log_set(app = 'io.atomix', controllers = controller)
                time.sleep(60)
            except:
                time.sleep(5)
                continue

            #first archive the test case logs for this run
            CordLogger.archive_results(self._testMethodName,
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            next_controller = check_exception(num, controller = controller)

    def test_cluster_controller_restarts(self):
        '''Test the cluster by repeatedly killing the controllers'''
        self.cluster_controller_restarts()

    def test_cluster_graceful_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the controllers gracefully'''
        self.cluster_controller_restarts(graceful = True)

    def test_cluster_single_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the same controller'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration, controller, inclusive = False):
            adjacent_controllers = list(set(controllers) - set([controller]))
            adjacent_controller = adjacent_controllers[0]
            controller_list = adjacent_controllers if inclusive == False else controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                    log.error('Test failed on ITERATION %d' %iteration)
                    CordLogger.archive_results('test_cluster_single_controller_restarts',
                                               controllers = controllers,
                                               iteration = 'FAILED')
                    assert_equal(len(failed), 0)
                    return controller

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if controller in ips and inclusive is False:
                    log.info('Controller %s still ACTIVE on Node %s after it was shutdown' %(controller, ctlr))
                if controller not in ips and inclusive is True:
                    log.info('Controller %s still INACTIVE on Node %s after it was restarted' %(controller, ctlr))

            return controller

        tries = 10
        #choose a random controller for shutdown/restarts
        controller = controllers[random.randrange(0, ctlr_len)]
        controller_name = onos_map[controller]
        ##enable the log level for the controllers
        self.log_set(controllers = controllers)
        self.log_set(app = 'io.atomix', controllers = controllers)
        for num in range(tries):
            log.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
            try:
                cord_test_onos_shutdown(node = controller)
                time.sleep(20)
            except:
                time.sleep(5)
                continue
            #check for exceptions on the adjacent nodes
            check_exception(num, controller)
            #Now restart the controller back
            log.info('Restarting back the controller %s' %controller_name)
            cord_test_onos_restart(node = controller)
            self.log_set(controllers = controller)
            self.log_set(app = 'io.atomix', controllers = controller)
            time.sleep(60)
            #archive the logs for this run
            CordLogger.archive_results('test_cluster_single_controller_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            check_exception(num, controller, inclusive = True)

    def test_cluster_restarts(self):
        '''Test the cluster by repeatedly restarting the entire cluster'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration):
            controller_list = controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                    log.error('Test failed on ITERATION %d' %iteration)
                    CordLogger.archive_results('test_cluster_restarts',
                                               controllers = controllers,
                                               iteration = 'FAILED')
                    assert_equal(len(failed), 0)
                    return

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if len(ips) != len(controllers):
                    log.error('Test failed on ITERATION %d' %iteration)
                    CordLogger.archive_results('test_cluster_restarts',
                                               controllers = controllers,
                                               iteration = 'FAILED')
                    assert_equal(len(ips), len(controllers))

        tries = 10
        for num in range(tries):
            log.info('ITERATION: %d. Restarting cluster with controllers at %s' %(num+1, controllers))
            try:
                cord_test_restart_cluster()
                self.log_set(controllers = controllers)
                self.log_set(app = 'io.atomix', controllers = controllers)
                log.info('Delaying before verifying cluster status')
                time.sleep(60)
            except:
                time.sleep(10)
                continue

            #archive the logs for this run before verification
            CordLogger.archive_results('test_cluster_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            #check for exceptions on the cluster nodes
            check_exception(num)

    #pass
    def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster exists with %d ONOS instances'%onos_instances)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d nodes to the ONOS cluster' %add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Removing cluster current master %s'%(master))
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
        assert_equal(status, True)
        new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
        assert_not_equal(master,new_master)
        log.info('Successfully removed the cluster master instance')

    def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = standbys[0])
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
        assert_equal(status, True)

    def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = standbys[0])
        log.info('Removing cluster member %s'%standbys[1])
        cord_test_onos_shutdown(node = standbys[1])
        time.sleep(60)
        onos_instances = onos_instances - 2
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
        assert_equal(status, True)

    def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(remove):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Removing onos container with name %s'%standbys[i])
            cord_test_onos_shutdown(node = standbys[i])
        time.sleep(60)
        onos_instances = onos_instances - remove
        status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
        assert_equal(status, True)

    #nottest test cluster not coming up properly if member goes down
    def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, onos_instances - i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        onos_instances = onos_instances-remove
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = onos_instances-remove
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        onos_instances = onos_instances+add
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Restarting cluster')
        cord_test_onos_restart()
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Restarting cluster master %s'%master)
        cord_test_onos_restart(node = master)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up after master restart as expected')

    #test fails; the master changes after restart. Need to check the correct behavior.
    def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master1, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master1]
        log.info('Restarting cluster master %s'%master1)
        cord_test_onos_restart(node = master1)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master2, standbys = self.get_cluster_current_master_standbys()
        assert_equal(master1,master2)
        log.info('Cluster master is same before and after cluster master restart as expected')

    def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Restarting cluster member %s'%standbys[0])
        cord_test_onos_restart(node = standbys[0])
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting one member')

    def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
        cord_test_onos_restart(node = standbys[0])
        cord_test_onos_restart(node = standbys[1])
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting two members')

    def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status,True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(members):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Restarting cluster member %s'%standbys[i])
            cord_test_onos_restart(node = standbys[i])

        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting %d members'%members)

    def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        log.info('Cluster current master of devices is %s'%master)
        self.change_master_current_cluster(new_master=standbys[0])
        log.info('Cluster master changed successfully')

    #tested on single onos setup.
    def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        for onos_ip in onos_ips:
            tries = 0
            flag = False
            try:
                self.cliEnter(controller = onos_ip)
                while tries <= 5:
                    routes = json.loads(self.cli.routes(jsonFormat = True))
                    if routes:
                        assert_equal(len(routes['routes4']), networks)
                        self.cliExit()
                        flag = True
                        break
                    else:
                        tries += 1
                        time.sleep(1)
                assert_equal(flag, True)
            except:
                log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
                raise

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res,True)
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        log.info('Verifying vrouter traffic after cluster master is down')
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        cord_test_onos_restart()
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        time.sleep(15)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before master down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        log.info('Verifying vrouter traffic after app deactivated')
        time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        log.info('Verifying vrouter traffic after master down')
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before cluster member down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        log.info('Verifying vrouter after cluster member down')
        cord_test_onos_shutdown(node = standbys[0])
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter should work properly if member of cluster goes down

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[1]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster member restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart(node = standbys[1])
        log.info('Verifying vrouter traffic after cluster member restart')
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter should work properly if member of cluster restarts

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart()
        log.info('Verifying vrouter traffic after cluster restart')
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter should work properly if cluster restarts


    #test fails because flow state is in pending_add in onos
    def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
        ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            udpSrc = ingress_map['udp_port'],
                            udpDst = egress_map['udp_port'],
                            controller=master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
                self.success = True
            sniff(timeout=2,
                  lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
                  and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])

        for i in [0,1]:
            if i == 1:
                cord_test_onos_shutdown(node = master)
                log.info('Verifying flows traffic after master killed')
                time.sleep(45)
            else:
                log.info('Verifying flows traffic before master killed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
            L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
            pkt = L2/L3/L4
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1' }
        ingress_map = { 'ip': '192.168.40.1' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ecn = 1,
                            controller=master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
                self.success = True
            sniff(count=2, timeout=5,
                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
                  and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1, prn = recv_cb,
                  iface = self.flows.port_map[egress])
        for i in [0,1]:
            if i == 1:
                log.info('Changing cluster master to %s'%standbys[0])
                self.change_master_current_cluster(new_master=standbys[0])
                log.info('Verifying flow traffic after cluster master changed')
            else:
                log.info('Verifying flow traffic before cluster master changed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
            pkt = L2/L3
            log.info('Sending a packet to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)
1024
ChetanGaonker689b3862016-10-17 16:25:01 -07001025 #pass
1026 def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
1027 status = self.verify_cluster_status(onos_instances=onos_instances)
1028 assert_equal(status, True)
1029 master,standbys = self.get_cluster_current_master_standbys()
1030 onos_names_ips = self.get_cluster_container_names_ips()
1031 master_onos_name = onos_names_ips[master]
1032 self.flows.setUpClass()
1033 egress = 1
1034 ingress = 2
1035 egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
1036 ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
1037 flow = OnosFlowCtrl(deviceId = self.device_id,
1038 egressPort = egress,
1039 ingressPort = ingress,
1040 ipv6_extension = 0,
1041 controller=master
1042 )
1043
1044 result = flow.addFlow()
1045 assert_equal(result, True)
1046 ##wait for flows to be added to ONOS
1047 time.sleep(1)
1048 self.success = False
1049 def mac_recv_task():
1050 def recv_cb(pkt):
1051 log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
1052 self.success = True
1053 sniff(timeout=2,count=5,
1054 lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
1055 for i in [0,1]:
1056 if i == 1:
1057 log.info('Restart cluster current master %s'%master)
1058 Container(master_onos_name,Onos.IMAGE).restart()
1059 time.sleep(45)
1060 log.info('Verifying flow traffic after master restart')
1061 else:
1062 log.info('Verifying flow traffic before master restart')
1063 t = threading.Thread(target = mac_recv_task)
1064 t.start()
1065 L2 = self.flows_eth
1066 L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
1067 pkt = L2/L3
1068 log.info('Sending packets to verify if flows are correct')
1069 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
1070 t.join()
1071 assert_equal(self.success, True)
1072
1073 def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
1074 dst_mac = self.igmp.iptomac(group)
1075 eth = Ether(dst= dst_mac)
1076 ip = IP(dst=group,src=source)
1077 data = repr(monotonic.monotonic())
1078 sendp(eth/ip/data,count=20, iface = intf)
1079 pkt = (eth/ip/data)
1080 log.info('multicast traffic packet %s'%pkt.show())
1081
1082 def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
1083 log.info('verifying multicast traffic for group %s from source %s'%(group,source))
1084 self.success = False
1085 def recv_task():
1086 def igmp_recv_cb(pkt):
1087 log.info('multicast data received for group %s from source %s'%(group,source))
1088 self.success = True
1089 sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
1090 t = threading.Thread(target = recv_task)
1091 t.start()
1092 self.send_multicast_data_traffic(group,source=source)
1093 t.join()
1094 return self.success
1095
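    # Usage sketch for the two helpers above (values are illustrative and mirror
    # the IGMP tests below): send to a multicast group and confirm reception on
    # the receiver interface, e.g.
    #   status = self.verify_igmp_data_traffic('224.2.3.4', intf = self.V_INF1, source = '2.2.2.2')
    #   assert_equal(status, True)
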
1096 #pass
1097 def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
1098 status = self.verify_cluster_status(onos_instances=onos_instances)
1099 assert_equal(status, True)
1100 master, standbys = self.get_cluster_current_master_standbys()
1101 assert_equal(len(standbys), (onos_instances-1))
1102 onos_names_ips = self.get_cluster_container_names_ips()
1103 master_onos_name = onos_names_ips[master]
1104 self.igmp.setUp(controller=master)
1105 groups = ['224.2.3.4','230.5.6.7']
1106 src_list = ['2.2.2.2','3.3.3.3']
1107 self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
1108 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1109 iface = self.V_INF1, delay = 2)
1110 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1111 iface = self.V_INF1, delay = 2)
1112 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1113 assert_equal(status,True)
1114 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1115 assert_equal(status,False)
1116 log.info('restarting cluster master %s'%master)
1117 Container(master_onos_name,Onos.IMAGE).restart()
1118 time.sleep(60)
1119 log.info('verifying multicast data traffic after master restart')
1120 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1121 assert_equal(status,True)
1122 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1123 assert_equal(status,False)
1124
1125 #pass
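    # Same INCLUDE/EXCLUDE verification as above, but the master is killed instead of restarted and the
    # remaining instances are expected to keep forwarding correctly.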
1126 def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
1127 status = self.verify_cluster_status(onos_instances=onos_instances)
1128 assert_equal(status, True)
1129 master, standbys = self.get_cluster_current_master_standbys()
1130 assert_equal(len(standbys), (onos_instances-1))
1131 onos_names_ips = self.get_cluster_container_names_ips()
1132 master_onos_name = onos_names_ips[master]
1133 self.igmp.setUp(controller=master)
1134 groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
1135 src_list = [self.igmp.randomsourceip()]
1136 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1137 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1138 iface = self.V_INF1, delay = 2)
1139 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1140 iface = self.V_INF1, delay = 2)
1141 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1142 assert_equal(status,True)
1143 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1144 assert_equal(status,False)
1145 log.info('Killing cluster master %s'%master)
1146 Container(master_onos_name,Onos.IMAGE).kill()
1147 time.sleep(60)
1148 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1149 assert_equal(status, True)
1150 log.info('Verifying multicast data traffic after cluster master down')
1151 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1152 assert_equal(status,True)
1153 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1154 assert_equal(status,False)
1155
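    # Measures, by polling once per second, how many seconds multicast traffic takes to recover after
    # the cluster master is killed.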
1156 def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
1157 status = self.verify_cluster_status(onos_instances=onos_instances)
1158 assert_equal(status, True)
1159 master, standbys = self.get_cluster_current_master_standbys()
1160 assert_equal(len(standbys), (onos_instances-1))
1161 onos_names_ips = self.get_cluster_container_names_ips()
1162 master_onos_name = onos_names_ips[master]
1163 self.igmp.setUp(controller=master)
1164 groups = [self.igmp.random_mcast_ip()]
1165 src_list = [self.igmp.randomsourceip()]
1166 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1167 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1168 iface = self.V_INF1, delay = 2)
1169 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1170 assert_equal(status,True)
1171 log.info('Killing cluster master %s'%master)
1172 Container(master_onos_name,Onos.IMAGE).kill()
1173 count = 0
1174 for i in range(60):
1175 log.info('Verifying multicast data traffic after cluster master down')
1176 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1177 if status:
1178 break
1179 else:
1180 count += 1
1181 time.sleep(1)
1182 assert_equal(status, True)
1183 log.info('Time taken to recover traffic after cluster master down is %d seconds'%count)
1184
1185
1186 #pass
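    # Verifies that a TO_EXCLUDE (leave) report sent after a mastership change actually stops traffic
    # for the group.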
1187 def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
1188 status = self.verify_cluster_status(onos_instances=onos_instances)
1189 assert_equal(status, True)
1190 master, standbys = self.get_cluster_current_master_standbys()
1191 assert_equal(len(standbys), (onos_instances-1))
1192 self.igmp.setUp(controller=master)
1193 groups = [self.igmp.random_mcast_ip()]
1194 src_list = [self.igmp.randomsourceip()]
1195 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1196 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1197 iface = self.V_INF1, delay = 2)
1198 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1199 assert_equal(status,True)
1200 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1201 self.change_cluster_current_master(new_master=standbys[0])
1202 log.info('Verifying multicast traffic after cluster master change')
1203 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1204 assert_equal(status,True)
1205 log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
1206 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
1207 iface = self.V_INF1, delay = 1)
1208 time.sleep(10)
1209 status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
1210 assert_equal(status,False)
1211
1212 #pass
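    # Sends the IGMP join while a standby holds mastership, switches mastership back to the original
    # master, and checks that the join state survives both changes.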
1213 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1214 status = self.verify_cluster_status(onos_instances=onos_instances)
1215 assert_equal(status, True)
1216 master,standbys = self.get_cluster_current_master_standbys()
1217 assert_equal(len(standbys), (onos_instances-1))
1218 self.igmp.setUp(controller=master)
1219 groups = [self.igmp.random_mcast_ip()]
1220 src_list = [self.igmp.randomsourceip()]
1221 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1222 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1223 self.change_cluster_current_master(new_master = standbys[0])
1224 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1225 iface = self.V_INF1, delay = 2)
1226 time.sleep(1)
1227 self.change_cluster_current_master(new_master = master)
1228 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1229 assert_equal(status,True)
1230
1231 #pass
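    # Basic EAP-TLS authentication exercised through the current cluster master.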
ChetanGaonker2099d722016-10-07 15:16:58 -07001232 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001233 def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001234 status = self.verify_cluster_status(onos_instances=onos_instances)
1235 assert_equal(status, True)
1236 master, standbys = self.get_cluster_current_master_standbys()
1237 assert_equal(len(standbys), (onos_instances-1))
1238 self.tls.setUp(controller=master)
1239 df = defer.Deferred()
1240 def eap_tls_verify(df):
1241 tls = TLSAuthTest()
1242 tls.runTest()
1243 df.callback(0)
1244 reactor.callLater(0, eap_tls_verify, df)
1245 return df
1246
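    # EAP-TLS authentication verified before and after handing mastership over to a standby.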
1247 @deferred(120)
ChetanGaonker689b3862016-10-17 16:25:01 -07001248 def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001249 master, standbys = self.get_cluster_current_master_standbys()
1250 assert_equal(len(standbys), (onos_instances-1))
1251 self.tls.setUp()
1252 df = defer.Deferred()
1253 def eap_tls_verify(df):
1254 tls = TLSAuthTest()
1255 tls.runTest()
1256 df.callback(0)
1257 for i in [0,1]:
1258 if i == 1:
1259 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1260 self.change_master_current_cluster(new_master=standbys[0])
1261 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1262 else:
1263 log.info('Verifying tls authentication before cluster master change')
1264 reactor.callLater(0, eap_tls_verify, df)
1265 return df
1266
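    # EAP-TLS authentication verified before and after the current master is shut down.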
1267 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001268 def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001269 status = self.verify_cluster_status(onos_instances=onos_instances)
1270 assert_equal(status, True)
1271 master, standbys = self.get_cluster_current_master_standbys()
1272 assert_equal(len(standbys), (onos_instances-1))
1273 onos_names_ips = self.get_cluster_container_names_ips()
1274 master_onos_name = onos_names_ips[master]
1275 self.tls.setUp()
1276 df = defer.Deferred()
1277 def eap_tls_verify(df):
1278 tls = TLSAuthTest()
1279 tls.runTest()
1280 df.callback(0)
1281 for i in [0,1]:
1282 if i == 1:
1283 log.info('Killing cluster current master %s'%master)
A R Karthick3b2e0372016-12-14 17:37:43 -08001284 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001285 time.sleep(20)
1286 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1287 assert_equal(status, True)
1288 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1289 log.info('Verifying tls authentication after killing cluster master')
1290 reactor.callLater(0, eap_tls_verify, df)
1291 return df
1292
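    # Negative EAP-TLS case (no client certificate) checked before and after restarting a cluster member.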
1293 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001294 def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001295 status = self.verify_cluster_status(onos_instances=onos_instances)
1296 assert_equal(status, True)
1297 master, standbys = self.get_cluster_current_master_standbys()
1298 assert_equal(len(standbys), (onos_instances-1))
1299 onos_names_ips = self.get_cluster_container_names_ips()
1300 member_onos_name = onos_names_ips[standbys[0]]
1301 self.tls.setUp()
1302 df = defer.Deferred()
1303 def eap_tls_no_cert(df):
1304 def tls_no_cert_cb():
1305 log.info('TLS authentication failed with no certificate')
1306 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1307 tls.runTest()
1308 assert_equal(tls.failTest, True)
1309 df.callback(0)
1310 for i in [0,1]:
1311 if i == 1:
1312 log.info('Restart cluster member %s'%standbys[0])
1313 Container(member_onos_name,Onos.IMAGE).restart()
1314 time.sleep(20)
1315 status = self.verify_cluster_status(onos_instances=onos_instances)
1316 assert_equal(status, True)
1317 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1318 log.info('Verifying tls authentication after member restart')
1319 reactor.callLater(0, eap_tls_no_cert, df)
1320 return df
1321
ChetanGaonker689b3862016-10-17 16:25:01 -07001322 #pass
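    # Proxy ARP checked across a mastership change, then with the app deactivated (replies must stop)
    # and reactivated (replies must resume).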
1323 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1324 status = self.verify_cluster_status(onos_instances=onos_instances)
1325 assert_equal(status,True)
1326 master,standbys = self.get_cluster_current_master_standbys()
1327 assert_equal(len(standbys),(onos_instances-1))
1328 self.proxyarp.setUpClass()
1329 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1330 ingress = hosts+1
1331 for hostip, hostmac in hosts_config:
1332 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1333 time.sleep(1)
1334 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1335 self.change_cluster_current_master(new_master=standbys[0])
1336 log.info('verifying proxyarp after master change')
1337 for hostip, hostmac in hosts_config:
1338 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1339 time.sleep(1)
1340 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1341 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1342 time.sleep(3)
1343 for hostip, hostmac in hosts_config:
1344 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1345 time.sleep(1)
1346 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1347 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1348 time.sleep(3)
1349 for hostip, hostmac in hosts_config:
1350 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1351 time.sleep(1)
ChetanGaonker2099d722016-10-07 15:16:58 -07001352
ChetanGaonker689b3862016-10-17 16:25:01 -07001353 #pass
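    # Proxy ARP behaviour verified before and after killing one cluster member.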
1354 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001355 status = self.verify_cluster_status(onos_instances=onos_instances)
1356 assert_equal(status, True)
1357 master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001358 assert_equal(len(standbys), (onos_instances-1))
1359 onos_names_ips = self.get_cluster_container_names_ips()
1360 member_onos_name = onos_names_ips[standbys[1]]
1361 self.proxyarp.setUpClass()
1362 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1363 ingress = hosts+1
1364 for hostip, hostmac in hosts_config:
1365 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1366 time.sleep(1)
1367 log.info('killing cluster member %s'%standbys[1])
1368 Container(member_onos_name,Onos.IMAGE).kill()
1369 time.sleep(20)
1370 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1371 assert_equal(status, True)
1372 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1373 log.info('verifying proxy arp functionality after cluster member down')
1374 for hostip, hostmac in hosts_config:
1375 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1376 time.sleep(1)
1377
1378 #pass
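    # Ten parallel ARP requests on different ingress ports; each thread records success in success_dir
    # and the test passes only if all ten replies were seen.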
1379 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1380 status = self.verify_cluster_status(onos_instances=onos_instances)
1381 assert_equal(status, True)
1382 self.proxyarp.setUpClass()
1383 master, standbys = self.get_cluster_current_master_standbys()
1384 assert_equal(len(standbys), (onos_instances-1))
1385 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1386 self.success = True
1387 ingress = hosts+1
1388 ports = range(ingress,ingress+10)
1389 hostmac = []
1390 hostip = []
1391 for ip,mac in hosts_config:
1392 hostmac.append(mac)
1393 hostip.append(ip)
1394 success_dir = {}
1395 def verify_proxyarp(*r):
1396 ingress, hostmac, hostip = r[0],r[1],r[2]
1397 def mac_recv_task():
1398 def recv_cb(pkt):
1399 log.info('ARP reply seen with source MAC %s' %(pkt[ARP].hwsrc))
1400 success_dir[current_thread().name] = True
1401 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1402 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1403 t = threading.Thread(target = mac_recv_task)
1404 t.start()
1405 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1406 log.info('Sending arp request for dest ip %s on interface %s' %
1407 (hostip,self.proxyarp.port_map[ingress]))
1408 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1409 t.join()
1410 t = []
1411 for i in range(10):
1412 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1413 for i in range(10):
1414 t[i].start()
1415 time.sleep(2)
1416 for i in range(10):
1417 t[i].join()
1418 if len(success_dir) != 10:
1419 self.success = False
1420 assert_equal(self.success, True)
1421
1422 #pass
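    # Adds an ACL rule via the master, changes mastership, and removes the same rule through the new master.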
1423 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1424 status = self.verify_cluster_status(onos_instances=onos_instances)
1425 assert_equal(status, True)
1426 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001427 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001428 self.acl.setUp()
1429 acl_rule = ACLTest()
1430 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1431 if status is False:
1432 log.info('JSON request returned status %d' %code)
1433 assert_equal(status, True)
1434 result = acl_rule.get_acl_rules(controller=master)
1435 aclRules1 = result.json()['aclRules']
1436 log.info('Added acl rules is %s'%aclRules1)
1437 acl_Id = map(lambda d: d['id'], aclRules1)
1438 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1439 self.change_cluster_current_master(new_master=standbys[0])
1440 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1441 if status is False:
1442 log.info('JSON request returned status %d' %code)
1443 assert_equal(status, True)
1444
1445 #pass
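    # Adds an ACL rule, kills the master, and verifies that the surviving instances report the same rule id.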
1446 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1447 status = self.verify_cluster_status(onos_instances=onos_instances)
1448 assert_equal(status, True)
1449 master,standbys = self.get_cluster_current_master_standbys()
1450 assert_equal(len(standbys),(onos_instances-1))
1451 onos_names_ips = self.get_cluster_container_names_ips()
1452 master_onos_name = onos_names_ips[master]
1453 self.acl.setUp()
1454 acl_rule = ACLTest()
1455 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1456 if status is False:
1457 log.info('JSON request returned status %d' %code)
1458 assert_equal(status, True)
1459 result1 = acl_rule.get_acl_rules(controller=master)
1460 aclRules1 = result1.json()['aclRules']
1461 log.info('Added acl rules is %s'%aclRules1)
1462 acl_Id1 = map(lambda d: d['id'], aclRules1)
1463 log.info('Killing cluster current master %s'%master)
1464 Container(master_onos_name,Onos.IMAGE).kill()
1465 time.sleep(45)
1466 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1467 assert_equal(status, True)
1468 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1469 assert_equal(len(standbys),(onos_instances-2))
1470 assert_not_equal(new_master,master)
1471 result2 = acl_rule.get_acl_rules(controller=new_master)
1472 aclRules2 = result2.json()['aclRules']
1473 acl_Id2 = map(lambda d: d['id'], aclRules2)
1474 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1475 assert_equal(acl_Id2,acl_Id1)
1476
1477 #acl traffic scenario not working as acl rule is not getting added to onos
1478 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1479 status = self.verify_cluster_status(onos_instances=onos_instances)
1480 assert_equal(status, True)
1481 master,standbys = self.get_cluster_current_master_standbys()
1482 assert_equal(len(standbys),(onos_instances-1))
1483 onos_names_ips = self.get_cluster_container_names_ips()
1484 member1_onos_name = onos_names_ips[standbys[0]]
1485 member2_onos_name = onos_names_ips[standbys[1]]
1486 ingress = self.acl.ingress_iface
1487 egress = self.acl.CURRENT_PORT_NUM
1488 acl_rule = ACLTest()
1489 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1490 self.acl.CURRENT_PORT_NUM += 1
1491 time.sleep(5)
1492 if status is False:
1493 log.info('JSON request returned status %d' %code)
1494 assert_equal(status, True)
1495 srcMac = '00:00:00:00:00:11'
1496 dstMac = host_ip_mac[0][1]
1497 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1498 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1499 time.sleep(10)
1500 if status is False:
1501 log.info('JSON request returned status %d' %code)
1502 assert_equal(status, True)
1503 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1504 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1505 Container(member1_onos_name, Onos.IMAGE).kill()
1506 Container(member2_onos_name, Onos.IMAGE).kill()
1507 time.sleep(40)
1508 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1509 assert_equal(status, True)
1510 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1511 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1512
1513 #pass
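    # DHCP relay: obtain a lease through the master, change mastership, release the address and confirm
    # the same address is re-offered on a fresh discover.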
1514 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1515 status = self.verify_cluster_status(onos_instances=onos_instances)
1516 assert_equal(status, True)
1517 master,standbys = self.get_cluster_current_master_standbys()
1518 assert_equal(len(standbys),(onos_instances-1))
1519 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001520 mac = self.dhcprelay.get_mac(iface)
1521 self.dhcprelay.host_load(iface)
1522 ##we use the defaults for this test that serves as an example for others
1523 ##You don't need to restart dhcpd server if retaining default config
1524 config = self.dhcprelay.default_config
1525 options = self.dhcprelay.default_options
1526 subnet = self.dhcprelay.default_subnet_config
1527 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1528 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1529 config = config,
1530 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001531 subnet = subnet,
1532 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001533 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1534 cip, sip = self.dhcprelay.send_recv(mac)
1535 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1536 self.change_master_current_cluster(new_master=standbys[0])
1537 log.info('Releasing ip %s to server %s' %(cip, sip))
1538 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1539 log.info('Triggering DHCP discover again after release')
1540 cip2, sip2 = self.dhcprelay.send_recv(mac)
1541 log.info('Verifying released IP was given back on rediscover')
1542 assert_equal(cip, cip2)
1543 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1544 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001545 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001546
ChetanGaonker689b3862016-10-17 16:25:01 -07001547
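    # DHCP relay: kill the master after a lease is granted and confirm the client can re-request the
    # same address from the remaining instances.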
1548 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1549 status = self.verify_cluster_status(onos_instances=onos_instances)
1550 assert_equal(status, True)
1551 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001552 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001553 onos_names_ips = self.get_cluster_container_names_ips()
1554 master_onos_name = onos_names_ips[master]
1555 self.dhcprelay.setUpClass(controller=master)
1556 mac = self.dhcprelay.get_mac(iface)
1557 self.dhcprelay.host_load(iface)
1558 ##we use the defaults for this test that serves as an example for others
1559 ##You don't need to restart dhcpd server if retaining default config
1560 config = self.dhcprelay.default_config
1561 options = self.dhcprelay.default_options
1562 subnet = self.dhcprelay.default_subnet_config
1563 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1564 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1565 config = config,
1566 options = options,
1567 subnet = subnet,
1568 controller=master)
1569 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1570 log.info('Initiating dhcp process from client %s'%mac)
1571 cip, sip = self.dhcprelay.send_recv(mac)
1572 log.info('Killing cluster current master %s'%master)
1573 Container(master_onos_name, Onos.IMAGE).kill()
1574 time.sleep(60)
1575 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1576 assert_equal(status, True)
1577 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1578 log.info("Verifying dhcp clients gets same IP after cluster master restarts")
1579 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1580 assert_equal(new_cip, cip)
1581 self.dhcprelay.tearDownClass(controller=standbys[0])
1582
1583 #pass
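    # DHCP relay exercised across two mastership changes, with a different client MAC served each time.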
1584 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1585 status = self.verify_cluster_status(onos_instances=onos_instances)
1586 assert_equal(status, True)
1587 master,standbys = self.get_cluster_current_master_standbys()
1588 assert_equal(len(standbys),(onos_instances-1))
1589 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001590 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1591 self.dhcprelay.host_load(iface)
1592 ##we use the defaults for this test that serves as an example for others
1593 ##You don't need to restart dhcpd server if retaining default config
1594 config = self.dhcprelay.default_config
1595 options = self.dhcprelay.default_options
1596 subnet = self.dhcprelay.default_subnet_config
1597 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1598 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1599 config = config,
1600 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001601 subnet = subnet,
1602 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001603 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1604 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1605 assert_not_equal(cip1,None)
1606 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1607 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1608 self.change_master_current_cluster(new_master=standbys[0])
1609 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1610 assert_not_equal(cip2,None)
1611 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1612 log.info('Changing cluster master from %s to %s'%(standbys[0],master))
1613 self.change_master_current_cluster(new_master=master)
1614 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1615 assert_not_equal(cip3,None)
1616 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
ChetanGaonker689b3862016-10-17 16:25:01 -07001617 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001618
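    # CORD subscriber join/next-channel flow verified before and after a full cluster restart.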
ChetanGaonker689b3862016-10-17 16:25:01 -07001619 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001620 status = self.verify_cluster_status(onos_instances=onos_instances)
1621 assert_equal(status, True)
    master, standbys = self.get_cluster_current_master_standbys()
    assert_equal(len(standbys), (onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001622 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001623 self.subscriber.num_subscribers = 5
1624 self.subscriber.num_channels = 10
1625 for i in [0,1]:
1626 if i == 1:
1627 cord_test_onos_restart()
1628 time.sleep(45)
1629 status = self.verify_cluster_status(onos_instances=onos_instances)
1630 assert_equal(status, True)
1631 log.info('Verifying cord subscriber functionality after cluster restart')
1632 else:
1633 log.info('Verifying cord subscriber functionality before cluster restart')
1634 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1635 num_channels = self.subscriber.num_channels,
1636 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1637 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1638 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1639 self.subscriber.num_channels))
1640 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001641 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001642
ChetanGaonker689b3862016-10-17 16:25:01 -07001643 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
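    # Subscriber functionality checked before and after the current master withdraws mastership.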
1644 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1645 status = self.verify_cluster_status(onos_instances=onos_instances)
1646 assert_equal(status, True)
1647 master,standbys = self.get_cluster_current_master_standbys()
1648 assert_equal(len(standbys),(onos_instances-1))
1649 self.subscriber.setUpClass(controller=master)
1650 self.subscriber.num_subscribers = 5
1651 self.subscriber.num_channels = 10
1652 for i in [0,1]:
1653 if i == 1:
1654 status=self.withdraw_cluster_current_mastership(master_ip=master)
1655 assert_equal(status, True)
1656 master,standbys = self.get_cluster_current_master_standbys()
1657 log.info('verifying cord subscriber functionality after cluster current master withdraw mastership')
1658 else:
1659 log.info('verifying cord subscriber functionality before cluster master withdraw mastership')
1660 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1661 num_channels = self.subscriber.num_channels,
1662 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1663 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1664 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1665 self.subscriber.num_channels),controller=master)
1666 assert_equal(test_status, True)
1667 self.subscriber.tearDownClass(controller=master)
1668
1669 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1670 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001671 status = self.verify_cluster_status(onos_instances=onos_instances)
1672 assert_equal(status, True)
1673 master, standbys = self.get_cluster_current_master_standbys()
1674 assert_equal(len(standbys),(onos_instances-1))
1675 onos_names_ips = self.get_cluster_container_names_ips()
1676 member_onos_name = onos_names_ips[standbys[0]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001677 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001678 num_subscribers = 1
1679 num_channels = 10
1680 for i in [0,1]:
1681 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001682 cord_test_onos_shutdown(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001683 time.sleep(30)
ChetanGaonker689b3862016-10-17 16:25:01 -07001684 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001685 assert_equal(status, True)
1686 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1687 else:
1688 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1689 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1690 num_channels = num_channels,
1691 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1692 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1693 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
ChetanGaonker689b3862016-10-17 16:25:01 -07001694 negative_subscriber_auth = 'all',controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001695 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001696 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001697
ChetanGaonker689b3862016-10-17 16:25:01 -07001698 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001699 status = self.verify_cluster_status(onos_instances=onos_instances)
1700 assert_equal(status, True)
1701 master, standbys = self.get_cluster_current_master_standbys()
1702 assert_equal(len(standbys),(onos_instances-1))
1703 onos_names_ips = self.get_cluster_container_names_ips()
1704 member1_onos_name = onos_names_ips[standbys[0]]
1705 member2_onos_name = onos_names_ips[standbys[1]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001706 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001707 num_subscribers = 1
1708 num_channels = 10
1709 for i in [0,1]:
1710 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001711 cord_test_onos_shutdown(node = standbys[0])
1712 cord_test_onos_shutdown(node = standbys[1])
ChetanGaonker2099d722016-10-07 15:16:58 -07001713 time.sleep(60)
1714 status = self.verify_cluster_status(onos_instances=onos_instances-2)
1715 assert_equal(status, True)
1716 log.info('Verifying cord subscriber functionality after cluster two members %s and %s down'%(standbys[0],standbys[1]))
1717 else:
1718 log.info('Verifying cord subscriber functionality before cluster two members %s and %s down'%(standbys[0],standbys[1]))
1719 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1720 num_channels = num_channels,
1721 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1722 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1723 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1724 negative_subscriber_auth = 'all')
1725 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001726 self.subscriber.tearDownClass(controller=master)
1727
1728 #pass
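    # With multiple OVS switches connected, every device must report a master and onos_instances-1 standbys.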
1729 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1730 status = self.verify_cluster_status(onos_instances=onos_instances)
1731 assert_equal(status, True)
1732 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1733 for device in device_dict.keys():
1734 log.info("Device is %s"%device_dict[device])
1735 assert_not_equal(device_dict[device]['master'],'none')
1736 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1737 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1738
1739 #pass
1740 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1741 status = self.verify_cluster_status(onos_instances=onos_instances)
1742 assert_equal(status, True)
1743 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1744 cluster_ips = self.get_cluster_current_member_ips()
1745 for ip in cluster_ips:
1746 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1747 assert_equal(len(device_dict.keys()),onos_instances)
1748 for device in device_dict.keys():
1749 log.info("Device is %s"%device_dict[device])
1750 assert_not_equal(device_dict[device]['master'],'none')
1751 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1752 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1753
1754 #pass
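    # Restarts the member that currently masters at least one device and expects it to hold no device
    # masterships immediately after coming back, with the overall device count unchanged.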
1755 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1756 status = self.verify_cluster_status(onos_instances=onos_instances)
1757 assert_equal(status, True)
1758 onos_names_ips = self.get_cluster_container_names_ips()
1759 master_count = self.get_number_of_devices_of_master()
1760 log.info('Master count information is %s'%master_count)
1761 total_devices = 0
1762 for master in master_count.keys():
1763 total_devices += master_count[master]['size']
1764 if master_count[master]['size'] != 0:
1765 restart_ip = master
1766 assert_equal(total_devices,onos_instances)
1767 member_onos_name = onos_names_ips[restart_ip]
1768 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1769 Container(member_onos_name, Onos.IMAGE).restart()
1770 time.sleep(40)
1771 master_count = self.get_number_of_devices_of_master()
1772 log.info('Master count information after restart is %s'%master_count)
1773 total_devices = 0
1774 for master in master_count.keys():
1775 total_devices += master_count[master]['size']
1776 if master == restart_ip:
1777 assert_equal(master_count[master]['size'], 0)
1778 assert_equal(total_devices,onos_instances)
1779
1780 #pass
1781 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1782 status = self.verify_cluster_status(onos_instances=onos_instances)
1783 assert_equal(status, True)
1784 onos_names_ips = self.get_cluster_container_names_ips()
1785 master_count = self.get_number_of_devices_of_master()
1786 log.info('Master count information is %s'%master_count)
1787 total_devices = 0
1788 for master in master_count.keys():
1789 total_devices += master_count[master]['size']
1790 if master_count[master]['size'] != 0:
1791 restart_ip = master
1792 assert_equal(total_devices,onos_instances)
1793 master_onos_name = onos_names_ips[restart_ip]
1794 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1795 Container(master_onos_name, Onos.IMAGE).kill()
1796 time.sleep(40)
1797 for ip in onos_names_ips.keys():
1798 if ip != restart_ip:
1799 controller_ip = ip
1800 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1801 assert_equal(status, True)
1802 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1803 log.info('Master count information after restart is %s'%master_count)
1804 total_devices = 0
1805 for master in master_count.keys():
1806 total_devices += master_count[master]['size']
1807 if master == restart_ip:
1808 assert_equal(master_count[master]['size'], 0)
1809 assert_equal(total_devices,onos_instances)
1810
1811 #pass
1812 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1813 status = self.verify_cluster_status(onos_instances=onos_instances)
1814 assert_equal(status, True)
1815 master_count = self.get_number_of_devices_of_master()
1816 log.info('Master count information is %s'%master_count)
1817 total_devices = 0
1818 for master in master_count.keys():
1819 total_devices += int(master_count[master]['size'])
1820 if master_count[master]['size'] != 0:
1821 master_ip = master
1822 log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
1823 device_id = str(master_count[master]['devices'][0])
1824 device_count = master_count[master]['size']
1825 assert_equal(total_devices,onos_instances)
1826 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1827 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1828 assert_equal(status, True)
1829 master_count = self.get_number_of_devices_of_master()
1830 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1831 total_devices = 0
1832 for master in master_count.keys():
1833 total_devices += int(master_count[master]['size'])
1834 if master == master_ip:
1835 assert_equal(master_count[master]['size'], device_count-1)
1836 assert_equal(total_devices,onos_instances)
1837
1838 #pass
1839 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1840 status = self.verify_cluster_status(onos_instances=onos_instances)
1841 assert_equal(status, True)
1842 master_count = self.get_number_of_devices_of_master()
1843 log.info('Master count information is %s'%master_count)
1844 total_devices = 0
1845 for master in master_count.keys():
1846 total_devices += master_count[master]['size']
1847 assert_equal(total_devices,onos_instances)
1848 log.info('Restarting cluster')
1849 cord_test_onos_restart()
1850 time.sleep(60)
1851 master_count = self.get_number_of_devices_of_master()
1852 log.info('Master count information after restart is %s'%master_count)
1853 total_devices = 0
1854 for master in master_count.keys():
1855 total_devices += master_count[master]['size']
1856 assert_equal(total_devices,onos_instances)