# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl, get_mac
from OltConfig import OltConfig
from socket import socket
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart, cord_test_restart_cluster
from portmaps import g_subscriber_port_map
from scapy.all import *
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
from OnosLog import OnosLog
from CordLogger import CordLogger
import os
import json
import random
import collections
log.setLevel('INFO')

class cluster_exchange(CordLogger):
    test_path = os.path.dirname(os.path.realpath(__file__))
    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
    mac = RandMAC()._fix()
    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
    igmp_ip = IP(dst = '224.0.0.22')
    ONOS_INSTANCES = 3
    V_INF1 = 'veth0'
    TLS_TIMEOUT = 100
    device_id = 'of:' + get_mac()
    igmp = cluster_igmp()
    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10',end_ip = '224.1.10.49')
    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35',end_ip='38.24.35.56')
    tls = cluster_tls()
    flows = cluster_flows()
    proxyarp = cluster_proxyarp()
    vrouter = cluster_vrouter()
    acl = cluster_acl()
    dhcprelay = cluster_dhcprelay()
    subscriber = cluster_subscriber()
    testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_graceful_controller_restarts',
                       'test_cluster_single_controller_restarts', 'test_cluster_restarts')

    def setUp(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).setUp()

    def tearDown(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).tearDown()

    def get_controller(self):
        controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
        controller = controller.split(',')[0]
        return controller

    @classmethod
    def get_controllers(cls):
        controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
        return controllers.split(',')

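    #open an ONOS CLI session to the given controller, retrying up to 30 times (2s apart) until a handle is obtained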
    def cliEnter(self, controller = None):
        retries = 0
        while retries < 30:
            self.cli = OnosCliDriver(controller = controller, connect = True)
            if self.cli.handle:
                break
            else:
                retries += 1
                time.sleep(2)

    def cliExit(self):
        self.cli.disconnect()

    def get_leader(self, controller = None):
        self.cliEnter(controller = controller)
        try:
            result = json.loads(self.cli.leaders(jsonFormat = True))
        except:
            result = None

        if result is None:
            log.info('Leaders command failure for controller %s' %controller)
        else:
            log.info('Leaders returned: %s' %result)
        self.cliExit()
        return result

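    #gracefully shut down an ONOS instance by issuing 'shutdown' over its CLI; returns False if the shutdown failed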
    def onos_shutdown(self, controller = None):
        status = True
        self.cliEnter(controller = controller)
        try:
            self.cli.shutdown(timeout = 10)
        except:
            log.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
            status = False

        self.cliExit()
        return status

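    #set the log level for an application on the given controllers through CordLogger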
    def log_set(self, level = None, app = 'org.onosproject', controllers = None):
        CordLogger.logSet(level = level, app = app, controllers = controllers, forced = True)

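    #collect the 'leaders' output for a single controller or a list of controllers,
    #returning a map of controller ip -> leaders output (None on failure)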
    def get_leaders(self, controller = None):
        result_map = {}
        if controller is None:
            controller = self.get_controller()
        if type(controller) in [ list, tuple ]:
            for c in controller:
                leaders = self.get_leader(controller = c)
                result_map[c] = leaders
        else:
            leaders = self.get_leader(controller = controller)
            result_map[controller] = leaders
        return result_map

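    #return the list of controllers on which the 'leaders' command failed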
    def verify_leaders(self, controller = None):
        leaders_map = self.get_leaders(controller = controller)
        failed = [ k for k,v in leaders_map.items() if v is None ]
        return failed

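    #poll the cluster 'summary' output (up to 10 tries) and check the node count:
    #with verify=True the count must match onos_instances exactly, otherwise at least onos_instances nodes must be present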
    def verify_cluster_status(self,controller = None,onos_instances=ONOS_INSTANCES,verify=False):
        tries = 0
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
                if cluster_summary:
                    log.info("cluster 'summary' command output is %s"%cluster_summary)
                    nodes = cluster_summary['nodes']
                    if verify:
                        if nodes == onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                    else:
                        if nodes >= onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return False
        except:
            raise Exception('Failed to get cluster members')
            return False

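    #poll the cluster 'nodes' output and return the member IPs sorted by their last octet;
    #nodes_filter, when given, is applied to the raw node list before the IPs are extracted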
    def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
        tries = 0
        cluster_ips = []
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
                if cluster_nodes:
                    log.info("cluster 'nodes' output is %s"%cluster_nodes)
                    if nodes_filter:
                        cluster_nodes = nodes_filter(cluster_nodes)
                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
                    self.cliExit()
                    cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
                    return cluster_ips
                else:
                    tries += 1
            self.cliExit()
            return cluster_ips
        except:
            raise Exception('Failed to get cluster members')
            return cluster_ips

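    #build a two-way map between controller IPs and their ONOS container names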
    def get_cluster_container_names_ips(self,controller=None):
        onos_names_ips = {}
        controllers = self.get_controllers()
        i = 0
        for controller in controllers:
            if i == 0:
                name = Onos.NAME
            else:
                name = '{}-{}'.format(Onos.NAME, i+1)
            onos_names_ips[controller] = name
            onos_names_ips[name] = controller
            i += 1
        return onos_names_ips
        # onos_ips = self.get_cluster_current_member_ips(controller=controller)
        # onos_names_ips[onos_ips[0]] = Onos.NAME
        # onos_names_ips[Onos.NAME] = onos_ips[0]
        # for i in range(1,len(onos_ips)):
        #     name = '{0}-{1}'.format(Onos.NAME,i+1)
        #     onos_names_ips[onos_ips[i]] = name
        #     onos_names_ips[name] = onos_ips[i]

        # return onos_names_ips

    #identifying current master of a connected device, not tested
    def get_cluster_current_master_standbys(self,controller=None,device_id=device_id):
        master = None
        standbys = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        log.info('Verifying device info in line %s'%device)
                        if device['id'] == device_id:
                            master = str(device['master'])
                            standbys = map(lambda d: str(d), device['standbys'])
                            log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
                            self.cliExit()
                            return master, standbys
                    self.cliExit()
                    return master, standbys
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return master,standbys
        except:
            raise Exception('Failed to get cluster members')
            return master,standbys

    def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
        '''returns the master and standbys of all devices connected to the ONOS cluster instance'''
        device_dict = {}
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_dict = {}
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
                        for i in range(len(device_dict[device['id']]['standbys'])):
                            device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
                        log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
                    self.cliExit()
                    return device_dict
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_dict
        except:
            raise Exception('Failed to get cluster members')
            return device_dict

    #identify current master of a connected device, not tested
    def get_cluster_connected_devices(self,controller=None):
        '''returns all the devices connected to the ONOS cluster'''
        device_list = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_list = []
                devices = json.loads(self.cli.devices(jsonFormat = True))
                log.info("cluster 'devices' command output is %s"%devices)
                if devices:
                    for device in devices:
                        log.info('device id is %s'%device['id'])
                        device_list.append(str(device['id']))
                    self.cliExit()
                    return device_list
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_list
        except:
            raise Exception('Failed to get cluster members')
            return device_list

    def get_number_of_devices_of_master(self,controller=None):
        '''returns master-device pairs, i.e. which devices each master controls'''
        master_count = {}
        try:
            cli = self.cliEnter(controller = controller)
            masters = json.loads(self.cli.masters(jsonFormat = True))
            if masters:
                for master in masters:
                    master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
                return master_count
            else:
                return master_count
        except:
            raise Exception('Failed to get cluster members')
            return master_count

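    #change the mastership of a device to new_master with the 'device-role' command and verify the change took effect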
    def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
        if new_master is None: return False
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_equal(master,new_master)
        log.info('Cluster master changed to %s successfully'%new_master)

    def withdraw_cluster_current_mastership(self,master_ip=None,device_id=device_id,controller=None):
        '''current master loses its mastership and hence a new master will be elected'''
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_not_equal(new_master_ip,master_ip)
        log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
        log.info('Cluster new master is %s'%new_master_ip)
        return True

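    #helper for the controller restart tests: restart the cluster members one by one (round robin),
    #scan every node's log for StorageException$Timeout, verify the 'leaders' command on all nodes,
    #and archive the per-iteration logs; with graceful=True the controller is shut down over its CLI before the restart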
    def cluster_controller_restarts(self, graceful = False):
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call verifies the cluster once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration, controller = None):
            adjacent_controller = None
            adjacent_controllers = None
            if controller:
                adjacent_controllers = list(set(controllers) - set([controller]))
                adjacent_controller = adjacent_controllers[0]
            for node in controllers:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    failed = self.verify_leaders(controllers)
                    if failed:
                        log.info('Leaders command failed on nodes: %s' %failed)
                        log.error('Test failed on ITERATION %d' %iteration)
                        CordLogger.archive_results(self._testMethodName,
                                                   controllers = controllers,
                                                   iteration = 'FAILED')
                    assert_equal(len(failed), 0)
                    return controller

            try:
                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
                log.info('ONOS cluster formed with controllers: %s' %ips)
                st = True
            except:
                st = False

            failed = self.verify_leaders(controllers)
            if failed:
                log.error('Test failed on ITERATION %d' %iteration)
                CordLogger.archive_results(self._testMethodName,
                                           controllers = controllers,
                                           iteration = 'FAILED')
            assert_equal(len(failed), 0)
            if st is False:
                log.info('No storage exception and ONOS cluster was not formed successfully')
            else:
                controller = None

            return controller

        next_controller = None
        tries = 10
        for num in range(tries):
            index = num % ctlr_len
            #index = random.randrange(0, ctlr_len)
            controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
            controller = onos_map[controller_name]
            log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
            try:
                #enable debug log for the other controllers before restarting this controller
                adjacent_controllers = list( set(controllers) - set([controller]) )
                self.log_set(controllers = adjacent_controllers)
                self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
                if graceful is True:
                    log.info('Gracefully shutting down controller: %s' %controller)
                    self.onos_shutdown(controller)
                cord_test_onos_restart(node = controller, timeout = 0)
                self.log_set(controllers = controller)
                self.log_set(app = 'io.atomix', controllers = controller)
                time.sleep(60)
            except:
                time.sleep(5)
                continue

            #first archive the test case logs for this run
            CordLogger.archive_results(self._testMethodName,
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            next_controller = check_exception(num, controller = controller)

    def test_cluster_controller_restarts(self):
        '''Test the cluster by repeatedly killing the controllers'''
        self.cluster_controller_restarts()

    def test_cluster_graceful_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the controllers gracefully'''
        self.cluster_controller_restarts(graceful = True)

    def test_cluster_single_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the same controller'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call verifies the cluster once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration, controller, inclusive = False):
            adjacent_controllers = list(set(controllers) - set([controller]))
            adjacent_controller = adjacent_controllers[0]
            controller_list = adjacent_controllers if inclusive == False else controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                log.error('Test failed on ITERATION %d' %iteration)
                CordLogger.archive_results('test_cluster_single_controller_restarts',
                                           controllers = controllers,
                                           iteration = 'FAILED')
                assert_equal(len(failed), 0)
                return controller

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if controller in ips and inclusive is False:
                    log.info('Controller %s still ACTIVE on Node %s after it was shutdown' %(controller, ctlr))
                if controller not in ips and inclusive is True:
                    log.info('Controller %s still INACTIVE on Node %s after it was restarted' %(controller, ctlr))

            return controller

        tries = 10
        #choose a random controller for shutdown/restarts
        controller = controllers[random.randrange(0, ctlr_len)]
        controller_name = onos_map[controller]
        ##enable the log level for the controllers
        self.log_set(controllers = controllers)
        self.log_set(app = 'io.atomix', controllers = controllers)
        for num in range(tries):
            log.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
            try:
                cord_test_onos_shutdown(node = controller)
                time.sleep(20)
            except:
                time.sleep(5)
                continue
            #check for exceptions on the adjacent nodes
            check_exception(num, controller)
            #Now restart the controller back
            log.info('Restarting back the controller %s' %controller_name)
            cord_test_onos_restart(node = controller)
            self.log_set(controllers = controller)
            self.log_set(app = 'io.atomix', controllers = controller)
            time.sleep(60)
            #archive the logs for this run
            CordLogger.archive_results('test_cluster_single_controller_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            check_exception(num, controller, inclusive = True)

    def test_cluster_restarts(self):
        '''Test the cluster by repeatedly restarting the entire cluster'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call verifies the cluster once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration):
            controller_list = controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                log.error('Test failed on ITERATION %d' %iteration)
                CordLogger.archive_results('test_cluster_restarts',
                                           controllers = controllers,
                                           iteration = 'FAILED')
                assert_equal(len(failed), 0)
                return

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if len(ips) != len(controllers):
                    log.error('Test failed on ITERATION %d' %iteration)
                    CordLogger.archive_results('test_cluster_restarts',
                                               controllers = controllers,
                                               iteration = 'FAILED')
                assert_equal(len(ips), len(controllers))

        tries = 10
        for num in range(tries):
            log.info('ITERATION: %d. Restarting cluster with controllers at %s' %(num+1, controllers))
            try:
                cord_test_restart_cluster()
                self.log_set(controllers = controllers)
                self.log_set(app = 'io.atomix', controllers = controllers)
                log.info('Delaying before verifying cluster status')
                time.sleep(60)
            except:
                time.sleep(10)
                continue

            #archive the logs for this run before verification
            CordLogger.archive_results('test_cluster_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            #check for exceptions on the nodes
            check_exception(num)

    #pass
    def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster exists with %d ONOS instances'%onos_instances)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d nodes to the ONOS cluster' %add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Removing cluster current master %s'%(master))
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
        assert_equal(status, True)
        new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
        assert_not_equal(master,new_master)
        log.info('Successfully removed cluster master instance')

    def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = standbys[0])
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
        assert_equal(status, True)

    def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = standbys[0])
        log.info('Removing cluster member %s'%standbys[1])
        cord_test_onos_shutdown(node = standbys[1])
        time.sleep(60)
        onos_instances = onos_instances - 2
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
        assert_equal(status, True)

    def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(remove):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Removing onos container with name %s'%standbys[i])
            cord_test_onos_shutdown(node = standbys[i])
        time.sleep(60)
        onos_instances = onos_instances - remove
        status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
        assert_equal(status, True)

    #nottest test cluster not coming up properly if member goes down
    def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, onos_instances - i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        onos_instances = onos_instances-remove
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = onos_instances-remove
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        onos_instances = onos_instances+add
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Restarting cluster')
        cord_test_onos_restart()
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Restarting cluster master %s'%master)
        cord_test_onos_restart(node = master)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up after master restart as expected')

    #test fails. master changes after restart. Need to check correct behavior.
    def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master1, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master1]
        log.info('Restarting cluster master %s'%master1)
        cord_test_onos_restart(node = master1)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master2, standbys = self.get_cluster_current_master_standbys()
        assert_equal(master1,master2)
        log.info('Cluster master is the same before and after the cluster master restart, as expected')

    def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Restarting cluster member %s'%standbys[0])
        cord_test_onos_restart(node = standbys[0])
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting one member')

    def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
        cord_test_onos_restart(node = standbys[0])
        cord_test_onos_restart(node = standbys[1])
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting two members')

    def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status,True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(members):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Restarting cluster member %s'%standbys[i])
            cord_test_onos_restart(node = standbys[i])

        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting %d members'%members)

    def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        log.info('Cluster current master of devices is %s'%master)
        self.change_master_current_cluster(new_master=standbys[0])
        log.info('Cluster master changed successfully')

    #tested on single onos setup.
    def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        for onos_ip in onos_ips:
            tries = 0
            flag = False
            try:
                self.cliEnter(controller = onos_ip)
                while tries <= 5:
                    routes = json.loads(self.cli.routes(jsonFormat = True))
                    if routes:
                        assert_equal(len(routes['routes4']), networks)
                        self.cliExit()
                        flag = True
                        break
                    else:
                        tries += 1
                        time.sleep(1)
                assert_equal(flag, True)
            except:
                log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
                raise

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res,True)
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        log.info('Verifying vrouter traffic after cluster master is down')
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        cord_test_onos_restart()
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        time.sleep(15)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before master down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        log.info('Verifying vrouter traffic after app deactivated')
        time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        log.info('Verifying vrouter traffic after master down')
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before cluster member down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        log.info('Verifying vrouter after cluster member down')
        cord_test_onos_shutdown(node = standbys[0])
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster goes down

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[1]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster member restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart(node = standbys[1])
        log.info('Verifying vrouter traffic after cluster member restart')
        self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart()
        log.info('Verifying vrouter traffic after cluster restart')
        self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts

    #test fails because flow state is in pending_add in onos
    def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
        ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            udpSrc = ingress_map['udp_port'],
                            udpDst = egress_map['udp_port'],
                            controller=master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
                self.success = True
            sniff(timeout=2,
                  lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
                  and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])

        for i in [0,1]:
            if i == 1:
                cord_test_onos_shutdown(node = master)
                log.info('Verifying flows traffic after master killed')
                time.sleep(45)
            else:
                log.info('Verifying flows traffic before master killed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
            L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
            pkt = L2/L3/L4
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1' }
        ingress_map = { 'ip': '192.168.40.1' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ecn = 1,
                            controller=master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
                self.success = True
            sniff(count=2, timeout=5,
                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
                  and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
                  iface = self.flows.port_map[egress])
        for i in [0,1]:
            if i == 1:
                log.info('Changing cluster master to %s'%standbys[0])
                self.change_master_current_cluster(new_master=standbys[0])
                log.info('Verifying flow traffic after cluster master changed')
            else:
                log.info('Verifying flow traffic before cluster master changed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
            pkt = L2/L3
            log.info('Sending a packet to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    #pass
    def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master,standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
        ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ipv6_extension = 0,
                            controller=master
                            )

        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
                self.success = True
            sniff(timeout=2,count=5,
                  lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
        for i in [0,1]:
            if i == 1:
                log.info('Restart cluster current master %s'%master)
                Container(master_onos_name,Onos.IMAGE).restart()
                time.sleep(45)
                log.info('Verifying flow traffic after master restart')
            else:
                log.info('Verifying flow traffic before master restart')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth
            L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
            pkt = L2/L3
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

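    #helpers for the IGMP tests: send multicast data for a group and verify it is received on the subscriber interface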
1076 def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
1077 dst_mac = self.igmp.iptomac(group)
1078 eth = Ether(dst= dst_mac)
1079 ip = IP(dst=group,src=source)
1080 data = repr(monotonic.monotonic())
1081 sendp(eth/ip/data,count=20, iface = intf)
1082 pkt = (eth/ip/data)
1083 log.info('multicast traffic packet %s'%pkt.show())
1084
1085 def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
1086 log.info('verifying multicast traffic for group %s from source %s'%(group,source))
1087 self.success = False
1088 def recv_task():
1089 def igmp_recv_cb(pkt):
1090 log.info('multicast data received for group %s from source %s'%(group,source))
1091 self.success = True
1092 sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
1093 t = threading.Thread(target = recv_task)
1094 t.start()
1095 self.send_multicast_data_traffic(group,source=source)
1096 t.join()
1097 return self.success
1098
1099 #pass
1100 def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
1101 status = self.verify_cluster_status(onos_instances=onos_instances)
1102 assert_equal(status, True)
1103 master, standbys = self.get_cluster_current_master_standbys()
1104 assert_equal(len(standbys), (onos_instances-1))
1105 onos_names_ips = self.get_cluster_container_names_ips()
1106 master_onos_name = onos_names_ips[master]
1107 self.igmp.setUp(controller=master)
1108 groups = ['224.2.3.4','230.5.6.7']
1109 src_list = ['2.2.2.2','3.3.3.3']
1110 self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
1111 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1112 iface = self.V_INF1, delay = 2)
1113 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1114 iface = self.V_INF1, delay = 2)
1115 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1116 assert_equal(status,True)
1117 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1118 assert_equal(status,False)
1119 log.info('restarting cluster master %s'%master)
1120 Container(master_onos_name,Onos.IMAGE).restart()
1121 time.sleep(60)
1122 log.info('verifying multicast data traffic after master restart')
1123 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1124 assert_equal(status,True)
1125 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1126 assert_equal(status,False)
1127
1128 #pass
1129 def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
1130 status = self.verify_cluster_status(onos_instances=onos_instances)
1131 assert_equal(status, True)
1132 master, standbys = self.get_cluster_current_master_standbys()
1133 assert_equal(len(standbys), (onos_instances-1))
1134 onos_names_ips = self.get_cluster_container_names_ips()
1135 master_onos_name = onos_names_ips[master]
1136 self.igmp.setUp(controller=master)
1137 groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
1138 src_list = [self.igmp.randomsourceip()]
1139 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1140 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1141 iface = self.V_INF1, delay = 2)
1142 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1143 iface = self.V_INF1, delay = 2)
1144 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1145 assert_equal(status,True)
1146 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1147 assert_equal(status,False)
1148 log.info('Killing cluster master %s'%master)
1149 Container(master_onos_name,Onos.IMAGE).kill()
1150 time.sleep(60)
1151 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1152 assert_equal(status, True)
1153 log.info('Verifying multicast data traffic after cluster master down')
1154 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1155 assert_equal(status,True)
1156 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1157 assert_equal(status,False)
1158
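    #Test flow: with an INCLUDE-mode join receiving traffic, kill the cluster master and
    #poll verify_igmp_data_traffic() once per loop iteration (with a one second sleep on
    #failure) to estimate how long multicast forwarding takes to recover.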
1159 def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
1160 status = self.verify_cluster_status(onos_instances=onos_instances)
1161 assert_equal(status, True)
1162 master, standbys = self.get_cluster_current_master_standbys()
1163 assert_equal(len(standbys), (onos_instances-1))
1164 onos_names_ips = self.get_cluster_container_names_ips()
1165 master_onos_name = onos_names_ips[master]
1166 self.igmp.setUp(controller=master)
1167 groups = [self.igmp.random_mcast_ip()]
1168 src_list = [self.igmp.randomsourceip()]
1169 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1170 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1171 iface = self.V_INF1, delay = 2)
1172 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1173 assert_equal(status,True)
1174 log.info('Killing cluster master %s'%master)
1175 Container(master_onos_name,Onos.IMAGE).kill()
1176 count = 0
1177 for i in range(60):
1178 log.info('Verifying multicast data traffic after cluster master down')
1179 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1180 if status:
1181 break
1182 else:
1183 count += 1
1184 time.sleep(1)
1185 assert_equal(status, True)
1186 log.info('Time taken to recover traffic after the cluster master went down is about %d seconds'%count)
1187
1188
1189 #pass
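    #Test flow: verify traffic for an INCLUDE join, hand mastership to a standby and
    #re-verify, then send an IGMPv3 TO_EXCLUDE record to leave the group and confirm
    #traffic stops under the new master.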
1190 def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
1191 status = self.verify_cluster_status(onos_instances=onos_instances)
1192 assert_equal(status, True)
1193 master, standbys = self.get_cluster_current_master_standbys()
1194 assert_equal(len(standbys), (onos_instances-1))
1195 self.igmp.setUp(controller=master)
1196 groups = [self.igmp.random_mcast_ip()]
1197 src_list = [self.igmp.randomsourceip()]
1198 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1199 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1200 iface = self.V_INF1, delay = 2)
1201 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1202 assert_equal(status,True)
1203 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1204 self.change_cluster_current_master(new_master=standbys[0])
1205 log.info('Verifying multicast traffic after cluster master change')
1206 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1207 assert_equal(status,True)
1208 log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
1209 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
1210 iface = self.V_INF1, delay = 1)
1211 time.sleep(10)
1212 status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
1213 assert_equal(status,False)
1214
1215 #pass
1216 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1217 status = self.verify_cluster_status(onos_instances=onos_instances)
1218 assert_equal(status, True)
1219 master,standbys = self.get_cluster_current_master_standbys()
1220 assert_equal(len(standbys), (onos_instances-1))
1221 self.igmp.setUp(controller=master)
1222 groups = [self.igmp.random_mcast_ip()]
1223 src_list = [self.igmp.randomsourceip()]
1224 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1225 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1226 self.change_cluster_current_master(new_master = standbys[0])
1227 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1228 iface = self.V_INF1, delay = 2)
1229 time.sleep(1)
1230 self.change_cluster_current_master(new_master = master)
1231 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1232 assert_equal(status,True)
1233
1234 #pass
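    #The EAP-TLS tests below run TLSAuthTest inside a twisted deferred; @deferred(TLS_TIMEOUT)
    #fails the test if the callback has not fired within TLS_TIMEOUT seconds. The pattern,
    #roughly (names as used in this file):
    #   df = defer.Deferred()
    #   def verify(df):
    #       TLSAuthTest().runTest()
    #       df.callback(0)
    #   reactor.callLater(0, verify, df)
    #   return df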
ChetanGaonker2099d722016-10-07 15:16:58 -07001235 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001236 def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001237 status = self.verify_cluster_status(onos_instances=onos_instances)
1238 assert_equal(status, True)
1239 master, standbys = self.get_cluster_current_master_standbys()
1240 assert_equal(len(standbys), (onos_instances-1))
1241 self.tls.setUp(controller=master)
1242 df = defer.Deferred()
1243 def eap_tls_verify(df):
1244 tls = TLSAuthTest()
1245 tls.runTest()
1246 df.callback(0)
1247 reactor.callLater(0, eap_tls_verify, df)
1248 return df
1249
1250 @deferred(120)
ChetanGaonker689b3862016-10-17 16:25:01 -07001251 def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001252 master, standbys = self.get_cluster_current_master_standbys()
1253 assert_equal(len(standbys), (onos_instances-1))
1254 self.tls.setUp()
1255 df = defer.Deferred()
1256 def eap_tls_verify(df):
1257 tls = TLSAuthTest()
1258 tls.runTest()
1259 df.callback(0)
1260 for i in [0,1]:
1261 if i == 1:
1262 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1263 self.change_master_current_cluster(new_master=standbys[0])
1264 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1265 else:
1266 log.info('Verifying tls authentication before cluster master change')
1267 reactor.callLater(0, eap_tls_verify, df)
1268 return df
1269
1270 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001271 def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001272 status = self.verify_cluster_status(onos_instances=onos_instances)
1273 assert_equal(status, True)
1274 master, standbys = self.get_cluster_current_master_standbys()
1275 assert_equal(len(standbys), (onos_instances-1))
1276 onos_names_ips = self.get_cluster_container_names_ips()
1277 master_onos_name = onos_names_ips[master]
1278 self.tls.setUp()
1279 df = defer.Deferred()
1280 def eap_tls_verify(df):
1281 tls = TLSAuthTest()
1282 tls.runTest()
1283 df.callback(0)
1284 for i in [0,1]:
1285 if i == 1:
1286 log.info('Killing cluster current master %s'%master)
A R Karthick3b2e0372016-12-14 17:37:43 -08001287 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001288 time.sleep(20)
1289 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1290 assert_equal(status, True)
1291 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1292 log.info('Verifying tls authentication after killing cluster master')
1293 reactor.callLater(0, eap_tls_verify, df)
1294 return df
1295
1296 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001297 def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001298 status = self.verify_cluster_status(onos_instances=onos_instances)
1299 assert_equal(status, True)
1300 master, standbys = self.get_cluster_current_master_standbys()
1301 assert_equal(len(standbys), (onos_instances-1))
1302 onos_names_ips = self.get_cluster_container_names_ips()
1303 member_onos_name = onos_names_ips[standbys[0]]
1304 self.tls.setUp()
1305 df = defer.Deferred()
1306 def eap_tls_no_cert(df):
1307 def tls_no_cert_cb():
1308 log.info('TLS authentication failed with no certificate')
1309 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1310 tls.runTest()
1311 assert_equal(tls.failTest, True)
1312 df.callback(0)
1313 for i in [0,1]:
1314 if i == 1:
1315 log.info('Restart cluster member %s'%standbys[0])
1316 Container(member_onos_name,Onos.IMAGE).restart()
1317 time.sleep(20)
1318 status = self.verify_cluster_status(onos_instances=onos_instances)
1319 assert_equal(status, True)
1320 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1321 log.info('Verifying tls authentication after member restart')
1322 reactor.callLater(0, eap_tls_no_cert, df)
1323 return df
1324
ChetanGaonker689b3862016-10-17 16:25:01 -07001325 #pass
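    #Test flow: verify ONOS answers ARP for each configured host, move mastership to a
    #standby and re-verify, then deactivate the proxyarp app (expect no ARP replies) and
    #reactivate it (expect replies again).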
1326 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1327 status = self.verify_cluster_status(onos_instances=onos_instances)
1328 assert_equal(status,True)
1329 master,standbys = self.get_cluster_current_master_standbys()
1330 assert_equal(len(standbys),(onos_instances-1))
1331 self.proxyarp.setUpClass()
1332 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1333 ingress = hosts+1
1334 for hostip, hostmac in hosts_config:
1335 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1336 time.sleep(1)
1337 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1338 self.change_cluster_current_master(new_master=standbys[0])
1339 log.info('verifying proxyarp after master change')
1340 for hostip, hostmac in hosts_config:
1341 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1342 time.sleep(1)
1343 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1344 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1345 time.sleep(3)
1346 for hostip, hostmac in hosts_config:
1347 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1348 time.sleep(1)
1349 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1350 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1351 time.sleep(3)
1352 for hostip, hostmac in hosts_config:
1353 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1354 time.sleep(1)
ChetanGaonker2099d722016-10-07 15:16:58 -07001355
ChetanGaonker689b3862016-10-17 16:25:01 -07001356 #pass
1357 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001358 status = self.verify_cluster_status(onos_instances=onos_instances)
1359 assert_equal(status, True)
1360 master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001361 assert_equal(len(standbys), (onos_instances-1))
1362 onos_names_ips = self.get_cluster_container_names_ips()
1363 member_onos_name = onos_names_ips[standbys[1]]
1364 self.proxyarp.setUpClass()
1365 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1366 ingress = hosts+1
1367 for hostip, hostmac in hosts_config:
1368 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1369 time.sleep(1)
1370 log.info('killing cluster member %s'%standbys[1])
1371 Container(member_onos_name,Onos.IMAGE).kill()
1372 time.sleep(20)
1373 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1374 assert_equal(status, True)
1375 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1376 log.info('verifying proxy arp functionality after cluster member down')
1377 for hostip, hostmac in hosts_config:
1378 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1379 time.sleep(1)
1380
1381 #pass
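    #Test flow: ten threads each send ARP requests for a different host IP on a different
    #ingress port and record the reply in success_dir keyed by thread name; the test passes
    #only if all ten threads see a proxy-ARP reply.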
1382 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1383 status = self.verify_cluster_status(onos_instances=onos_instances)
1384 assert_equal(status, True)
1385 self.proxyarp.setUpClass()
1386 master, standbys = self.get_cluster_current_master_standbys()
1387 assert_equal(len(standbys), (onos_instances-1))
1388 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1389 self.success = True
1390 ingress = hosts+1
1391 ports = range(ingress,ingress+10)
1392 hostmac = []
1393 hostip = []
1394 for ip,mac in hosts_config:
1395 hostmac.append(mac)
1396 hostip.append(ip)
1397 success_dir = {}
1398 def verify_proxyarp(*r):
1399 ingress, hostmac, hostip = r[0],r[1],r[2]
1400 def mac_recv_task():
1401 def recv_cb(pkt):
1402 log.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
1403 success_dir[current_thread().name] = True
1404 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1405 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1406 t = threading.Thread(target = mac_recv_task)
1407 t.start()
1408 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1409 log.info('Sending arp request for dest ip %s on interface %s' %
1410 (hostip,self.proxyarp.port_map[ingress]))
1411 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1412 t.join()
1413 t = []
1414 for i in range(10):
1415 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1416 for i in range(10):
1417 t[i].start()
1418 time.sleep(2)
1419 for i in range(10):
1420 t[i].join()
1421 if len(success_dir) != 10:
1422 self.success = False
1423 assert_equal(self.success, True)
1424
1425 #pass
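    #Test flow: add an 'allow' ACL rule through the REST API on the current master, move
    #mastership to a standby, then remove the same rule via the new master to show that
    #ACL state survives the mastership change.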
1426 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1427 status = self.verify_cluster_status(onos_instances=onos_instances)
1428 assert_equal(status, True)
1429 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001430 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001431 self.acl.setUp()
1432 acl_rule = ACLTest()
1433 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1434 if status is False:
1435 log.info('JSON request returned status %d' %code)
1436 assert_equal(status, True)
1437 result = acl_rule.get_acl_rules(controller=master)
1438 aclRules1 = result.json()['aclRules']
1439 log.info('Added acl rules is %s'%aclRules1)
1440 acl_Id = map(lambda d: d['id'], aclRules1)
1441 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1442 self.change_cluster_current_master(new_master=standbys[0])
1443 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1444 if status is False:
1445 log.info('JSON request returned status %d' %code)
1446 assert_equal(status, True)
1447
1448 #pass
1449 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1450 status = self.verify_cluster_status(onos_instances=onos_instances)
1451 assert_equal(status, True)
1452 master,standbys = self.get_cluster_current_master_standbys()
1453 assert_equal(len(standbys),(onos_instances-1))
1454 onos_names_ips = self.get_cluster_container_names_ips()
1455 master_onos_name = onos_names_ips[master]
1456 self.acl.setUp()
1457 acl_rule = ACLTest()
1458 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1459 if status is False:
1460 log.info('JSON request returned status %d' %code)
1461 assert_equal(status, True)
1462 result1 = acl_rule.get_acl_rules(controller=master)
1463 aclRules1 = result1.json()['aclRules']
1464 log.info('Added acl rules is %s'%aclRules1)
1465 acl_Id1 = map(lambda d: d['id'], aclRules1)
1466 log.info('Killing cluster current master %s'%master)
1467 Container(master_onos_name,Onos.IMAGE).kill()
1468 time.sleep(45)
1469 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1470 assert_equal(status, True)
1471 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1472 assert_equal(len(standbys),(onos_instances-2))
1473 assert_not_equal(new_master,master)
1474 result2 = acl_rule.get_acl_rules(controller=new_master)
1475 aclRules2 = result2.json()['aclRules']
1476 acl_Id2 = map(lambda d: d['id'], aclRules2)
1477 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1478 assert_equal(acl_Id2,acl_Id1)
1479
1480 #acl traffic scenario not working as acl rule is not getting added to onos
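    #Intended flow: push an interface config and a 'deny' ACL rule, check that matching UDP
    #traffic is dropped, kill two standby members, and check the rule still drops traffic on
    #the reduced cluster.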
1481 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1482 status = self.verify_cluster_status(onos_instances=onos_instances)
1483 assert_equal(status, True)
1484 master,standbys = self.get_cluster_current_master_standbys()
1485 assert_equal(len(standbys),(onos_instances-1))
1486 onos_names_ips = self.get_cluster_container_names_ips()
1487 member1_onos_name = onos_names_ips[standbys[0]]
1488 member2_onos_name = onos_names_ips[standbys[1]]
1489 ingress = self.acl.ingress_iface
1490 egress = self.acl.CURRENT_PORT_NUM
1491 acl_rule = ACLTest()
1492 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1493 self.acl.CURRENT_PORT_NUM += 1
1494 time.sleep(5)
1495 if status is False:
1496 log.info('JSON request returned status %d' %code)
1497 assert_equal(status, True)
1498 srcMac = '00:00:00:00:00:11'
1499 dstMac = host_ip_mac[0][1]
1500 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1501 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1502 time.sleep(10)
1503 if status is False:
1504 log.info('JSON request returned status %d' %code)
1505 assert_equal(status, True)
1506 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1507 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1508 Container(member1_onos_name, Onos.IMAGE).kill()
1509 Container(member2_onos_name, Onos.IMAGE).kill()
1510 time.sleep(40)
1511 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1512 assert_equal(status, True)
1513 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1514 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1515
1516 #pass
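    #Test flow: obtain a lease through the DHCP relay, change the cluster master, release the
    #address via the new master, and confirm a fresh discover hands back the same address.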
1517 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1518 status = self.verify_cluster_status(onos_instances=onos_instances)
1519 assert_equal(status, True)
1520 master,standbys = self.get_cluster_current_master_standbys()
1521 assert_equal(len(standbys),(onos_instances-1))
1522 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001523 mac = self.dhcprelay.get_mac(iface)
1524 self.dhcprelay.host_load(iface)
1525 ##We use the defaults for this test; it serves as an example for others.
1526 ##There is no need to restart the dhcpd server when retaining the default config.
1527 config = self.dhcprelay.default_config
1528 options = self.dhcprelay.default_options
1529 subnet = self.dhcprelay.default_subnet_config
1530 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1531 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1532 config = config,
1533 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001534 subnet = subnet,
1535 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001536 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1537 cip, sip = self.dhcprelay.send_recv(mac)
1538 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1539 self.change_master_current_cluster(new_master=standbys[0])
1540 log.info('Releasing ip %s to server %s' %(cip, sip))
1541 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1542 log.info('Triggering DHCP discover again after release')
1543 cip2, sip2 = self.dhcprelay.send_recv(mac)
1544 log.info('Verifying released IP was given back on rediscover')
1545 assert_equal(cip, cip2)
1546 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1547 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001548 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001549
ChetanGaonker689b3862016-10-17 16:25:01 -07001550
1551 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1552 status = self.verify_cluster_status(onos_instances=onos_instances)
1553 assert_equal(status, True)
1554 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001555 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001556 onos_names_ips = self.get_cluster_container_names_ips()
1557 master_onos_name = onos_names_ips[master]
1558 self.dhcprelay.setUpClass(controller=master)
1559 mac = self.dhcprelay.get_mac(iface)
1560 self.dhcprelay.host_load(iface)
1561 ##We use the defaults for this test; it serves as an example for others.
1562 ##There is no need to restart the dhcpd server when retaining the default config.
1563 config = self.dhcprelay.default_config
1564 options = self.dhcprelay.default_options
1565 subnet = self.dhcprelay.default_subnet_config
1566 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1567 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1568 config = config,
1569 options = options,
1570 subnet = subnet,
1571 controller=master)
1572 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1573 log.info('Initiating dhcp process from client %s'%mac)
1574 cip, sip = self.dhcprelay.send_recv(mac)
1575 log.info('Killing cluster current master %s'%master)
1576 Container(master_onos_name, Onos.IMAGE).kill()
1577 time.sleep(60)
1578 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1579 assert_equal(status, True)
1580 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1581 log.info("Verifying dhcp clients gets same IP after cluster master restarts")
1582 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1583 assert_equal(new_cip, cip)
1584 self.dhcprelay.tearDownClass(controller=standbys[0])
1585
1586 #pass
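    #Test flow: request leases for three different client MACs, switching cluster mastership
    #between requests, to show the relay keeps allocating addresses across mastership changes.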
1587 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1588 status = self.verify_cluster_status(onos_instances=onos_instances)
1589 assert_equal(status, True)
1590 master,standbys = self.get_cluster_current_master_standbys()
1591 assert_equal(len(standbys),(onos_instances-1))
1592 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001593 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1594 self.dhcprelay.host_load(iface)
1595 ##We use the defaults for this test; it serves as an example for others.
1596 ##There is no need to restart the dhcpd server when retaining the default config.
1597 config = self.dhcprelay.default_config
1598 options = self.dhcprelay.default_options
1599 subnet = self.dhcprelay.default_subnet_config
1600 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1601 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1602 config = config,
1603 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001604 subnet = subnet,
1605 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001606 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1607 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1608 assert_not_equal(cip1,None)
1609 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1610 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1611 self.change_master_current_cluster(new_master=standbys[0])
1612 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1613 assert_not_equal(cip2,None)
1614 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1615 log.info('Changing cluster master from %s to %s'%(standbys[0],master))
1616 self.change_master_current_cluster(new_master=master)
1617 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1618 assert_not_equal(cip3,None)
1619 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
ChetanGaonker689b3862016-10-17 16:25:01 -07001620 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001621
ChetanGaonker689b3862016-10-17 16:25:01 -07001622 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001623 status = self.verify_cluster_status(onos_instances=onos_instances)
1624 assert_equal(status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001625 master, standbys = self.get_cluster_current_master_standbys()
 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001626 self.subscriber.num_subscribers = 5
1627 self.subscriber.num_channels = 10
1628 for i in [0,1]:
1629 if i == 1:
1630 cord_test_onos_restart()
1631 time.sleep(45)
1632 status = self.verify_cluster_status(onos_instances=onos_instances)
1633 assert_equal(status, True)
1634 log.info('Verifying cord subscriber functionality after cluster restart')
1635 else:
1636 log.info('Verifying cord subscriber functionality before cluster restart')
1637 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1638 num_channels = self.subscriber.num_channels,
1639 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1640 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1641 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1642 self.subscriber.num_channels))
1643 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001644 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001645
ChetanGaonker689b3862016-10-17 16:25:01 -07001646 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1647 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1648 status = self.verify_cluster_status(onos_instances=onos_instances)
1649 assert_equal(status, True)
1650 master,standbys = self.get_cluster_current_master_standbys()
1651 assert_equal(len(standbys),(onos_instances-1))
1652 self.subscriber.setUpClass(controller=master)
1653 self.subscriber.num_subscribers = 5
1654 self.subscriber.num_channels = 10
1655 for i in [0,1]:
1656 if i == 1:
1657 status=self.withdraw_cluster_current_mastership(master_ip=master)
1658 assert_equal(status, True)
1659 master,standbys = self.get_cluster_current_master_standbys()
1660 log.info('verifying cord subscriber functionality after cluster current master withdraw mastership')
1661 else:
1662 log.info('verifying cord subscriber functionality before cluster master withdraw mastership')
1663 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1664 num_channels = self.subscriber.num_channels,
1665 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1666 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1667 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1668 self.subscriber.num_channels),controller=master)
1669 assert_equal(test_status, True)
1670 self.subscriber.tearDownClass(controller=master)
1671
1672 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1673 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001674 status = self.verify_cluster_status(onos_instances=onos_instances)
1675 assert_equal(status, True)
1676 master, standbys = self.get_cluster_current_master_standbys()
1677 assert_equal(len(standbys),(onos_instances-1))
1678 onos_names_ips = self.get_cluster_container_names_ips()
1679 member_onos_name = onos_names_ips[standbys[0]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001680 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001681 num_subscribers = 1
1682 num_channels = 10
1683 for i in [0,1]:
1684 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001685 cord_test_onos_shutdown(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001686 time.sleep(30)
ChetanGaonker689b3862016-10-17 16:25:01 -07001687 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001688 assert_equal(status, True)
1689 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1690 else:
1691 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1692 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1693 num_channels = num_channels,
1694 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1695 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1696 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
ChetanGaonker689b3862016-10-17 16:25:01 -07001697 negative_subscriber_auth = 'all',controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001698 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001699 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001700
ChetanGaonker689b3862016-10-17 16:25:01 -07001701 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001702 status = self.verify_cluster_status(onos_instances=onos_instances)
1703 assert_equal(status, True)
1704 master, standbys = self.get_cluster_current_master_standbys()
1705 assert_equal(len(standbys),(onos_instances-1))
1706 onos_names_ips = self.get_cluster_container_names_ips()
1707 member1_onos_name = onos_names_ips[standbys[0]]
1708 member2_onos_name = onos_names_ips[standbys[1]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001709 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001710 num_subscribers = 1
1711 num_channels = 10
1712 for i in [0,1]:
1713 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001714 cord_test_onos_shutdown(node = standbys[0])
1715 cord_test_onos_shutdown(node = standbys[1])
ChetanGaonker2099d722016-10-07 15:16:58 -07001716 time.sleep(60)
1717 status = self.verify_cluster_status(onos_instances=onos_instances-2)
1718 assert_equal(status, True)
1719 log.info('Verifying cord subscriber functionality after two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1720 else:
1721 log.info('Verifying cord subscriber functionality before two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1722 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1723 num_channels = num_channels,
1724 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1725 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1726 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1727 negative_subscriber_auth = 'all')
1728 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001729 self.subscriber.tearDownClass(controller=master)
1730
1731 #pass
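    #Test flow: for every connected OVS device, check that a master has been elected and that
    #the remaining instances appear as standbys for that device.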
1732 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1733 status = self.verify_cluster_status(onos_instances=onos_instances)
1734 assert_equal(status, True)
1735 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1736 for device in device_dict.keys():
1737 log.info("Device is %s"%device_dict[device])
1738 assert_not_equal(device_dict[device]['master'],'none')
1739 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1740 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1741
1742 #pass
1743 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1744 status = self.verify_cluster_status(onos_instances=onos_instances)
1745 assert_equal(status, True)
1746 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1747 cluster_ips = self.get_cluster_current_member_ips()
1748 for ip in cluster_ips:
1749 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1750 assert_equal(len(device_dict.keys()),onos_instances)
1751 for device in device_dict.keys():
1752 log.info("Device is %s"%device_dict[device])
1753 assert_not_equal(device_dict[device]['master'],'none')
1754 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1755 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1756
1757 #pass
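    #Test flow: pick an instance that currently masters at least one device, restart its
    #container, and confirm mastership is redistributed so the restarted instance masters no
    #devices while the total device count across masters stays equal to onos_instances.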
1758 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1759 status = self.verify_cluster_status(onos_instances=onos_instances)
1760 assert_equal(status, True)
1761 onos_names_ips = self.get_cluster_container_names_ips()
1762 master_count = self.get_number_of_devices_of_master()
1763 log.info('Master count information is %s'%master_count)
1764 total_devices = 0
1765 for master in master_count.keys():
1766 total_devices += master_count[master]['size']
1767 if master_count[master]['size'] != 0:
1768 restart_ip = master
1769 assert_equal(total_devices,onos_instances)
1770 member_onos_name = onos_names_ips[restart_ip]
1771 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1772 Container(member_onos_name, Onos.IMAGE).restart()
1773 time.sleep(40)
1774 master_count = self.get_number_of_devices_of_master()
1775 log.info('Master count information after restart is %s'%master_count)
1776 total_devices = 0
1777 for master in master_count.keys():
1778 total_devices += master_count[master]['size']
1779 if master == restart_ip:
1780 assert_equal(master_count[master]['size'], 0)
1781 assert_equal(total_devices,onos_instances)
1782
1783 #pass
1784 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1785 status = self.verify_cluster_status(onos_instances=onos_instances)
1786 assert_equal(status, True)
1787 onos_names_ips = self.get_cluster_container_names_ips()
1788 master_count = self.get_number_of_devices_of_master()
1789 log.info('Master count information is %s'%master_count)
1790 total_devices = 0
1791 for master in master_count.keys():
1792 total_devices += master_count[master]['size']
1793 if master_count[master]['size'] != 0:
1794 restart_ip = master
1795 assert_equal(total_devices,onos_instances)
1796 master_onos_name = onos_names_ips[restart_ip]
1797 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1798 Container(master_onos_name, Onos.IMAGE).kill()
1799 time.sleep(40)
1800 for ip in onos_names_ips.keys():
1801 if ip != restart_ip:
1802 controller_ip = ip
1803 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1804 assert_equal(status, True)
1805 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1806 log.info('Master count information after restart is %s'%master_count)
1807 total_devices = 0
1808 for master in master_count.keys():
1809 total_devices += master_count[master]['size']
1810 if master == restart_ip:
1811 assert_equal(master_count[master]['size'], 0)
1812 assert_equal(total_devices,onos_instances)
1813
1814 #pass
1815 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1816 status = self.verify_cluster_status(onos_instances=onos_instances)
1817 assert_equal(status, True)
1818 master_count = self.get_number_of_devices_of_master()
1819 log.info('Master count information is %s'%master_count)
1820 total_devices = 0
1821 for master in master_count.keys():
1822 total_devices += int(master_count[master]['size'])
1823 if master_count[master]['size'] != 0:
1824 master_ip = master
1825 log.info('Devices of master %s are %s'%(master_count[master]['devices'],master))
1826 device_id = str(master_count[master]['devices'][0])
1827 device_count = master_count[master]['size']
1828 assert_equal(total_devices,onos_instances)
1829 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1830 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1831 assert_equal(status, True)
1832 master_count = self.get_number_of_devices_of_master()
1833 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1834 total_devices = 0
1835 for master in master_count.keys():
1836 total_devices += int(master_count[master]['size'])
1837 if master == master_ip:
1838 assert_equal(master_count[master]['size'], device_count-1)
1839 assert_equal(total_devices,onos_instances)
1840
1841 #pass
1842 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1843 status = self.verify_cluster_status(onos_instances=onos_instances)
1844 assert_equal(status, True)
1845 master_count = self.get_number_of_devices_of_master()
1846 log.info('Master count information is %s'%master_count)
1847 total_devices = 0
1848 for master in master_count.keys():
1849 total_devices += master_count[master]['size']
1850 assert_equal(total_devices,onos_instances)
1851 log.info('Restarting cluster')
1852 cord_test_onos_restart()
1853 time.sleep(60)
1854 master_count = self.get_number_of_devices_of_master()
1855 log.info('Master count information after restart is %s'%master_count)
1856 total_devices = 0
1857 for master in master_count.keys():
1858 total_devices += master_count[master]['size']
1859 assert_equal(total_devices,onos_instances)