# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl, get_mac
from OltConfig import OltConfig
from socket import socket
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart, cord_test_restart_cluster
from portmaps import g_subscriber_port_map
from scapy.all import *
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
from OnosLog import OnosLog
from CordLogger import CordLogger
from CordTestConfig import setup_module
import os
import json
import random
import collections
log.setLevel('INFO')

class cluster_exchange(CordLogger):
    test_path = os.path.dirname(os.path.realpath(__file__))
    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
    mac = RandMAC()._fix()
    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
    igmp_ip = IP(dst = '224.0.0.22')
    ONOS_INSTANCES = 3
    V_INF1 = 'veth0'
    TLS_TIMEOUT = 100
    device_id = 'of:' + get_mac()
    igmp = cluster_igmp()
    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10',end_ip = '224.1.10.49')
    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35',end_ip='38.24.35.56')
    tls = cluster_tls()
    flows = cluster_flows()
    proxyarp = cluster_proxyarp()
    vrouter = cluster_vrouter()
    acl = cluster_acl()
    dhcprelay = cluster_dhcprelay()
    subscriber = cluster_subscriber()
    testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_graceful_controller_restarts',
                       'test_cluster_single_controller_restarts', 'test_cluster_restarts')
    ITERATIONS = int(os.getenv('ITERATIONS', 10))
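    # ITERATIONS above bounds the long-running restart tests below; it defaults to 10
    # and can be overridden through the ITERATIONS environment variable.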

    def setUp(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).setUp()

    def tearDown(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).tearDown()

    def get_controller(self):
        controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
        controller = controller.split(',')[0]
        return controller

    @classmethod
    def get_controllers(cls):
        controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
        return controllers.split(',')
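    # Illustrative example (addresses assumed): with ONOS_CONTROLLER_IP='172.17.0.2,172.17.0.3,172.17.0.4',
    # get_controllers() above returns ['172.17.0.2', '172.17.0.3', '172.17.0.4'],
    # while get_controller() returns only the first entry, '172.17.0.2'.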

    def cliEnter(self, controller = None):
        retries = 0
        while retries < 30:
            self.cli = OnosCliDriver(controller = controller, connect = True)
            if self.cli.handle:
                break
            else:
                retries += 1
                time.sleep(2)

    def cliExit(self):
        self.cli.disconnect()

    def get_leader(self, controller = None):
        self.cliEnter(controller = controller)
        try:
            result = json.loads(self.cli.leaders(jsonFormat = True))
        except:
            result = None

        if result is None:
            log.info('Leaders command failure for controller %s' %controller)
        else:
            log.info('Leaders returned: %s' %result)
        self.cliExit()
        return result

    def onos_shutdown(self, controller = None):
        status = True
        self.cliEnter(controller = controller)
        try:
            self.cli.shutdown(timeout = 10)
        except:
            log.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
            status = False

        self.cliExit()
        return status

    def log_set(self, level = None, app = 'org.onosproject', controllers = None):
        CordLogger.logSet(level = level, app = app, controllers = controllers, forced = True)

    def get_leaders(self, controller = None):
        result_map = {}
        if controller is None:
            controller = self.get_controller()
        if type(controller) in [ list, tuple ]:
            for c in controller:
                leaders = self.get_leader(controller = c)
                result_map[c] = leaders
        else:
            leaders = self.get_leader(controller = controller)
            result_map[controller] = leaders
        return result_map

    def verify_leaders(self, controller = None):
        leaders_map = self.get_leaders(controller = controller)
        failed = [ k for k,v in leaders_map.items() if v == None ]
        return failed

    def verify_cluster_status(self,controller = None,onos_instances=ONOS_INSTANCES,verify=False):
        tries = 0
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
                if cluster_summary:
                    log.info("cluster 'summary' command output is %s"%cluster_summary)
                    nodes = cluster_summary['nodes']
                    if verify:
                        if nodes == onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                    else:
                        if nodes >= onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return False
        except:
            raise Exception('Failed to get cluster members')
            return False

    def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
        tries = 0
        cluster_ips = []
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
                if cluster_nodes:
                    log.info("cluster 'nodes' output is %s"%cluster_nodes)
                    if nodes_filter:
                        cluster_nodes = nodes_filter(cluster_nodes)
                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
                    self.cliExit()
                    cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
                    return cluster_ips
                else:
                    tries += 1
            self.cliExit()
            return cluster_ips
        except:
            raise Exception('Failed to get cluster members')
            return cluster_ips

    def get_cluster_container_names_ips(self,controller=None):
        onos_names_ips = {}
        controllers = self.get_controllers()
        i = 0
        for controller in controllers:
            if i == 0:
                name = Onos.NAME
            else:
                name = '{}-{}'.format(Onos.NAME, i+1)
            onos_names_ips[controller] = name
            onos_names_ips[name] = controller
            i += 1
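        # The map is bidirectional, e.g. (container names assumed for illustration):
        # { '172.17.0.2': 'cord-onos', 'cord-onos': '172.17.0.2',
        #   '172.17.0.3': 'cord-onos-2', 'cord-onos-2': '172.17.0.3', ... }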
        return onos_names_ips
        # onos_ips = self.get_cluster_current_member_ips(controller=controller)
        # onos_names_ips[onos_ips[0]] = Onos.NAME
        # onos_names_ips[Onos.NAME] = onos_ips[0]
        # for i in range(1,len(onos_ips)):
        #     name = '{0}-{1}'.format(Onos.NAME,i+1)
        #     onos_names_ips[onos_ips[i]] = name
        #     onos_names_ips[name] = onos_ips[i]

        # return onos_names_ips

    #identifying current master of a connected device, not tested
    def get_cluster_current_master_standbys(self,controller=None,device_id=device_id):
        master = None
        standbys = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        log.info('Verifying device info in line %s'%device)
                        if device['id'] == device_id:
                            master = str(device['master'])
                            standbys = map(lambda d: str(d), device['standbys'])
                            log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
                            self.cliExit()
                            return master, standbys
                    self.cliExit()
                    return master, standbys
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return master,standbys
        except:
            raise Exception('Failed to get cluster members')
            return master,standbys

    def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
        '''Returns master and standbys of all the devices connected to the ONOS cluster instance'''
        device_dict = {}
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_dict = {}
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
                        for i in range(len(device_dict[device['id']]['standbys'])):
                            device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
                        log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
                    self.cliExit()
                    return device_dict
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_dict
        except:
            raise Exception('Failed to get cluster members')
            return device_dict

    #identify current master of a connected device, not tested
    def get_cluster_connected_devices(self,controller=None):
        '''returns all the devices connected to ONOS cluster'''
        device_list = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_list = []
                devices = json.loads(self.cli.devices(jsonFormat = True))
                log.info("cluster 'devices' command output is %s"%devices)
                if devices:
                    for device in devices:
                        log.info('device id is %s'%device['id'])
                        device_list.append(str(device['id']))
                    self.cliExit()
                    return device_list
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_list
        except:
            raise Exception('Failed to get cluster members')
            return device_list

    def get_number_of_devices_of_master(self,controller=None):
        '''Returns master-device pairs, i.e. which devices each master currently owns'''
        master_count = {}
        try:
            cli = self.cliEnter(controller = controller)
            masters = json.loads(self.cli.masters(jsonFormat = True))
            if masters:
                for master in masters:
                    master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
                return master_count
            else:
                return master_count
        except:
            raise Exception('Failed to get cluster members')
            return master_count

    def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
        if new_master is None: return False
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
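        # e.g. 'device-role of:0000000000000001 172.17.0.3 master' (device id and IP values are illustrative)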
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_equal(master,new_master)
        log.info('Cluster master changed to %s successfully'%new_master)

    def withdraw_cluster_current_mastership(self,master_ip=None,device_id=device_id,controller=None):
        '''Current master loses its mastership and hence a new master will be elected'''
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_not_equal(new_master_ip,master_ip)
        log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
        log.info('Cluster new master is %s'%new_master_ip)
        return True

    def cluster_controller_restarts(self, graceful = False):
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration, controller = None):
            adjacent_controller = None
            adjacent_controllers = None
            if controller:
                adjacent_controllers = list(set(controllers) - set([controller]))
                adjacent_controller = adjacent_controllers[0]
            for node in controllers:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    failed = self.verify_leaders(controllers)
                    if failed:
                        log.info('Leaders command failed on nodes: %s' %failed)
                        log.error('Test failed on ITERATION %d' %iteration)
                        CordLogger.archive_results(self._testMethodName,
                                                   controllers = controllers,
                                                   iteration = 'FAILED')
                    assert_equal(len(failed), 0)
                    return controller

            try:
                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
                log.info('ONOS cluster formed with controllers: %s' %ips)
                st = True
            except:
                st = False

            failed = self.verify_leaders(controllers)
            if failed:
                log.error('Test failed on ITERATION %d' %iteration)
                CordLogger.archive_results(self._testMethodName,
                                           controllers = controllers,
                                           iteration = 'FAILED')
            assert_equal(len(failed), 0)
            if st is False:
                log.info('No storage exception and ONOS cluster was not formed successfully')
            else:
                controller = None

            return controller

        next_controller = None
        tries = self.ITERATIONS
        for num in range(tries):
            index = num % ctlr_len
            #index = random.randrange(0, ctlr_len)
            controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
            controller = onos_map[controller_name]
            log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
            try:
                #enable debug log for the other controllers before restarting this controller
                adjacent_controllers = list( set(controllers) - set([controller]) )
                self.log_set(controllers = adjacent_controllers)
                self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
                if graceful is True:
                    log.info('Gracefully shutting down controller: %s' %controller)
                    self.onos_shutdown(controller)
                cord_test_onos_restart(node = controller, timeout = 0)
                self.log_set(controllers = controller)
                self.log_set(app = 'io.atomix', controllers = controller)
                time.sleep(60)
            except:
                time.sleep(5)
                continue

            #first archive the test case logs for this run
            CordLogger.archive_results(self._testMethodName,
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            next_controller = check_exception(num, controller = controller)

    def test_cluster_controller_restarts(self):
        '''Test the cluster by repeatedly killing the controllers'''
        self.cluster_controller_restarts()

    def test_cluster_graceful_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the controllers gracefully'''
        self.cluster_controller_restarts(graceful = True)

    def test_cluster_single_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the same controller'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration, controller, inclusive = False):
            adjacent_controllers = list(set(controllers) - set([controller]))
            adjacent_controller = adjacent_controllers[0]
            controller_list = adjacent_controllers if inclusive == False else controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                log.error('Test failed on ITERATION %d' %iteration)
                CordLogger.archive_results('test_cluster_single_controller_restarts',
                                           controllers = controllers,
                                           iteration = 'FAILED')
                assert_equal(len(failed), 0)
                return controller

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if controller in ips and inclusive is False:
                    log.info('Controller %s still ACTIVE on Node %s after it was shutdown' %(controller, ctlr))
                if controller not in ips and inclusive is True:
                    log.info('Controller %s still INACTIVE on Node %s after it was restarted' %(controller, ctlr))

            return controller

        tries = self.ITERATIONS
        #choose a random controller for shutdown/restarts
        controller = controllers[random.randrange(0, ctlr_len)]
        controller_name = onos_map[controller]
        ##enable the log level for the controllers
        self.log_set(controllers = controllers)
        self.log_set(app = 'io.atomix', controllers = controllers)
        for num in range(tries):
            log.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
            try:
                cord_test_onos_shutdown(node = controller)
                time.sleep(20)
            except:
                time.sleep(5)
                continue
            #check for exceptions on the adjacent nodes
            check_exception(num, controller)
            #Now restart the controller back
            log.info('Restarting back the controller %s' %controller_name)
            cord_test_onos_restart(node = controller)
            self.log_set(controllers = controller)
            self.log_set(app = 'io.atomix', controllers = controller)
            time.sleep(60)
            #archive the logs for this run
            CordLogger.archive_results('test_cluster_single_controller_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            check_exception(num, controller, inclusive = True)

    def test_cluster_restarts(self):
        '''Test the cluster by repeatedly restarting the entire cluster'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration):
            controller_list = controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                log.error('Test failed on ITERATION %d' %iteration)
                CordLogger.archive_results('test_cluster_restarts',
                                           controllers = controllers,
                                           iteration = 'FAILED')
                assert_equal(len(failed), 0)
                return

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if len(ips) != len(controllers):
                    log.error('Test failed on ITERATION %d' %iteration)
                    CordLogger.archive_results('test_cluster_restarts',
                                               controllers = controllers,
                                               iteration = 'FAILED')
                assert_equal(len(ips), len(controllers))

        tries = self.ITERATIONS
        for num in range(tries):
            log.info('ITERATION: %d. Restarting cluster with controllers at %s' %(num+1, controllers))
            try:
                cord_test_restart_cluster()
                self.log_set(controllers = controllers)
                self.log_set(app = 'io.atomix', controllers = controllers)
                log.info('Delaying before verifying cluster status')
                time.sleep(60)
            except:
                time.sleep(10)
                continue

            #archive the logs for this run before verification
            CordLogger.archive_results('test_cluster_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            #check for exceptions on the adjacent nodes
            check_exception(num)

    #pass
    def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster exists with %d ONOS instances'%onos_instances)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d nodes to the ONOS cluster' %add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Removing cluster current master %s'%(master))
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
        assert_equal(status, True)
        new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
        assert_not_equal(master,new_master)
        log.info("Successfully removed the cluster's master instance")

    def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = standbys[0])
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
        assert_equal(status, True)

    def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = standbys[0])
        log.info('Removing cluster member %s'%standbys[1])
        cord_test_onos_shutdown(node = standbys[1])
        time.sleep(60)
        onos_instances = onos_instances - 2
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
        assert_equal(status, True)

    def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(remove):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Removing onos container with name %s'%standbys[i])
            cord_test_onos_shutdown(node = standbys[i])
        time.sleep(60)
        onos_instances = onos_instances - remove
        status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
        assert_equal(status, True)

    #nottest test cluster not coming up properly if member goes down
    def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, onos_instances - i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        onos_instances = onos_instances-remove
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = onos_instances-remove
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        onos_instances = onos_instances+add
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Restarting cluster')
        cord_test_onos_restart()
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Restarting cluster master %s'%master)
        cord_test_onos_restart(node = master)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up after master restart as expected')

    #test fail. master changing after restart. Need to check correct behavior.
    def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master1, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master1]
        log.info('Restarting cluster master %s'%master1)
        cord_test_onos_restart(node = master1)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master2, standbys = self.get_cluster_current_master_standbys()
        assert_equal(master1,master2)
        log.info('Cluster master is same before and after cluster master restart as expected')

    def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Restarting cluster member %s'%standbys[0])
        cord_test_onos_restart(node = standbys[0])
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting one member')

    def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
        cord_test_onos_restart(node = standbys[0])
        cord_test_onos_restart(node = standbys[1])
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting two members')

    def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status,True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(members):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Restarting cluster member %s'%standbys[i])
            cord_test_onos_restart(node = standbys[i])

        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting %d members'%members)

    def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        log.info('Cluster current master of devices is %s'%master)
        self.change_master_current_cluster(new_master=standbys[0])
        log.info('Cluster master changed successfully')

    #tested on single onos setup.
    def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        for onos_ip in onos_ips:
            tries = 0
            flag = False
            try:
                self.cliEnter(controller = onos_ip)
                while tries <= 5:
                    routes = json.loads(self.cli.routes(jsonFormat = True))
                    if routes:
                        assert_equal(len(routes['routes4']), networks)
                        self.cliExit()
                        flag = True
                        break
                    else:
                        tries += 1
                        time.sleep(1)
                assert_equal(flag, True)
            except:
                log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
                raise

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res,True)
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        log.info('Verifying vrouter traffic after cluster master is down')
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        cord_test_onos_restart()
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        time.sleep(15)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before master down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        log.info('Verifying vrouter traffic after app deactivated')
        time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        log.info('Verifying vrouter traffic after master down')
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before cluster member down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        log.info('Verifying vrouter after cluster member down')
        cord_test_onos_shutdown(node = standbys[0])
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster goes down

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[1]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster member restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart(node = standbys[1])
        log.info('Verifying vrouter traffic after cluster member restart')
        self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart()
        log.info('Verifying vrouter traffic after cluster restart')
        self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts


    #test fails because flow state is in pending_add in onos
    def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
        ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            udpSrc = ingress_map['udp_port'],
                            udpDst = egress_map['udp_port'],
                            controller=master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
                self.success = True
            sniff(timeout=2,
                  lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
                  and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])

        for i in [0,1]:
            if i == 1:
                cord_test_onos_shutdown(node = master)
                log.info('Verifying flows traffic after master killed')
                time.sleep(45)
            else:
                log.info('Verifying flows traffic before master killed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
            L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
            pkt = L2/L3/L4
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1' }
        ingress_map = { 'ip': '192.168.40.1' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ecn = 1,
                            controller=master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
                self.success = True
            sniff(count=2, timeout=5,
                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
                  and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
                  iface = self.flows.port_map[egress])
        for i in [0,1]:
            if i == 1:
                log.info('Changing cluster master to %s'%standbys[0])
                self.change_master_current_cluster(new_master=standbys[0])
                log.info('Verifying flow traffic after cluster master changed')
            else:
                log.info('Verifying flow traffic before cluster master changed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
            pkt = L2/L3
            log.info('Sending a packet to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    #pass
    def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master,standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
        ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ipv6_extension = 0,
                            controller=master
                            )

        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
                self.success = True
            sniff(timeout=2,count=5,
                  lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
        for i in [0,1]:
            if i == 1:
                log.info('Restart cluster current master %s'%master)
                Container(master_onos_name,Onos.IMAGE).restart()
                time.sleep(45)
                log.info('Verifying flow traffic after master restart')
            else:
                log.info('Verifying flow traffic before master restart')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth
            L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
            pkt = L2/L3
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
        dst_mac = self.igmp.iptomac(group)
        eth = Ether(dst= dst_mac)
        ip = IP(dst=group,src=source)
        data = repr(monotonic.monotonic())
        sendp(eth/ip/data,count=20, iface = intf)
        pkt = (eth/ip/data)
        log.info('multicast traffic packet %s'%pkt.show())

    def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
        log.info('verifying multicast traffic for group %s from source %s'%(group,source))
        self.success = False
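        # Receive-verify pattern used by these multicast checks: start a sniffer thread,
        # send the multicast data, join the thread, then return the flag set by the callback.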
        def recv_task():
            def igmp_recv_cb(pkt):
                log.info('multicast data received for group %s from source %s'%(group,source))
                self.success = True
            sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
        t = threading.Thread(target = recv_task)
        t.start()
        self.send_multicast_data_traffic(group,source=source)
        t.join()
        return self.success

    #pass
    def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.igmp.setUp(controller=master)
        groups = ['224.2.3.4','230.5.6.7']
        src_list = ['2.2.2.2','3.3.3.3']
        self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
        assert_equal(status,True)
        status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
        assert_equal(status,False)
        log.info('restarting cluster master %s'%master)
        Container(master_onos_name,Onos.IMAGE).restart()
        time.sleep(60)
        log.info('verifying multicast data traffic after master restart')
        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
        assert_equal(status,True)
        status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
        assert_equal(status,False)
1129
1130 #pass
1131 def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
1132 status = self.verify_cluster_status(onos_instances=onos_instances)
1133 assert_equal(status, True)
1134 master, standbys = self.get_cluster_current_master_standbys()
1135 assert_equal(len(standbys), (onos_instances-1))
1136 onos_names_ips = self.get_cluster_container_names_ips()
1137 master_onos_name = onos_names_ips[master]
1138 self.igmp.setUp(controller=master)
1139 groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
1140 src_list = [self.igmp.randomsourceip()]
1141 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1142 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1143 iface = self.V_INF1, delay = 2)
1144 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1145 iface = self.V_INF1, delay = 2)
1146 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1147 assert_equal(status,True)
1148 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1149 assert_equal(status,False)
1150 log.info('Killing cluster master %s'%master)
1151 Container(master_onos_name,Onos.IMAGE).kill()
1152 time.sleep(60)
1153 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1154 assert_equal(status, True)
1155 log.info('Verifying multicast data traffic after cluster master down')
1156 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1157 assert_equal(status,True)
1158 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1159 assert_equal(status,False)
1160
1161 def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
1162 status = self.verify_cluster_status(onos_instances=onos_instances)
1163 assert_equal(status, True)
1164 master, standbys = self.get_cluster_current_master_standbys()
1165 assert_equal(len(standbys), (onos_instances-1))
1166 onos_names_ips = self.get_cluster_container_names_ips()
1167 master_onos_name = onos_names_ips[master]
1168 self.igmp.setUp(controller=master)
1169 groups = [self.igmp.random_mcast_ip()]
1170 src_list = [self.igmp.randomsourceip()]
1171 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1172 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1173 iface = self.V_INF1, delay = 2)
1174 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1175 assert_equal(status,True)
1176        log.info('Killing cluster master %s'%master)
1177 Container(master_onos_name,Onos.IMAGE).kill()
1178 count = 0
1179 for i in range(60):
1180 log.info('Verifying multicast data traffic after cluster master down')
1181 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1182 if status:
1183 break
1184 else:
1185 count += 1
1186 time.sleep(1)
1187 assert_equal(status, True)
1188        log.info('Time taken to recover traffic after cluster master went down is %d seconds'%count)
1189
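    # A minimal polling-helper sketch (an assumption, not part of the original suite):
    # it generalizes the per-second retry loop used in the recovery-time test above,
    # returning both the final status and the elapsed seconds, e.g.
    #   status, recovery = self.poll_until(lambda: self.verify_igmp_data_traffic(
    #                          groups[0], intf = self.V_INF1, source = src_list[0]))
    def poll_until(self, check, timeout_secs = 60, interval = 1):
        elapsed = 0
        while elapsed < timeout_secs:
            if check():
                return True, elapsed
            time.sleep(interval)
            elapsed += interval
        return False, elapsed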
1190
1191 #pass
1192 def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
1193 status = self.verify_cluster_status(onos_instances=onos_instances)
1194 assert_equal(status, True)
1195 master, standbys = self.get_cluster_current_master_standbys()
1196 assert_equal(len(standbys), (onos_instances-1))
1197 self.igmp.setUp(controller=master)
1198 groups = [self.igmp.random_mcast_ip()]
1199 src_list = [self.igmp.randomsourceip()]
1200 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1201 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1202 iface = self.V_INF1, delay = 2)
1203 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1204 assert_equal(status,True)
1205 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1206 self.change_cluster_current_master(new_master=standbys[0])
1207 log.info('Verifying multicast traffic after cluster master change')
1208 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1209 assert_equal(status,True)
1210 log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
1211 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
1212 iface = self.V_INF1, delay = 1)
1213 time.sleep(10)
1214 status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
1215 assert_equal(status,False)
1216
1217 #pass
1218 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1219 status = self.verify_cluster_status(onos_instances=onos_instances)
1220 assert_equal(status, True)
1221 master,standbys = self.get_cluster_current_master_standbys()
1222 assert_equal(len(standbys), (onos_instances-1))
1223 self.igmp.setUp(controller=master)
1224 groups = [self.igmp.random_mcast_ip()]
1225 src_list = [self.igmp.randomsourceip()]
1226 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1227 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1228 self.change_cluster_current_master(new_master = standbys[0])
1229 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1230 iface = self.V_INF1, delay = 2)
1231 time.sleep(1)
1232 self.change_cluster_current_master(new_master = master)
1233 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1234 assert_equal(status,True)
1235
1236 #pass
ChetanGaonker2099d722016-10-07 15:16:58 -07001237 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001238 def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001239 status = self.verify_cluster_status(onos_instances=onos_instances)
1240 assert_equal(status, True)
1241 master, standbys = self.get_cluster_current_master_standbys()
1242 assert_equal(len(standbys), (onos_instances-1))
1243 self.tls.setUp(controller=master)
1244 df = defer.Deferred()
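        # The TLS check runs under twisted via nose's @deferred decorator: the nested
        # verifier fires df.callback() when authentication completes, and the test
        # fails if the Deferred has not fired within the decorator's timeout.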
1245 def eap_tls_verify(df):
1246 tls = TLSAuthTest()
1247 tls.runTest()
1248 df.callback(0)
1249 reactor.callLater(0, eap_tls_verify, df)
1250 return df
1251
1252 @deferred(120)
ChetanGaonker689b3862016-10-17 16:25:01 -07001253 def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001254 master, standbys = self.get_cluster_current_master_standbys()
1255 assert_equal(len(standbys), (onos_instances-1))
1256 self.tls.setUp()
1257 df = defer.Deferred()
1258 def eap_tls_verify2(df2):
1259 tls = TLSAuthTest()
1260 tls.runTest()
1261 df.callback(0)
1262 for i in [0,1]:
1263 if i == 1:
1264 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1265 self.change_master_current_cluster(new_master=standbys[0])
1266 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1267 else:
1268 log.info('Verifying tls authentication before cluster master change')
1269            reactor.callLater(0, eap_tls_verify2, df)
1270 return df
1271
1272 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001273 def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001274 status = self.verify_cluster_status(onos_instances=onos_instances)
1275 assert_equal(status, True)
1276 master, standbys = self.get_cluster_current_master_standbys()
1277 assert_equal(len(standbys), (onos_instances-1))
1278 onos_names_ips = self.get_cluster_container_names_ips()
1279 master_onos_name = onos_names_ips[master]
1280 self.tls.setUp()
1281 df = defer.Deferred()
1282 def eap_tls_verify(df):
1283 tls = TLSAuthTest()
1284 tls.runTest()
1285 df.callback(0)
1286 for i in [0,1]:
1287 if i == 1:
1288 log.info('Killing cluster current master %s'%master)
A R Karthick3b2e0372016-12-14 17:37:43 -08001289 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001290 time.sleep(20)
1291 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1292 assert_equal(status, True)
1293 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1294 log.info('Verifying tls authentication after killing cluster master')
1295 reactor.callLater(0, eap_tls_verify, df)
1296 return df
1297
1298 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001299 def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001300 status = self.verify_cluster_status(onos_instances=onos_instances)
1301 assert_equal(status, True)
1302 master, standbys = self.get_cluster_current_master_standbys()
1303 assert_equal(len(standbys), (onos_instances-1))
1304 onos_names_ips = self.get_cluster_container_names_ips()
1305 member_onos_name = onos_names_ips[standbys[0]]
1306 self.tls.setUp()
1307 df = defer.Deferred()
1308 def eap_tls_no_cert(df):
1309 def tls_no_cert_cb():
1310 log.info('TLS authentication failed with no certificate')
1311 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1312 tls.runTest()
1313 assert_equal(tls.failTest, True)
1314 df.callback(0)
1315 for i in [0,1]:
1316 if i == 1:
1317 log.info('Restart cluster member %s'%standbys[0])
1318 Container(member_onos_name,Onos.IMAGE).restart()
1319 time.sleep(20)
1320 status = self.verify_cluster_status(onos_instances=onos_instances)
1321 assert_equal(status, True)
1322 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1323 log.info('Verifying tls authentication after member restart')
1324 reactor.callLater(0, eap_tls_no_cert, df)
1325 return df
1326
ChetanGaonker689b3862016-10-17 16:25:01 -07001327 #pass
1328 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1329 status = self.verify_cluster_status(onos_instances=onos_instances)
1330 assert_equal(status,True)
1331 master,standbys = self.get_cluster_current_master_standbys()
1332 assert_equal(len(standbys),(onos_instances-1))
1333 self.proxyarp.setUpClass()
1334 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1335 ingress = hosts+1
1336 for hostip, hostmac in hosts_config:
1337 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1338 time.sleep(1)
1339 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1340 self.change_cluster_current_master(new_master=standbys[0])
1341 log.info('verifying proxyarp after master change')
1342 for hostip, hostmac in hosts_config:
1343 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1344 time.sleep(1)
1345 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1346 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1347 time.sleep(3)
1348 for hostip, hostmac in hosts_config:
1349 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1350 time.sleep(1)
1351 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1352 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1353 time.sleep(3)
1354 for hostip, hostmac in hosts_config:
1355 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1356 time.sleep(1)
ChetanGaonker2099d722016-10-07 15:16:58 -07001357
ChetanGaonker689b3862016-10-17 16:25:01 -07001358 #pass
1359 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001360 status = self.verify_cluster_status(onos_instances=onos_instances)
1361 assert_equal(status, True)
1362 master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001363 assert_equal(len(standbys), (onos_instances-1))
1364 onos_names_ips = self.get_cluster_container_names_ips()
1365 member_onos_name = onos_names_ips[standbys[1]]
1366 self.proxyarp.setUpClass()
1367 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1368 ingress = hosts+1
1369 for hostip, hostmac in hosts_config:
1370 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1371 time.sleep(1)
1372 log.info('killing cluster member %s'%standbys[1])
1373 Container(member_onos_name,Onos.IMAGE).kill()
1374 time.sleep(20)
1375 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1376 assert_equal(status, True)
1377 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1378 log.info('verifying proxy arp functionality after cluster member down')
1379 for hostip, hostmac in hosts_config:
1380 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1381 time.sleep(1)
1382
1383 #pass
1384 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1385 status = self.verify_cluster_status(onos_instances=onos_instances)
1386 assert_equal(status, True)
1387 self.proxyarp.setUpClass()
1388 master, standbys = self.get_cluster_current_master_standbys()
1389 assert_equal(len(standbys), (onos_instances-1))
1390 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1391 self.success = True
1392 ingress = hosts+1
1393 ports = range(ingress,ingress+10)
1394 hostmac = []
1395 hostip = []
1396 for ip,mac in hosts_config:
1397 hostmac.append(mac)
1398 hostip.append(ip)
1399 success_dir = {}
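        # Each verifier thread records its own result keyed by thread name; dict item
        # assignment is atomic under the GIL, so no explicit lock is taken here. The
        # test passes only if all 10 threads report an ARP reply.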
1400 def verify_proxyarp(*r):
1401 ingress, hostmac, hostip = r[0],r[1],r[2]
1402 def mac_recv_task():
1403 def recv_cb(pkt):
1404 log.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
1405 success_dir[current_thread().name] = True
1406 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1407 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1408 t = threading.Thread(target = mac_recv_task)
1409 t.start()
1410 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1411 log.info('Sending arp request for dest ip %s on interface %s' %
1412 (hostip,self.proxyarp.port_map[ingress]))
1413 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1414 t.join()
1415 t = []
1416 for i in range(10):
1417 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1418 for i in range(10):
1419 t[i].start()
1420 time.sleep(2)
1421 for i in range(10):
1422 t[i].join()
1423 if len(success_dir) != 10:
1424 self.success = False
1425 assert_equal(self.success, True)
1426
1427 #pass
1428 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1429 status = self.verify_cluster_status(onos_instances=onos_instances)
1430 assert_equal(status, True)
1431 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001432 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001433 self.acl.setUp()
1434 acl_rule = ACLTest()
1435 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1436 if status is False:
1437 log.info('JSON request returned status %d' %code)
1438 assert_equal(status, True)
1439 result = acl_rule.get_acl_rules(controller=master)
1440 aclRules1 = result.json()['aclRules']
1441 log.info('Added acl rules is %s'%aclRules1)
1442 acl_Id = map(lambda d: d['id'], aclRules1)
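        # Note: this suite targets Python 2, where map() returns a list, so the first
        # rule id can be indexed directly below when removing it via the new master.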
1443 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1444 self.change_cluster_current_master(new_master=standbys[0])
1445 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1446 if status is False:
1447 log.info('JSON request returned status %d' %code)
1448 assert_equal(status, True)
1449
1450 #pass
1451 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1452 status = self.verify_cluster_status(onos_instances=onos_instances)
1453 assert_equal(status, True)
1454 master,standbys = self.get_cluster_current_master_standbys()
1455 assert_equal(len(standbys),(onos_instances-1))
1456 onos_names_ips = self.get_cluster_container_names_ips()
1457 master_onos_name = onos_names_ips[master]
1458 self.acl.setUp()
1459 acl_rule = ACLTest()
1460 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1461 if status is False:
1462 log.info('JSON request returned status %d' %code)
1463 assert_equal(status, True)
1464 result1 = acl_rule.get_acl_rules(controller=master)
1465 aclRules1 = result1.json()['aclRules']
1466 log.info('Added acl rules is %s'%aclRules1)
1467 acl_Id1 = map(lambda d: d['id'], aclRules1)
1468 log.info('Killing cluster current master %s'%master)
1469 Container(master_onos_name,Onos.IMAGE).kill()
1470 time.sleep(45)
1471        status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1472 assert_equal(status, True)
1473 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1474 assert_equal(len(standbys),(onos_instances-2))
1475 assert_not_equal(new_master,master)
1476 result2 = acl_rule.get_acl_rules(controller=new_master)
1477 aclRules2 = result2.json()['aclRules']
1478 acl_Id2 = map(lambda d: d['id'], aclRules2)
1479 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1480 assert_equal(acl_Id2,acl_Id1)
1481
1482 #acl traffic scenario not working as acl rule is not getting added to onos
1483 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1484 status = self.verify_cluster_status(onos_instances=onos_instances)
1485 assert_equal(status, True)
1486 master,standbys = self.get_cluster_current_master_standbys()
1487 assert_equal(len(standbys),(onos_instances-1))
1488 onos_names_ips = self.get_cluster_container_names_ips()
1489 member1_onos_name = onos_names_ips[standbys[0]]
1490 member2_onos_name = onos_names_ips[standbys[1]]
1491 ingress = self.acl.ingress_iface
1492 egress = self.acl.CURRENT_PORT_NUM
1493 acl_rule = ACLTest()
1494 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1495 self.acl.CURRENT_PORT_NUM += 1
1496 time.sleep(5)
1497 if status is False:
1498 log.info('JSON request returned status %d' %code)
1499 assert_equal(status, True)
1500 srcMac = '00:00:00:00:00:11'
1501 dstMac = host_ip_mac[0][1]
1502 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1503 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1504 time.sleep(10)
1505 if status is False:
1506 log.info('JSON request returned status %d' %code)
1507 assert_equal(status, True)
1508 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1509 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1510 Container(member1_onos_name, Onos.IMAGE).kill()
1511 Container(member2_onos_name, Onos.IMAGE).kill()
1512 time.sleep(40)
1513 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1514 assert_equal(status, True)
1515 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1516 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1517
1518 #pass
1519 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1520 status = self.verify_cluster_status(onos_instances=onos_instances)
1521 assert_equal(status, True)
1522 master,standbys = self.get_cluster_current_master_standbys()
1523 assert_equal(len(standbys),(onos_instances-1))
1524 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001525 mac = self.dhcprelay.get_mac(iface)
1526 self.dhcprelay.host_load(iface)
1527 ##we use the defaults for this test that serves as an example for others
1528 ##You don't need to restart dhcpd server if retaining default config
1529 config = self.dhcprelay.default_config
1530 options = self.dhcprelay.default_options
1531 subnet = self.dhcprelay.default_subnet_config
1532 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1533 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1534 config = config,
1535 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001536 subnet = subnet,
1537 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001538 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1539 cip, sip = self.dhcprelay.send_recv(mac)
1540 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1541 self.change_master_current_cluster(new_master=standbys[0])
1542 log.info('Releasing ip %s to server %s' %(cip, sip))
1543 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1544 log.info('Triggering DHCP discover again after release')
1545 cip2, sip2 = self.dhcprelay.send_recv(mac)
1546 log.info('Verifying released IP was given back on rediscover')
1547 assert_equal(cip, cip2)
1548 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1549 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001550 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001551
ChetanGaonker689b3862016-10-17 16:25:01 -07001552
1553 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1554 status = self.verify_cluster_status(onos_instances=onos_instances)
1555 assert_equal(status, True)
1556 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001557 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001558 onos_names_ips = self.get_cluster_container_names_ips()
1559 master_onos_name = onos_names_ips[master]
1560 self.dhcprelay.setUpClass(controller=master)
1561 mac = self.dhcprelay.get_mac(iface)
1562 self.dhcprelay.host_load(iface)
1563 ##we use the defaults for this test that serves as an example for others
1564 ##You don't need to restart dhcpd server if retaining default config
1565 config = self.dhcprelay.default_config
1566 options = self.dhcprelay.default_options
1567 subnet = self.dhcprelay.default_subnet_config
1568 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1569 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1570 config = config,
1571 options = options,
1572 subnet = subnet,
1573 controller=master)
1574 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1575 log.info('Initiating dhcp process from client %s'%mac)
1576 cip, sip = self.dhcprelay.send_recv(mac)
1577 log.info('Killing cluster current master %s'%master)
1578 Container(master_onos_name, Onos.IMAGE).kill()
1579 time.sleep(60)
1580 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1581 assert_equal(status, True)
1582 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1583        log.info("Verifying dhcp client gets the same IP after cluster master goes down")
1584 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1585 assert_equal(new_cip, cip)
1586 self.dhcprelay.tearDownClass(controller=standbys[0])
1587
1588 #pass
1589 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1590 status = self.verify_cluster_status(onos_instances=onos_instances)
1591 assert_equal(status, True)
1592 master,standbys = self.get_cluster_current_master_standbys()
1593 assert_equal(len(standbys),(onos_instances-1))
1594 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001595 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1596 self.dhcprelay.host_load(iface)
1597 ##we use the defaults for this test that serves as an example for others
1598 ##You don't need to restart dhcpd server if retaining default config
1599 config = self.dhcprelay.default_config
1600 options = self.dhcprelay.default_options
1601 subnet = self.dhcprelay.default_subnet_config
1602 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1603 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1604 config = config,
1605 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001606 subnet = subnet,
1607 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001608 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1609 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1610 assert_not_equal(cip1,None)
1611 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1612 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1613 self.change_master_current_cluster(new_master=standbys[0])
1614 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1615 assert_not_equal(cip2,None)
1616 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1617        log.info('Changing cluster master from %s to %s'%(standbys[0],master))
1618        self.change_master_current_cluster(new_master=master)
1619 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1620 assert_not_equal(cip3,None)
1621        log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
ChetanGaonker689b3862016-10-17 16:25:01 -07001622 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001623
ChetanGaonker689b3862016-10-17 16:25:01 -07001624 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001625 status = self.verify_cluster_status(onos_instances=onos_instances)
1626        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001627        self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001628 self.subscriber.num_subscribers = 5
1629 self.subscriber.num_channels = 10
1630 for i in [0,1]:
1631 if i == 1:
1632 cord_test_onos_restart()
1633 time.sleep(45)
1634 status = self.verify_cluster_status(onos_instances=onos_instances)
1635 assert_equal(status, True)
1636 log.info('Verifying cord subscriber functionality after cluster restart')
1637 else:
1638 log.info('Verifying cord subscriber functionality before cluster restart')
1639 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1640 num_channels = self.subscriber.num_channels,
1641 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1642 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1643 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1644 self.subscriber.num_channels))
1645 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001646 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001647
ChetanGaonker689b3862016-10-17 16:25:01 -07001648 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1649 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1650 status = self.verify_cluster_status(onos_instances=onos_instances)
1651 assert_equal(status, True)
1652 master,standbys = self.get_cluster_current_master_standbys()
1653 assert_equal(len(standbys),(onos_instances-1))
1654 self.subscriber.setUpClass(controller=master)
1655 self.subscriber.num_subscribers = 5
1656 self.subscriber.num_channels = 10
1657 for i in [0,1]:
1658 if i == 1:
1659 status=self.withdraw_cluster_current_mastership(master_ip=master)
1660                status=self.withdraw_cluster_current_mastership(master_ip=master)
1660                assert_equal(status, True)
1661 master,standbys = self.get_cluster_current_master_standbys()
1662 log.info('verifying cord subscriber functionality after cluster current master withdraw mastership')
1663 else:
1664 log.info('verifying cord subscriber functionality before cluster master withdraw mastership')
1665 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1666 num_channels = self.subscriber.num_channels,
1667 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1668 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1669 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1670 self.subscriber.num_channels),controller=master)
1671 assert_equal(test_status, True)
1672 self.subscriber.tearDownClass(controller=master)
1673
1674 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1675 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001676 status = self.verify_cluster_status(onos_instances=onos_instances)
1677 assert_equal(status, True)
1678 master, standbys = self.get_cluster_current_master_standbys()
1679 assert_equal(len(standbys),(onos_instances-1))
1680 onos_names_ips = self.get_cluster_container_names_ips()
1681 member_onos_name = onos_names_ips[standbys[0]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001682 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001683 num_subscribers = 1
1684 num_channels = 10
1685 for i in [0,1]:
1686 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001687 cord_test_onos_shutdown(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001688 time.sleep(30)
ChetanGaonker689b3862016-10-17 16:25:01 -07001689 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001690 assert_equal(status, True)
1691 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1692 else:
1693 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1694 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1695 num_channels = num_channels,
1696 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1697 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1698 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
ChetanGaonker689b3862016-10-17 16:25:01 -07001699 negative_subscriber_auth = 'all',controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001700 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001701 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001702
ChetanGaonker689b3862016-10-17 16:25:01 -07001703 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001704 status = self.verify_cluster_status(onos_instances=onos_instances)
1705 assert_equal(status, True)
1706 master, standbys = self.get_cluster_current_master_standbys()
1707 assert_equal(len(standbys),(onos_instances-1))
1708 onos_names_ips = self.get_cluster_container_names_ips()
1709 member1_onos_name = onos_names_ips[standbys[0]]
1710 member2_onos_name = onos_names_ips[standbys[1]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001711 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001712 num_subscribers = 1
1713 num_channels = 10
1714 for i in [0,1]:
1715 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001716 cord_test_onos_shutdown(node = standbys[0])
1717 cord_test_onos_shutdown(node = standbys[1])
ChetanGaonker2099d722016-10-07 15:16:58 -07001718 time.sleep(60)
1719 status = self.verify_cluster_status(onos_instances=onos_instances-2)
1720 assert_equal(status, True)
1721                log.info('Verifying cord subscriber functionality after cluster two members %s and %s down'%(standbys[0],standbys[1]))
1722            else:
1723                log.info('Verifying cord subscriber functionality before cluster two members %s and %s down'%(standbys[0],standbys[1]))
1724 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1725 num_channels = num_channels,
1726 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1727 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1728 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1729 negative_subscriber_auth = 'all')
1730 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001731 self.subscriber.tearDownClass(controller=master)
1732
1733 #pass
1734 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1735 status = self.verify_cluster_status(onos_instances=onos_instances)
1736 assert_equal(status, True)
1737 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1738 for device in device_dict.keys():
1739 log.info("Device is %s"%device_dict[device])
1740 assert_not_equal(device_dict[device]['master'],'none')
1741 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1742 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1743
1744 #pass
1745 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1746 status = self.verify_cluster_status(onos_instances=onos_instances)
1747 assert_equal(status, True)
1748 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1749 cluster_ips = self.get_cluster_current_member_ips()
1750 for ip in cluster_ips:
1751 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1752 assert_equal(len(device_dict.keys()),onos_instances)
1753 for device in device_dict.keys():
1754 log.info("Device is %s"%device_dict[device])
1755 assert_not_equal(device_dict[device]['master'],'none')
1756 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1757 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1758
1759 #pass
1760 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1761 status = self.verify_cluster_status(onos_instances=onos_instances)
1762 assert_equal(status, True)
1763 onos_names_ips = self.get_cluster_container_names_ips()
1764 master_count = self.get_number_of_devices_of_master()
1765 log.info('Master count information is %s'%master_count)
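        # As used here, get_number_of_devices_of_master() is expected to return a dict
        # keyed by controller ip, each value carrying a 'size' (device count) and, in
        # the mastership-withdraw test further below, a 'devices' list of device ids.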
1766 total_devices = 0
1767 for master in master_count.keys():
1768 total_devices += master_count[master]['size']
1769 if master_count[master]['size'] != 0:
1770 restart_ip = master
1771 assert_equal(total_devices,onos_instances)
1772 member_onos_name = onos_names_ips[restart_ip]
1773 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1774 Container(member_onos_name, Onos.IMAGE).restart()
1775 time.sleep(40)
1776 master_count = self.get_number_of_devices_of_master()
1777 log.info('Master count information after restart is %s'%master_count)
1778 total_devices = 0
1779 for master in master_count.keys():
1780 total_devices += master_count[master]['size']
1781 if master == restart_ip:
1782 assert_equal(master_count[master]['size'], 0)
1783 assert_equal(total_devices,onos_instances)
1784
1785 #pass
1786 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1787 status = self.verify_cluster_status(onos_instances=onos_instances)
1788 assert_equal(status, True)
1789 onos_names_ips = self.get_cluster_container_names_ips()
1790 master_count = self.get_number_of_devices_of_master()
1791 log.info('Master count information is %s'%master_count)
1792 total_devices = 0
1793 for master in master_count.keys():
1794 total_devices += master_count[master]['size']
1795 if master_count[master]['size'] != 0:
1796 restart_ip = master
1797 assert_equal(total_devices,onos_instances)
1798 master_onos_name = onos_names_ips[restart_ip]
1799 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1800 Container(master_onos_name, Onos.IMAGE).kill()
1801 time.sleep(40)
1802 for ip in onos_names_ips.keys():
1803 if ip != restart_ip:
1804 controller_ip = ip
1805 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1806 assert_equal(status, True)
1807 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1808 log.info('Master count information after restart is %s'%master_count)
1809 total_devices = 0
1810 for master in master_count.keys():
1811 total_devices += master_count[master]['size']
1812 if master == restart_ip:
1813 assert_equal(master_count[master]['size'], 0)
1814 assert_equal(total_devices,onos_instances)
1815
1816 #pass
1817 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1818 status = self.verify_cluster_status(onos_instances=onos_instances)
1819 assert_equal(status, True)
1820 master_count = self.get_number_of_devices_of_master()
1821 log.info('Master count information is %s'%master_count)
1822 total_devices = 0
1823 for master in master_count.keys():
1824 total_devices += int(master_count[master]['size'])
1825 if master_count[master]['size'] != 0:
1826 master_ip = master
1827                log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
1828 device_id = str(master_count[master]['devices'][0])
1829 device_count = master_count[master]['size']
1830 assert_equal(total_devices,onos_instances)
1831 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1832 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1833 assert_equal(status, True)
1834 master_count = self.get_number_of_devices_of_master()
1835 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1836 total_devices = 0
1837 for master in master_count.keys():
1838 total_devices += int(master_count[master]['size'])
1839 if master == master_ip:
1840 assert_equal(master_count[master]['size'], device_count-1)
1841 assert_equal(total_devices,onos_instances)
1842
1843 #pass
1844 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1845 status = self.verify_cluster_status(onos_instances=onos_instances)
1846 assert_equal(status, True)
1847 master_count = self.get_number_of_devices_of_master()
1848 log.info('Master count information is %s'%master_count)
1849 total_devices = 0
1850 for master in master_count.keys():
1851 total_devices += master_count[master]['size']
1852 assert_equal(total_devices,onos_instances)
1853 log.info('Restarting cluster')
1854 cord_test_onos_restart()
1855 time.sleep(60)
1856 master_count = self.get_number_of_devices_of_master()
1857 log.info('Master count information after restart is %s'%master_count)
1858 total_devices = 0
1859 for master in master_count.keys():
1860 total_devices += master_count[master]['size']
1861 assert_equal(total_devices,onos_instances)