# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl, get_mac
from OltConfig import OltConfig
from socket import socket
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart, cord_test_restart_cluster
from portmaps import g_subscriber_port_map
from scapy.all import *
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
from OnosLog import OnosLog
from CordLogger import CordLogger
import os
import json
import random
import collections
log.setLevel('INFO')

class cluster_exchange(CordLogger):
    test_path = os.path.dirname(os.path.realpath(__file__))
    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
    mac = RandMAC()._fix()
    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
    igmp_ip = IP(dst = '224.0.0.22')
    ONOS_INSTANCES = 3
    V_INF1 = 'veth0'
    TLS_TIMEOUT = 100
    device_id = 'of:' + get_mac()
    igmp = cluster_igmp()
    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10', end_ip = '224.1.10.49')
    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35', end_ip = '38.24.35.56')
    tls = cluster_tls()
    flows = cluster_flows()
    proxyarp = cluster_proxyarp()
    vrouter = cluster_vrouter()
    acl = cluster_acl()
    dhcprelay = cluster_dhcprelay()
    subscriber = cluster_subscriber()
    testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_graceful_controller_restarts',
                       'test_cluster_single_controller_restarts', 'test_cluster_restarts')
    iterations = int(os.getenv('ITERATIONS', 10))
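    # The restart tests below loop `iterations` times; the count is read from the
    # ITERATIONS environment variable and falls back to 10. An illustrative override
    # before launching the suite would be:
    #
    #   export ITERATIONS=20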
A R Karthick1f908202016-11-16 17:32:20 -080067
68 def setUp(self):
69 if self._testMethodName not in self.testcaseLoggers:
70 super(cluster_exchange, self).setUp()
71
72 def tearDown(self):
73 if self._testMethodName not in self.testcaseLoggers:
74 super(cluster_exchange, self).tearDown()
ChetanGaonker2099d722016-10-07 15:16:58 -070075
    def get_controller(self):
        controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
        controller = controller.split(',')[0]
        return controller

    @classmethod
    def get_controllers(cls):
        controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
        return controllers.split(',')

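    # ONOS_CONTROLLER_IP is expected to hold a comma separated list of controller IPs,
    # e.g. (illustrative addresses only):
    #
    #   export ONOS_CONTROLLER_IP=172.17.0.2,172.17.0.3,172.17.0.4
    #
    # get_controller() picks the first entry, while get_controllers() returns the full list.
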
    def cliEnter(self, controller = None):
        retries = 0
        while retries < 30:
            self.cli = OnosCliDriver(controller = controller, connect = True)
            if self.cli.handle:
                break
            else:
                retries += 1
                time.sleep(2)

    def cliExit(self):
        self.cli.disconnect()

    def get_leader(self, controller = None):
        self.cliEnter(controller = controller)
        try:
            result = json.loads(self.cli.leaders(jsonFormat = True))
        except:
            result = None

        if result is None:
            log.info('Leaders command failure for controller %s' %controller)
        else:
            log.info('Leaders returned: %s' %result)
        self.cliExit()
        return result

    def onos_shutdown(self, controller = None):
        status = True
        self.cliEnter(controller = controller)
        try:
            self.cli.shutdown(timeout = 10)
        except:
            log.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
            status = False

        self.cliExit()
        return status

    def log_set(self, level = None, app = 'org.onosproject', controllers = None):
        CordLogger.logSet(level = level, app = app, controllers = controllers, forced = True)

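    # log_set() is a thin wrapper over CordLogger.logSet(); the restart tests use it to
    # raise the log level for the ONOS core and for the Atomix/Raft subsystem on the
    # surviving controllers before a node is taken down, for example:
    #
    #   self.log_set(controllers = controllers)
    #   self.log_set(app = 'io.atomix', controllers = controllers)
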
    def get_leaders(self, controller = None):
        result_map = {}
        if controller is None:
            controller = self.get_controller()
        if type(controller) in [ list, tuple ]:
            for c in controller:
                leaders = self.get_leader(controller = c)
                result_map[c] = leaders
        else:
            leaders = self.get_leader(controller = controller)
            result_map[controller] = leaders
        return result_map

    def verify_leaders(self, controller = None):
        leaders_map = self.get_leaders(controller = controller)
        failed = [ k for k, v in leaders_map.items() if v is None ]
        return failed

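    # verify_leaders() returns the controllers whose 'leaders' CLI query failed, so an
    # empty list means every queried node answered. The restart tests assert on it as:
    #
    #   failed = self.verify_leaders(controllers)
    #   assert_equal(len(failed), 0)
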
    def verify_cluster_status(self, controller = None, onos_instances = ONOS_INSTANCES, verify = False):
        tries = 0
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
                if cluster_summary:
                    log.info("cluster 'summary' command output is %s"%cluster_summary)
                    nodes = cluster_summary['nodes']
                    if verify:
                        if nodes == onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                    else:
                        if nodes >= onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return False
        except:
            raise Exception('Failed to get cluster members')
            return False

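    # verify_cluster_status() retries the ONOS 'summary' query: with verify=True the
    # reported node count must match onos_instances exactly, otherwise reaching at least
    # onos_instances is treated as success. A typical call from the tests below:
    #
    #   status = self.verify_cluster_status(onos_instances = onos_instances)
    #   assert_equal(status, True)
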
    def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
        tries = 0
        cluster_ips = []
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
                if cluster_nodes:
                    log.info("cluster 'nodes' output is %s"%cluster_nodes)
                    if nodes_filter:
                        cluster_nodes = nodes_filter(cluster_nodes)
                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
                    self.cliExit()
                    cluster_ips.sort(lambda i1, i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
                    return cluster_ips
                else:
                    tries += 1
            self.cliExit()
            return cluster_ips
        except:
            raise Exception('Failed to get cluster members')
            return cluster_ips

    def get_cluster_container_names_ips(self, controller = None):
        onos_names_ips = {}
        controllers = self.get_controllers()
        i = 0
        for controller in controllers:
            if i == 0:
                name = Onos.NAME
            else:
                name = '{}-{}'.format(Onos.NAME, i+1)
            onos_names_ips[controller] = name
            onos_names_ips[name] = controller
            i += 1
        return onos_names_ips
        # onos_ips = self.get_cluster_current_member_ips(controller=controller)
        # onos_names_ips[onos_ips[0]] = Onos.NAME
        # onos_names_ips[Onos.NAME] = onos_ips[0]
        # for i in range(1,len(onos_ips)):
        #     name = '{0}-{1}'.format(Onos.NAME,i+1)
        #     onos_names_ips[onos_ips[i]] = name
        #     onos_names_ips[name] = onos_ips[i]

        # return onos_names_ips

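    # get_cluster_container_names_ips() builds a two-way lookup between controller IPs
    # and ONOS container names, so onos_map[ip] gives the container name and
    # onos_map[name] gives the IP back. Assuming Onos.NAME were 'cord-onos' and with
    # purely illustrative addresses, the map would look roughly like:
    #
    #   { '172.17.0.2': 'cord-onos',   'cord-onos': '172.17.0.2',
    #     '172.17.0.3': 'cord-onos-2', 'cord-onos-2': '172.17.0.3',
    #     '172.17.0.4': 'cord-onos-3', 'cord-onos-3': '172.17.0.4' }
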
    #identifying current master of a connected device, not tested
    def get_cluster_current_master_standbys(self, controller = None, device_id = device_id):
        master = None
        standbys = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        log.info('Verifying device info in line %s'%device)
                        if device['id'] == device_id:
                            master = str(device['master'])
                            standbys = map(lambda d: str(d), device['standbys'])
                            log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
                            self.cliExit()
                            return master, standbys
                    self.cliExit()
                    return master, standbys
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return master, standbys
        except:
            raise Exception('Failed to get cluster members')
            return master, standbys

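    # The 'roles' output parsed above and below is a list of per-device mastership
    # records. A sketch of the expected shape, with purely illustrative values, is:
    #
    #   [ { 'id': 'of:0000000000000001',
    #       'master': '172.17.0.2',
    #       'standbys': ['172.17.0.3', '172.17.0.4'] } ]
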
    def get_cluster_current_master_standbys_of_connected_devices(self, controller = None):
        '''returns master and standbys for all the devices connected to the ONOS cluster instance'''
        device_dict = {}
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_dict = {}
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        device_dict[str(device['id'])] = {'master':str(device['master']),'standbys':device['standbys']}
                        for i in range(len(device_dict[device['id']]['standbys'])):
                            device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
                        log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
                    self.cliExit()
                    return device_dict
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_dict
        except:
            raise Exception('Failed to get cluster members')
            return device_dict

    #lists the devices connected to the ONOS cluster, not tested
    def get_cluster_connected_devices(self, controller = None):
        '''returns all the devices connected to the ONOS cluster'''
        device_list = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_list = []
                devices = json.loads(self.cli.devices(jsonFormat = True))
                log.info("cluster 'devices' command output is %s"%devices)
                if devices:
                    for device in devices:
                        log.info('device id is %s'%device['id'])
                        device_list.append(str(device['id']))
                    self.cliExit()
                    return device_list
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_list
        except:
            raise Exception('Failed to get cluster members')
            return device_list

    def get_number_of_devices_of_master(self, controller = None):
        '''returns master-device pairs, i.e. which master owns which devices'''
        master_count = {}
        try:
            cli = self.cliEnter(controller = controller)
            masters = json.loads(self.cli.masters(jsonFormat = True))
            if masters:
                for master in masters:
                    master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
                return master_count
            else:
                return master_count
        except:
            raise Exception('Failed to get cluster members')
            return master_count

323 def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
324 if new_master is None: return False
ChetanGaonker689b3862016-10-17 16:25:01 -0700325 self.cliEnter(controller=controller)
ChetanGaonker2099d722016-10-07 15:16:58 -0700326 cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
327 command = self.cli.command(cmd = cmd, jsonFormat = False)
328 self.cliExit()
329 time.sleep(60)
330 master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
331 assert_equal(master,new_master)
332 log.info('Cluster master changed to %s successfully'%new_master)
333
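    # Both helpers above and below drive mastership through the ONOS 'device-role' CLI
    # command, which takes the device id, the controller node id and a role
    # (master/standby/none). change_master_current_cluster() promotes a node to
    # 'master' for the device, while withdraw_cluster_current_mastership() sets the
    # current master's role to 'none' so the cluster elects a new master.
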
    def withdraw_cluster_current_mastership(self, master_ip = None, device_id = device_id, controller = None):
        '''current master loses its mastership and hence a new master will be elected'''
        self.cliEnter(controller = controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        new_master_ip, standbys = self.get_cluster_current_master_standbys(controller = controller, device_id = device_id)
        assert_not_equal(new_master_ip, master_ip)
        log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id, master_ip))
        log.info('Cluster new master is %s'%new_master_ip)
        return True

    def cluster_controller_restarts(self, graceful = False):
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration, controller = None):
            adjacent_controller = None
            adjacent_controllers = None
            if controller:
                adjacent_controllers = list(set(controllers) - set([controller]))
                adjacent_controller = adjacent_controllers[0]
            for node in controllers:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    failed = self.verify_leaders(controllers)
                    if failed:
                        log.info('Leaders command failed on nodes: %s' %failed)
                        log.error('Test failed on ITERATION %d' %iteration)
                        CordLogger.archive_results(self._testMethodName,
                                                   controllers = controllers,
                                                   iteration = 'FAILED')
                    assert_equal(len(failed), 0)
                    return controller

            try:
                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
                log.info('ONOS cluster formed with controllers: %s' %ips)
                st = True
            except:
                st = False

            failed = self.verify_leaders(controllers)
            if failed:
                log.error('Test failed on ITERATION %d' %iteration)
                CordLogger.archive_results(self._testMethodName,
                                           controllers = controllers,
                                           iteration = 'FAILED')
            assert_equal(len(failed), 0)
            if st is False:
                log.info('No storage exception and ONOS cluster was not formed successfully')
            else:
                controller = None

            return controller

        next_controller = None
        tries = self.iterations
        for num in range(tries):
            index = num % ctlr_len
            #index = random.randrange(0, ctlr_len)
            controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
            controller = onos_map[controller_name]
            log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
            try:
                #enable debug log for the other controllers before restarting this controller
                adjacent_controllers = list( set(controllers) - set([controller]) )
                self.log_set(controllers = adjacent_controllers)
                self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
                if graceful is True:
                    log.info('Gracefully shutting down controller: %s' %controller)
                    self.onos_shutdown(controller)
                cord_test_onos_restart(node = controller, timeout = 0)
                self.log_set(controllers = controller)
                self.log_set(app = 'io.atomix', controllers = controller)
                time.sleep(60)
            except:
                time.sleep(5)
                continue

            #first archive the test case logs for this run
            CordLogger.archive_results(self._testMethodName,
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            next_controller = check_exception(num, controller = controller)

    def test_cluster_controller_restarts(self):
        '''Test the cluster by repeatedly killing the controllers'''
        self.cluster_controller_restarts()

    def test_cluster_graceful_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the controllers gracefully'''
        self.cluster_controller_restarts(graceful = True)

    def test_cluster_single_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the same controller'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration, controller, inclusive = False):
            adjacent_controllers = list(set(controllers) - set([controller]))
            adjacent_controller = adjacent_controllers[0]
            controller_list = adjacent_controllers if inclusive == False else controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                log.error('Test failed on ITERATION %d' %iteration)
                CordLogger.archive_results('test_cluster_single_controller_restarts',
                                           controllers = controllers,
                                           iteration = 'FAILED')
                assert_equal(len(failed), 0)
                return controller

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if controller in ips and inclusive is False:
                    log.info('Controller %s still ACTIVE on Node %s after it was shutdown' %(controller, ctlr))
                if controller not in ips and inclusive is True:
                    log.info('Controller %s still INACTIVE on Node %s after it was restarted' %(controller, ctlr))

            return controller

        tries = self.iterations
        #choose a random controller for shutdown/restarts
        controller = controllers[random.randrange(0, ctlr_len)]
        controller_name = onos_map[controller]
        ##enable the log level for the controllers
        self.log_set(controllers = controllers)
        self.log_set(app = 'io.atomix', controllers = controllers)
        for num in range(tries):
            log.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
            try:
                cord_test_onos_shutdown(node = controller)
                time.sleep(20)
            except:
                time.sleep(5)
                continue
            #check for exceptions on the adjacent nodes
            check_exception(num, controller)
            #Now restart the controller back
            log.info('Restarting back the controller %s' %controller_name)
            cord_test_onos_restart(node = controller)
            self.log_set(controllers = controller)
            self.log_set(app = 'io.atomix', controllers = controller)
            time.sleep(60)
            #archive the logs for this run
            CordLogger.archive_results('test_cluster_single_controller_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            check_exception(num, controller, inclusive = True)

    def test_cluster_restarts(self):
        '''Test the cluster by repeatedly restarting the entire cluster'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration):
            controller_list = controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                log.error('Test failed on ITERATION %d' %iteration)
                CordLogger.archive_results('test_cluster_restarts',
                                           controllers = controllers,
                                           iteration = 'FAILED')
                assert_equal(len(failed), 0)
                return

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if len(ips) != len(controllers):
                    log.error('Test failed on ITERATION %d' %iteration)
                    CordLogger.archive_results('test_cluster_restarts',
                                               controllers = controllers,
                                               iteration = 'FAILED')
                assert_equal(len(ips), len(controllers))

        tries = self.iterations
        for num in range(tries):
            log.info('ITERATION: %d. Restarting cluster with controllers at %s' %(num+1, controllers))
            try:
                cord_test_restart_cluster()
                self.log_set(controllers = controllers)
                self.log_set(app = 'io.atomix', controllers = controllers)
                log.info('Delaying before verifying cluster status')
                time.sleep(60)
            except:
                time.sleep(10)
                continue

            #archive the logs for this run before verification
            CordLogger.archive_results('test_cluster_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            #check for exceptions on the adjacent nodes
            check_exception(num)

    #pass
    def test_cluster_formation_and_verification(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster exists with %d ONOS instances'%onos_instances)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d nodes to the ONOS cluster' %add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Removing cluster current master %s'%(master))
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances, controller=standbys[0])
        assert_equal(status, True)
        new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
        assert_not_equal(master, new_master)
        log.info('Successfully removed clusters master instance')

    def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = standbys[0])
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
        assert_equal(status, True)

    def test_cluster_removing_two_members(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = standbys[0])
        log.info('Removing cluster member %s'%standbys[1])
        cord_test_onos_shutdown(node = standbys[1])
        time.sleep(60)
        onos_instances = onos_instances - 2
        status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
        assert_equal(status, True)

    def test_cluster_removing_N_members(self, remove = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(remove):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Removing onos container with name %s'%standbys[i])
            cord_test_onos_shutdown(node = standbys[i])
        time.sleep(60)
        onos_instances = onos_instances - remove
        status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
        assert_equal(status, True)

    #nottest test cluster not coming up properly if member goes down
    def test_cluster_adding_and_removing_members(self, onos_instances = ONOS_INSTANCES, add = 2, remove = 2):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, onos_instances - i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        onos_instances = onos_instances-remove
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_removing_and_adding_member(self, onos_instances = ONOS_INSTANCES, add = 1, remove = 1):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = onos_instances-remove
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        onos_instances = onos_instances+add
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Restarting cluster')
        cord_test_onos_restart()
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    def test_cluster_master_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Restarting cluster master %s'%master)
        cord_test_onos_restart(node = master)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up after master restart as expected')

    #test fail. master changing after restart. Need to check correct behavior.
    def test_cluster_master_ip_after_master_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master1, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master1]
        log.info('Restarting cluster master %s'%master1)
        cord_test_onos_restart(node = master1)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master2, standbys = self.get_cluster_current_master_standbys()
        assert_equal(master1, master2)
        log.info('Cluster master is same before and after cluster master restart as expected')

    def test_cluster_one_member_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Restarting cluster member %s'%standbys[0])
        cord_test_onos_restart(node = standbys[0])
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting one member')

    def test_cluster_two_members_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Restarting cluster members %s and %s'%(standbys[0], standbys[1]))
        cord_test_onos_restart(node = standbys[0])
        cord_test_onos_restart(node = standbys[1])
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting two members')

    def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(members):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Restarting cluster member %s'%standbys[i])
            cord_test_onos_restart(node = standbys[i])

        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting %d members'%members)

    def test_cluster_state_with_master_change(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        log.info('Cluster current master of devices is %s'%master)
        self.change_master_current_cluster(new_master=standbys[0])
        log.info('Cluster master changed successfully')

    #tested on single onos setup.
    def test_cluster_with_vrouter_routes_in_cluster_members(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        for onos_ip in onos_ips:
            tries = 0
            flag = False
            try:
                self.cliEnter(controller = onos_ip)
                while tries <= 5:
                    routes = json.loads(self.cli.routes(jsonFormat = True))
                    if routes:
                        assert_equal(len(routes['routes4']), networks)
                        self.cliExit()
                        flag = True
                        break
                    else:
                        tries += 1
                        time.sleep(1)
                assert_equal(flag, True)
            except:
                log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
                raise

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_master_down(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        log.info('Verifying vrouter traffic after cluster master is down')
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_restarting_master(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        cord_test_onos_restart()
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        time.sleep(15)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app_and_making_master_down(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before master down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        log.info('Verifying vrouter traffic after app deactivated')
        time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        log.info('Verifying vrouter traffic after master down')
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before cluster member down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        log.info('Verifying vrouter after cluster member down')
        cord_test_onos_shutdown(node = standbys[0])
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter should work properly if member of cluster goes down

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_restarting_member(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[1]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster member restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart(node = standbys[1])
        log.info('Verifying vrouter traffic after cluster member restart')
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter should work properly if member of cluster restarts

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_restarting_cluster(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart()
        log.info('Verifying vrouter traffic after cluster restart')
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter should work properly if member of cluster restarts


    #test fails because flow state is in pending_add in onos
    def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
        ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            udpSrc = ingress_map['udp_port'],
                            udpDst = egress_map['udp_port'],
                            controller = master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
                self.success = True
            sniff(timeout=2,
                  lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
                  and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])

        for i in [0, 1]:
            if i == 1:
                cord_test_onos_shutdown(node = master)
                log.info('Verifying flows traffic after master killed')
                time.sleep(45)
            else:
                log.info('Verifying flows traffic before master killed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
            L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
            pkt = L2/L3/L4
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    def test_cluster_state_changing_master_and_flows_of_ecn(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1' }
        ingress_map = { 'ip': '192.168.40.1' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ecn = 1,
                            controller = master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
                self.success = True
            sniff(count=2, timeout=5,
                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
                  and int(bin(p[IP].tos).split('b')[1][-2:], 2) == 1, prn = recv_cb,
                  iface = self.flows.port_map[egress])
        for i in [0, 1]:
            if i == 1:
                log.info('Changing cluster master to %s'%standbys[0])
                self.change_master_current_cluster(new_master=standbys[0])
                log.info('Verifying flow traffic after cluster master changed')
            else:
                log.info('Verifying flow traffic before cluster master changed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
            pkt = L2/L3
            log.info('Sending a packet to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    #pass
    def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
        ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ipv6_extension = 0,
                            controller = master
                            )

        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
                self.success = True
            sniff(timeout=2, count=5,
                  lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
        for i in [0, 1]:
            if i == 1:
                log.info('Restart cluster current master %s'%master)
                Container(master_onos_name, Onos.IMAGE).restart()
                time.sleep(45)
                log.info('Verifying flow traffic after master restart')
            else:
                log.info('Verifying flow traffic before master restart')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth
            L3 = IPv6(src = ingress_map['ipv6'], dst = egress_map['ipv6'], nh = 0)
            pkt = L2/L3
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    def send_multicast_data_traffic(self, group, intf = 'veth2', source = '1.2.3.4'):
        dst_mac = self.igmp.iptomac(group)
        eth = Ether(dst = dst_mac)
        ip = IP(dst = group, src = source)
        data = repr(monotonic.monotonic())
        sendp(eth/ip/data, count=20, iface = intf)
        pkt = (eth/ip/data)
        log.info('multicast traffic packet %s'%pkt.show())

    def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4'):
        log.info('verifying multicast traffic for group %s from source %s'%(group, source))
        self.success = False
        def recv_task():
            def igmp_recv_cb(pkt):
                log.info('multicast data received for group %s from source %s'%(group, source))
                self.success = True
            sniff(prn = igmp_recv_cb, lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1, timeout = 2, iface='veth0')
        t = threading.Thread(target = recv_task)
        t.start()
        self.send_multicast_data_traffic(group, source=source)
        t.join()
        return self.success

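    # The IGMP tests below pair a join on self.V_INF1 with the two helpers above:
    # multicast data is sent on veth2 and sniffed on veth0, and verify_igmp_data_traffic()
    # returns True only when data for the (group, source) pair is actually received, e.g.:
    #
    #   self.igmp.send_igmp_join(groups = [group], src_list = src_list,
    #                            record_type = IGMP_V3_GR_TYPE_INCLUDE,
    #                            iface = self.V_INF1, delay = 2)
    #   status = self.verify_igmp_data_traffic(group, intf = self.V_INF1, source = src_list[0])
    #   assert_equal(status, True)
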
    #pass
    def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.igmp.setUp(controller=master)
        groups = ['224.2.3.4', '230.5.6.7']
        src_list = ['2.2.2.2', '3.3.3.3']
        self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_EXCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0], intf=self.V_INF1, source=src_list[0])
        assert_equal(status, True)
        status = self.verify_igmp_data_traffic(groups[1], intf = self.V_INF1, source= src_list[1])
        assert_equal(status, False)
        log.info('restarting cluster master %s'%master)
        Container(master_onos_name, Onos.IMAGE).restart()
        time.sleep(60)
        log.info('verifying multicast data traffic after master restart')
        status = self.verify_igmp_data_traffic(groups[0], intf=self.V_INF1, source=src_list[0])
        assert_equal(status, True)
        status = self.verify_igmp_data_traffic(groups[1], intf = self.V_INF1, source= src_list[1])
        assert_equal(status, False)

    #pass
    def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.igmp.setUp(controller=master)
        groups = [self.igmp.random_mcast_ip(), self.igmp.random_mcast_ip()]
        src_list = [self.igmp.randomsourceip()]
        self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_EXCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0], intf=self.V_INF1, source=src_list[0])
        assert_equal(status, True)
        status = self.verify_igmp_data_traffic(groups[1], intf = self.V_INF1, source= src_list[0])
        assert_equal(status, False)
        log.info('Killing cluster master %s'%master)
        Container(master_onos_name, Onos.IMAGE).kill()
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances=onos_instances-1, controller=standbys[0])
        assert_equal(status, True)
        log.info('Verifying multicast data traffic after cluster master down')
        status = self.verify_igmp_data_traffic(groups[0], intf=self.V_INF1, source=src_list[0])
        assert_equal(status, True)
        status = self.verify_igmp_data_traffic(groups[1], intf = self.V_INF1, source= src_list[0])
        assert_equal(status, False)

1160 def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
1161 status = self.verify_cluster_status(onos_instances=onos_instances)
1162 assert_equal(status, True)
1163 master, standbys = self.get_cluster_current_master_standbys()
1164 assert_equal(len(standbys), (onos_instances-1))
1165 onos_names_ips = self.get_cluster_container_names_ips()
1166 master_onos_name = onos_names_ips[master]
1167 self.igmp.setUp(controller=master)
1168 groups = [self.igmp.random_mcast_ip()]
1169 src_list = [self.igmp.randomsourceip()]
1170 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1171 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1172 iface = self.V_INF1, delay = 2)
1173 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1174 assert_equal(status,True)
1175        log.info('Killing cluster master %s'%master)
1176 Container(master_onos_name,Onos.IMAGE).kill()
1177 count = 0
1178 for i in range(60):
1179 log.info('Verifying multicast data traffic after cluster master down')
1180 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1181 if status:
1182 break
1183 else:
1184 count += 1
1185 time.sleep(1)
1186 assert_equal(status, True)
1187        log.info('Time taken to recover traffic after cluster master went down is %d seconds'%count)
1188
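    # Illustrative sketch only, not used by the tests above: the one-second retry loop in the
    # previous test could be factored into a reusable poll helper along these lines. The helper
    # name and signature are assumptions introduced here for clarity, not an existing API.
    # Example (hypothetical): recovery = self.poll_until_traffic_recovers(lambda: self.verify_igmp_data_traffic(group, intf=self.V_INF1, source=src))
    def poll_until_traffic_recovers(self, verify_cb, timeout = 60, interval = 1):
        '''Return the seconds waited until verify_cb() returns True, or -1 if it never does.'''
        elapsed = 0
        while elapsed < timeout:
            if verify_cb():
                return elapsed
            time.sleep(interval)
            elapsed += interval
        return -1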
1189
1190 #pass
1191 def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
1192 status = self.verify_cluster_status(onos_instances=onos_instances)
1193 assert_equal(status, True)
1194 master, standbys = self.get_cluster_current_master_standbys()
1195 assert_equal(len(standbys), (onos_instances-1))
1196 self.igmp.setUp(controller=master)
1197 groups = [self.igmp.random_mcast_ip()]
1198 src_list = [self.igmp.randomsourceip()]
1199 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1200 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1201 iface = self.V_INF1, delay = 2)
1202 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1203 assert_equal(status,True)
1204 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1205 self.change_cluster_current_master(new_master=standbys[0])
1206 log.info('Verifying multicast traffic after cluster master change')
1207 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1208 assert_equal(status,True)
1209 log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
1210 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
1211 iface = self.V_INF1, delay = 1)
1212 time.sleep(10)
1213 status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
1214 assert_equal(status,False)
1215
1216 #pass
1217 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1218 status = self.verify_cluster_status(onos_instances=onos_instances)
1219 assert_equal(status, True)
1220 master,standbys = self.get_cluster_current_master_standbys()
1221 assert_equal(len(standbys), (onos_instances-1))
1222 self.igmp.setUp(controller=master)
1223 groups = [self.igmp.random_mcast_ip()]
1224 src_list = [self.igmp.randomsourceip()]
1225 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1226 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1227 self.change_cluster_current_master(new_master = standbys[0])
1228 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1229 iface = self.V_INF1, delay = 2)
1230 time.sleep(1)
1231 self.change_cluster_current_master(new_master = master)
1232 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1233 assert_equal(status,True)
1234
1235 #pass
ChetanGaonker2099d722016-10-07 15:16:58 -07001236 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001237 def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001238 status = self.verify_cluster_status(onos_instances=onos_instances)
1239 assert_equal(status, True)
1240 master, standbys = self.get_cluster_current_master_standbys()
1241 assert_equal(len(standbys), (onos_instances-1))
1242 self.tls.setUp(controller=master)
1243 df = defer.Deferred()
1244 def eap_tls_verify(df):
1245 tls = TLSAuthTest()
1246 tls.runTest()
1247 df.callback(0)
1248 reactor.callLater(0, eap_tls_verify, df)
1249 return df
1250
1251 @deferred(120)
ChetanGaonker689b3862016-10-17 16:25:01 -07001252 def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001253 master, standbys = self.get_cluster_current_master_standbys()
1254 assert_equal(len(standbys), (onos_instances-1))
1255 self.tls.setUp()
1256 df = defer.Deferred()
1257        def eap_tls_verify(df):
1258 tls = TLSAuthTest()
1259 tls.runTest()
1260 df.callback(0)
1261 for i in [0,1]:
1262 if i == 1:
1263 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1264 self.change_master_current_cluster(new_master=standbys[0])
1265 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1266 else:
1267 log.info('Verifying tls authentication before cluster master change')
1268 reactor.callLater(0, eap_tls_verify, df)
1269 return df
1270
1271 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001272 def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001273 status = self.verify_cluster_status(onos_instances=onos_instances)
1274 assert_equal(status, True)
1275 master, standbys = self.get_cluster_current_master_standbys()
1276 assert_equal(len(standbys), (onos_instances-1))
1277 onos_names_ips = self.get_cluster_container_names_ips()
1278 master_onos_name = onos_names_ips[master]
1279 self.tls.setUp()
1280 df = defer.Deferred()
1281 def eap_tls_verify(df):
1282 tls = TLSAuthTest()
1283 tls.runTest()
1284 df.callback(0)
1285 for i in [0,1]:
1286 if i == 1:
1287 log.info('Killing cluster current master %s'%master)
A R Karthick3b2e0372016-12-14 17:37:43 -08001288 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001289 time.sleep(20)
1290 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1291 assert_equal(status, True)
1292 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1293 log.info('Verifying tls authentication after killing cluster master')
1294 reactor.callLater(0, eap_tls_verify, df)
1295 return df
1296
1297 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001298 def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001299 status = self.verify_cluster_status(onos_instances=onos_instances)
1300 assert_equal(status, True)
1301 master, standbys = self.get_cluster_current_master_standbys()
1302 assert_equal(len(standbys), (onos_instances-1))
1303 onos_names_ips = self.get_cluster_container_names_ips()
1304 member_onos_name = onos_names_ips[standbys[0]]
1305 self.tls.setUp()
1306 df = defer.Deferred()
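        # Negative test: with an empty client certificate the TLS handshake is expected to fail,
        # which TLSAuthTest reports through the fail_cb callback and its failTest flag.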
1307 def eap_tls_no_cert(df):
1308 def tls_no_cert_cb():
1309 log.info('TLS authentication failed with no certificate')
1310 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1311 tls.runTest()
1312 assert_equal(tls.failTest, True)
1313 df.callback(0)
1314 for i in [0,1]:
1315 if i == 1:
1316 log.info('Restart cluster member %s'%standbys[0])
1317 Container(member_onos_name,Onos.IMAGE).restart()
1318 time.sleep(20)
1319 status = self.verify_cluster_status(onos_instances=onos_instances)
1320 assert_equal(status, True)
1321 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1322 log.info('Verifying tls authentication after member restart')
1323 reactor.callLater(0, eap_tls_no_cert, df)
1324 return df
1325
ChetanGaonker689b3862016-10-17 16:25:01 -07001326 #pass
1327 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1328 status = self.verify_cluster_status(onos_instances=onos_instances)
1329 assert_equal(status,True)
1330 master,standbys = self.get_cluster_current_master_standbys()
1331 assert_equal(len(standbys),(onos_instances-1))
1332 self.proxyarp.setUpClass()
1333 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1334 ingress = hosts+1
1335 for hostip, hostmac in hosts_config:
1336 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1337 time.sleep(1)
1338 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1339 self.change_cluster_current_master(new_master=standbys[0])
1340 log.info('verifying proxyarp after master change')
1341 for hostip, hostmac in hosts_config:
1342 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1343 time.sleep(1)
1344 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1345 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1346 time.sleep(3)
1347 for hostip, hostmac in hosts_config:
1348 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1349 time.sleep(1)
1350 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1351 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1352 time.sleep(3)
1353 for hostip, hostmac in hosts_config:
1354 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1355 time.sleep(1)
ChetanGaonker2099d722016-10-07 15:16:58 -07001356
ChetanGaonker689b3862016-10-17 16:25:01 -07001357 #pass
1358 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001359 status = self.verify_cluster_status(onos_instances=onos_instances)
1360 assert_equal(status, True)
1361 master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001362 assert_equal(len(standbys), (onos_instances-1))
1363 onos_names_ips = self.get_cluster_container_names_ips()
1364 member_onos_name = onos_names_ips[standbys[1]]
1365 self.proxyarp.setUpClass()
1366 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1367 ingress = hosts+1
1368 for hostip, hostmac in hosts_config:
1369 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1370 time.sleep(1)
1371 log.info('killing cluster member %s'%standbys[1])
1372 Container(member_onos_name,Onos.IMAGE).kill()
1373 time.sleep(20)
1374 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1375 assert_equal(status, True)
1376 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1377 log.info('verifying proxy arp functionality after cluster member down')
1378 for hostip, hostmac in hosts_config:
1379 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1380 time.sleep(1)
1381
1382 #pass
1383 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1384 status = self.verify_cluster_status(onos_instances=onos_instances)
1385 assert_equal(status, True)
1386 self.proxyarp.setUpClass()
1387 master, standbys = self.get_cluster_current_master_standbys()
1388 assert_equal(len(standbys), (onos_instances-1))
1389 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1390 self.success = True
1391 ingress = hosts+1
1392 ports = range(ingress,ingress+10)
1393 hostmac = []
1394 hostip = []
1395 for ip,mac in hosts_config:
1396 hostmac.append(mac)
1397 hostip.append(ip)
1398 success_dir = {}
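        # Each worker thread records its result under its own thread name; the test passes only
        # if all 10 concurrent ARP requests received a proxy ARP reply.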
1399 def verify_proxyarp(*r):
1400 ingress, hostmac, hostip = r[0],r[1],r[2]
1401 def mac_recv_task():
1402 def recv_cb(pkt):
1403 log.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
1404 success_dir[current_thread().name] = True
1405 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1406 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1407 t = threading.Thread(target = mac_recv_task)
1408 t.start()
1409 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1410 log.info('Sending arp request for dest ip %s on interface %s' %
1411 (hostip,self.proxyarp.port_map[ingress]))
1412 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1413 t.join()
1414 t = []
1415 for i in range(10):
1416 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1417 for i in range(10):
1418 t[i].start()
1419 time.sleep(2)
1420 for i in range(10):
1421 t[i].join()
1422 if len(success_dir) != 10:
1423 self.success = False
1424 assert_equal(self.success, True)
1425
1426 #pass
1427 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1428 status = self.verify_cluster_status(onos_instances=onos_instances)
1429 assert_equal(status, True)
1430 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001431 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001432 self.acl.setUp()
1433 acl_rule = ACLTest()
1434 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1435 if status is False:
1436 log.info('JSON request returned status %d' %code)
1437 assert_equal(status, True)
1438 result = acl_rule.get_acl_rules(controller=master)
1439 aclRules1 = result.json()['aclRules']
1440 log.info('Added acl rules is %s'%aclRules1)
1441 acl_Id = map(lambda d: d['id'], aclRules1)
1442 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1443 self.change_cluster_current_master(new_master=standbys[0])
1444 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1445 if status is False:
1446 log.info('JSON request returned status %d' %code)
1447 assert_equal(status, True)
1448
1449 #pass
1450 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1451 status = self.verify_cluster_status(onos_instances=onos_instances)
1452 assert_equal(status, True)
1453 master,standbys = self.get_cluster_current_master_standbys()
1454 assert_equal(len(standbys),(onos_instances-1))
1455 onos_names_ips = self.get_cluster_container_names_ips()
1456 master_onos_name = onos_names_ips[master]
1457 self.acl.setUp()
1458 acl_rule = ACLTest()
1459 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1460 if status is False:
1461 log.info('JSON request returned status %d' %code)
1462 assert_equal(status, True)
1463 result1 = acl_rule.get_acl_rules(controller=master)
1464 aclRules1 = result1.json()['aclRules']
1465 log.info('Added acl rules is %s'%aclRules1)
1466 acl_Id1 = map(lambda d: d['id'], aclRules1)
1467 log.info('Killing cluster current master %s'%master)
1468 Container(master_onos_name,Onos.IMAGE).kill()
1469 time.sleep(45)
1470        status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1471 assert_equal(status, True)
1472 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1473 assert_equal(len(standbys),(onos_instances-2))
1474 assert_not_equal(new_master,master)
1475 result2 = acl_rule.get_acl_rules(controller=new_master)
1476 aclRules2 = result2.json()['aclRules']
1477 acl_Id2 = map(lambda d: d['id'], aclRules2)
1478 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1479 assert_equal(acl_Id2,acl_Id1)
1480
1481 #acl traffic scenario not working as acl rule is not getting added to onos
1482 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1483 status = self.verify_cluster_status(onos_instances=onos_instances)
1484 assert_equal(status, True)
1485 master,standbys = self.get_cluster_current_master_standbys()
1486 assert_equal(len(standbys),(onos_instances-1))
1487 onos_names_ips = self.get_cluster_container_names_ips()
1488 member1_onos_name = onos_names_ips[standbys[0]]
1489 member2_onos_name = onos_names_ips[standbys[1]]
1490 ingress = self.acl.ingress_iface
1491 egress = self.acl.CURRENT_PORT_NUM
1492 acl_rule = ACLTest()
1493 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1494 self.acl.CURRENT_PORT_NUM += 1
1495 time.sleep(5)
1496 if status is False:
1497 log.info('JSON request returned status %d' %code)
1498 assert_equal(status, True)
1499 srcMac = '00:00:00:00:00:11'
1500 dstMac = host_ip_mac[0][1]
1501 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1502 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1503 time.sleep(10)
1504 if status is False:
1505 log.info('JSON request returned status %d' %code)
1506 assert_equal(status, True)
1507 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1508 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1509 Container(member1_onos_name, Onos.IMAGE).kill()
1510 Container(member2_onos_name, Onos.IMAGE).kill()
1511 time.sleep(40)
1512 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1513 assert_equal(status, True)
1514 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1515 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1516
1517 #pass
1518 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1519 status = self.verify_cluster_status(onos_instances=onos_instances)
1520 assert_equal(status, True)
1521 master,standbys = self.get_cluster_current_master_standbys()
1522 assert_equal(len(standbys),(onos_instances-1))
1523 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001524 mac = self.dhcprelay.get_mac(iface)
1525 self.dhcprelay.host_load(iface)
1526 ##we use the defaults for this test that serves as an example for others
1527 ##You don't need to restart dhcpd server if retaining default config
1528 config = self.dhcprelay.default_config
1529 options = self.dhcprelay.default_options
1530 subnet = self.dhcprelay.default_subnet_config
1531 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1532 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1533 config = config,
1534 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001535 subnet = subnet,
1536 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001537 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1538 cip, sip = self.dhcprelay.send_recv(mac)
1539 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1540 self.change_master_current_cluster(new_master=standbys[0])
1541 log.info('Releasing ip %s to server %s' %(cip, sip))
1542 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1543 log.info('Triggering DHCP discover again after release')
1544 cip2, sip2 = self.dhcprelay.send_recv(mac)
1545 log.info('Verifying released IP was given back on rediscover')
1546 assert_equal(cip, cip2)
1547 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1548 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001549 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001550
ChetanGaonker689b3862016-10-17 16:25:01 -07001551
1552 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1553 status = self.verify_cluster_status(onos_instances=onos_instances)
1554 assert_equal(status, True)
1555 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001556 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001557 onos_names_ips = self.get_cluster_container_names_ips()
1558 master_onos_name = onos_names_ips[master]
1559 self.dhcprelay.setUpClass(controller=master)
1560 mac = self.dhcprelay.get_mac(iface)
1561 self.dhcprelay.host_load(iface)
1562 ##we use the defaults for this test that serves as an example for others
1563 ##You don't need to restart dhcpd server if retaining default config
1564 config = self.dhcprelay.default_config
1565 options = self.dhcprelay.default_options
1566 subnet = self.dhcprelay.default_subnet_config
1567 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1568 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1569 config = config,
1570 options = options,
1571 subnet = subnet,
1572 controller=master)
1573 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1574 log.info('Initiating dhcp process from client %s'%mac)
1575 cip, sip = self.dhcprelay.send_recv(mac)
1576 log.info('Killing cluster current master %s'%master)
1577 Container(master_onos_name, Onos.IMAGE).kill()
1578 time.sleep(60)
1579 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1580 assert_equal(status, True)
1581 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1582        log.info("Verifying dhcp client gets the same IP after cluster master is down")
1583 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1584 assert_equal(new_cip, cip)
1585 self.dhcprelay.tearDownClass(controller=standbys[0])
1586
1587 #pass
1588 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1589 status = self.verify_cluster_status(onos_instances=onos_instances)
1590 assert_equal(status, True)
1591 master,standbys = self.get_cluster_current_master_standbys()
1592 assert_equal(len(standbys),(onos_instances-1))
1593 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001594 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1595 self.dhcprelay.host_load(iface)
1596 ##we use the defaults for this test that serves as an example for others
1597 ##You don't need to restart dhcpd server if retaining default config
1598 config = self.dhcprelay.default_config
1599 options = self.dhcprelay.default_options
1600 subnet = self.dhcprelay.default_subnet_config
1601 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1602 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1603 config = config,
1604 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001605 subnet = subnet,
1606 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001607 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1608 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1609 assert_not_equal(cip1,None)
1610 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1611 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1612 self.change_master_current_cluster(new_master=standbys[0])
1613 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1614 assert_not_equal(cip2,None)
1615 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1616 self.change_master_current_cluster(new_master=master)
1617        log.info('Changed cluster master from %s back to %s'%(standbys[0],master))
1618 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1619 assert_not_equal(cip3,None)
1620        log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
ChetanGaonker689b3862016-10-17 16:25:01 -07001621 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001622
ChetanGaonker689b3862016-10-17 16:25:01 -07001623 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001624 status = self.verify_cluster_status(onos_instances=onos_instances)
1625 assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001626        self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001627 self.subscriber.num_subscribers = 5
1628 self.subscriber.num_channels = 10
1629 for i in [0,1]:
1630 if i == 1:
1631 cord_test_onos_restart()
1632 time.sleep(45)
1633 status = self.verify_cluster_status(onos_instances=onos_instances)
1634 assert_equal(status, True)
1635 log.info('Verifying cord subscriber functionality after cluster restart')
1636 else:
1637 log.info('Verifying cord subscriber functionality before cluster restart')
1638 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1639 num_channels = self.subscriber.num_channels,
1640 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1641 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1642 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1643 self.subscriber.num_channels))
1644 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001645 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001646
ChetanGaonker689b3862016-10-17 16:25:01 -07001647 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1648 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1649 status = self.verify_cluster_status(onos_instances=onos_instances)
1650 assert_equal(status, True)
1651 master,standbys = self.get_cluster_current_master_standbys()
1652 assert_equal(len(standbys),(onos_instances-1))
1653 self.subscriber.setUpClass(controller=master)
1654 self.subscriber.num_subscribers = 5
1655 self.subscriber.num_channels = 10
1656 for i in [0,1]:
1657 if i == 1:
1658 status=self.withdraw_cluster_current_mastership(master_ip=master)
1659                assert_equal(status, True)
1660 master,standbys = self.get_cluster_current_master_standbys()
1661 log.info('verifying cord subscriber functionality after cluster current master withdraw mastership')
1662 else:
1663 log.info('verifying cord subscriber functionality before cluster master withdraw mastership')
1664 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1665 num_channels = self.subscriber.num_channels,
1666 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1667 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1668 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1669 self.subscriber.num_channels),controller=master)
1670 assert_equal(test_status, True)
1671 self.subscriber.tearDownClass(controller=master)
1672
1673 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1674 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001675 status = self.verify_cluster_status(onos_instances=onos_instances)
1676 assert_equal(status, True)
1677 master, standbys = self.get_cluster_current_master_standbys()
1678 assert_equal(len(standbys),(onos_instances-1))
1679 onos_names_ips = self.get_cluster_container_names_ips()
1680 member_onos_name = onos_names_ips[standbys[0]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001681 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001682 num_subscribers = 1
1683 num_channels = 10
1684 for i in [0,1]:
1685 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001686 cord_test_onos_shutdown(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001687 time.sleep(30)
ChetanGaonker689b3862016-10-17 16:25:01 -07001688 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001689 assert_equal(status, True)
1690 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1691 else:
1692 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1693 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1694 num_channels = num_channels,
1695 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1696 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1697 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
ChetanGaonker689b3862016-10-17 16:25:01 -07001698 negative_subscriber_auth = 'all',controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001699 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001700 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001701
ChetanGaonker689b3862016-10-17 16:25:01 -07001702 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001703 status = self.verify_cluster_status(onos_instances=onos_instances)
1704 assert_equal(status, True)
1705 master, standbys = self.get_cluster_current_master_standbys()
1706 assert_equal(len(standbys),(onos_instances-1))
1707 onos_names_ips = self.get_cluster_container_names_ips()
1708 member1_onos_name = onos_names_ips[standbys[0]]
1709 member2_onos_name = onos_names_ips[standbys[1]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001710 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001711 num_subscribers = 1
1712 num_channels = 10
1713 for i in [0,1]:
1714 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001715 cord_test_onos_shutdown(node = standbys[0])
1716 cord_test_onos_shutdown(node = standbys[1])
ChetanGaonker2099d722016-10-07 15:16:58 -07001717 time.sleep(60)
1718 status = self.verify_cluster_status(onos_instances=onos_instances-2)
1719 assert_equal(status, True)
1720                log.info('Verifying cord subscriber functionality after cluster two members %s and %s down'%(standbys[0],standbys[1]))
1721 else:
1722                log.info('Verifying cord subscriber functionality before cluster two members %s and %s down'%(standbys[0],standbys[1]))
1723 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1724 num_channels = num_channels,
1725 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1726 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1727 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1728 negative_subscriber_auth = 'all')
1729 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001730 self.subscriber.tearDownClass(controller=master)
1731
1732 #pass
1733 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1734 status = self.verify_cluster_status(onos_instances=onos_instances)
1735 assert_equal(status, True)
1736 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1737 for device in device_dict.keys():
1738 log.info("Device is %s"%device_dict[device])
1739 assert_not_equal(device_dict[device]['master'],'none')
1740 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1741 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1742
1743 #pass
1744 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1745 status = self.verify_cluster_status(onos_instances=onos_instances)
1746 assert_equal(status, True)
1747 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1748 cluster_ips = self.get_cluster_current_member_ips()
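        # Query the mastership view from every cluster member to confirm all instances agree on
        # the master and standbys for each connected device.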
1749 for ip in cluster_ips:
1750 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1751 assert_equal(len(device_dict.keys()),onos_instances)
1752 for device in device_dict.keys():
1753 log.info("Device is %s"%device_dict[device])
1754 assert_not_equal(device_dict[device]['master'],'none')
1755 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1756 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1757
1758 #pass
1759 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1760 status = self.verify_cluster_status(onos_instances=onos_instances)
1761 assert_equal(status, True)
1762 onos_names_ips = self.get_cluster_container_names_ips()
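        # master_count maps each controller IP to the devices it masters, e.g.
        # { '<controller-ip>': { 'size': <device count>, 'devices': [<device ids>] } }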
1763 master_count = self.get_number_of_devices_of_master()
1764 log.info('Master count information is %s'%master_count)
1765 total_devices = 0
1766 for master in master_count.keys():
1767 total_devices += master_count[master]['size']
1768 if master_count[master]['size'] != 0:
1769 restart_ip = master
1770 assert_equal(total_devices,onos_instances)
1771 member_onos_name = onos_names_ips[restart_ip]
1772 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1773 Container(member_onos_name, Onos.IMAGE).restart()
1774 time.sleep(40)
1775 master_count = self.get_number_of_devices_of_master()
1776 log.info('Master count information after restart is %s'%master_count)
1777 total_devices = 0
1778 for master in master_count.keys():
1779 total_devices += master_count[master]['size']
1780 if master == restart_ip:
1781 assert_equal(master_count[master]['size'], 0)
1782 assert_equal(total_devices,onos_instances)
1783
1784 #pass
1785 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1786 status = self.verify_cluster_status(onos_instances=onos_instances)
1787 assert_equal(status, True)
1788 onos_names_ips = self.get_cluster_container_names_ips()
1789 master_count = self.get_number_of_devices_of_master()
1790 log.info('Master count information is %s'%master_count)
1791 total_devices = 0
1792 for master in master_count.keys():
1793 total_devices += master_count[master]['size']
1794 if master_count[master]['size'] != 0:
1795 restart_ip = master
1796 assert_equal(total_devices,onos_instances)
1797 master_onos_name = onos_names_ips[restart_ip]
1798 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1799 Container(master_onos_name, Onos.IMAGE).kill()
1800 time.sleep(40)
1801 for ip in onos_names_ips.keys():
1802 if ip != restart_ip:
1803 controller_ip = ip
1804 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1805 assert_equal(status, True)
1806 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1807 log.info('Master count information after restart is %s'%master_count)
1808 total_devices = 0
1809 for master in master_count.keys():
1810 total_devices += master_count[master]['size']
1811 if master == restart_ip:
1812 assert_equal(master_count[master]['size'], 0)
1813 assert_equal(total_devices,onos_instances)
1814
1815 #pass
1816 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1817 status = self.verify_cluster_status(onos_instances=onos_instances)
1818 assert_equal(status, True)
1819 master_count = self.get_number_of_devices_of_master()
1820 log.info('Master count information is %s'%master_count)
1821 total_devices = 0
1822 for master in master_count.keys():
1823 total_devices += int(master_count[master]['size'])
1824 if master_count[master]['size'] != 0:
1825 master_ip = master
1826            log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
1827 device_id = str(master_count[master]['devices'][0])
1828 device_count = master_count[master]['size']
1829 assert_equal(total_devices,onos_instances)
1830 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1831 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1832 assert_equal(status, True)
1833 master_count = self.get_number_of_devices_of_master()
1834 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1835 total_devices = 0
1836 for master in master_count.keys():
1837 total_devices += int(master_count[master]['size'])
1838 if master == master_ip:
1839 assert_equal(master_count[master]['size'], device_count-1)
1840 assert_equal(total_devices,onos_instances)
1841
1842 #pass
1843 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1844 status = self.verify_cluster_status(onos_instances=onos_instances)
1845 assert_equal(status, True)
1846 master_count = self.get_number_of_devices_of_master()
1847 log.info('Master count information is %s'%master_count)
1848 total_devices = 0
1849 for master in master_count.keys():
1850 total_devices += master_count[master]['size']
1851 assert_equal(total_devices,onos_instances)
1852 log.info('Restarting cluster')
1853 cord_test_onos_restart()
1854 time.sleep(60)
1855 master_count = self.get_number_of_devices_of_master()
1856 log.info('Master count information after restart is %s'%master_count)
1857 total_devices = 0
1858 for master in master_count.keys():
1859 total_devices += master_count[master]['size']
1860 assert_equal(total_devices,onos_instances)