# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl
from OltConfig import OltConfig
from CordTestUtils import get_mac, get_controller, get_controllers
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart, cord_test_restart_cluster
from portmaps import g_subscriber_port_map
from scapy.all import *
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
from OnosLog import OnosLog
from CordLogger import CordLogger
from CordTestConfig import setup_module
import os
import json
import random
import collections
log.setLevel('INFO')

class cluster_exchange(CordLogger):
    test_path = os.path.dirname(os.path.realpath(__file__))
    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
    mac = RandMAC()._fix()
    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
    igmp_ip = IP(dst = '224.0.0.22')
    ONOS_INSTANCES = 3
    V_INF1 = 'veth0'
    TLS_TIMEOUT = 100
    device_id = 'of:' + get_mac()
    igmp = cluster_igmp()
    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10',end_ip = '224.1.10.49')
    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35',end_ip='38.24.35.56')
    tls = cluster_tls()
    flows = cluster_flows()
    proxyarp = cluster_proxyarp()
    vrouter = cluster_vrouter()
    acl = cluster_acl()
    dhcprelay = cluster_dhcprelay()
    subscriber = cluster_subscriber()
    testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_graceful_controller_restarts',
                       'test_cluster_single_controller_restarts', 'test_cluster_restarts')
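    #the long-running restart tests above iterate ITERATIONS times; the count can be
    #overridden through the ITERATIONS environment variable (default 10)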
    ITERATIONS = int(os.getenv('ITERATIONS', 10))
    ARCHIVE_PARTITION = False

    def setUp(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).setUp()

    def tearDown(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).tearDown()

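    #cliEnter retries the ONOS CLI connection (up to 30 attempts, 2 seconds apart) and
    #caches the driver handle on self.cli; cliExit drops the connection again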
    def cliEnter(self, controller = None):
        retries = 0
        while retries < 30:
            self.cli = OnosCliDriver(controller = controller, connect = True)
            if self.cli.handle:
                break
            else:
                retries += 1
                time.sleep(2)

    def cliExit(self):
        self.cli.disconnect()

    def get_leader(self, controller = None):
        self.cliEnter(controller = controller)
        try:
            result = json.loads(self.cli.leaders(jsonFormat = True))
        except:
            result = None

        if result is None:
            log.info('Leaders command failure for controller %s' %controller)
        else:
            log.info('Leaders returned: %s' %result)
        self.cliExit()
        return result

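    #onos_shutdown asks the controller to stop itself gracefully through the ONOS CLI
    #'shutdown' command and reports whether the command was accepted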
    def onos_shutdown(self, controller = None):
        status = True
        self.cliEnter(controller = controller)
        try:
            self.cli.shutdown(timeout = 10)
        except:
            log.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
            status = False

        self.cliExit()
        return status

    def log_set(self, level = None, app = 'org.onosproject', controllers = None):
        CordLogger.logSet(level = level, app = app, controllers = controllers, forced = True)

    def get_leaders(self, controller = None):
        result_map = {}
        if controller is None:
            controller = get_controller()
        if type(controller) in [ list, tuple ]:
            for c in controller:
                leaders = self.get_leader(controller = c)
                result_map[c] = leaders
        else:
            leaders = self.get_leader(controller = controller)
            result_map[controller] = leaders
        return result_map

    def verify_leaders(self, controller = None):
        leaders_map = self.get_leaders(controller = controller)
        failed = [ k for k,v in leaders_map.items() if v == None ]
        return failed

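    #verify_cluster_status polls the ONOS 'summary' command up to 10 times; with
    #verify=True the node count must match onos_instances exactly, otherwise a node
    #count >= onos_instances is accepted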
    def verify_cluster_status(self,controller = None,onos_instances=ONOS_INSTANCES,verify=False):
        tries = 0
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
                if cluster_summary:
                    log.info("cluster 'summary' command output is %s"%cluster_summary)
                    nodes = cluster_summary['nodes']
                    if verify:
                        if nodes == onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                    else:
                        if nodes >= onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return False
        except:
            raise Exception('Failed to get cluster members')
            return False

    def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
        tries = 0
        cluster_ips = []
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
                if cluster_nodes:
                    log.info("cluster 'nodes' output is %s"%cluster_nodes)
                    if nodes_filter:
                        cluster_nodes = nodes_filter(cluster_nodes)
                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
                    self.cliExit()
                    cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
                    return cluster_ips
                else:
                    tries += 1
            self.cliExit()
            return cluster_ips
        except:
            raise Exception('Failed to get cluster members')
            return cluster_ips

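    #get_cluster_container_names_ips builds a bidirectional map between controller IPs
    #and container names, assuming the first controller container is named Onos.NAME and
    #the remaining ones '<Onos.NAME>-2', '<Onos.NAME>-3', ...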
    def get_cluster_container_names_ips(self,controller=None):
        onos_names_ips = {}
        controllers = get_controllers()
        i = 0
        for controller in controllers:
            if i == 0:
                name = Onos.NAME
            else:
                name = '{}-{}'.format(Onos.NAME, i+1)
            onos_names_ips[controller] = name
            onos_names_ips[name] = controller
            i += 1
        return onos_names_ips
        # onos_ips = self.get_cluster_current_member_ips(controller=controller)
        # onos_names_ips[onos_ips[0]] = Onos.NAME
        # onos_names_ips[Onos.NAME] = onos_ips[0]
        # for i in range(1,len(onos_ips)):
        #     name = '{0}-{1}'.format(Onos.NAME,i+1)
        #     onos_names_ips[onos_ips[i]] = name
        #     onos_names_ips[name] = onos_ips[i]

        # return onos_names_ips

    #identifying current master of a connected device, not tested
    def get_cluster_current_master_standbys(self,controller=None,device_id=device_id):
        master = None
        standbys = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        log.info('Verifying device info in line %s'%device)
                        if device['id'] == device_id:
                            master = str(device['master'])
                            standbys = map(lambda d: str(d), device['standbys'])
                            log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
                            self.cliExit()
                            return master, standbys
                    self.cliExit()
                    return master, standbys
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return master,standbys
        except:
            raise Exception('Failed to get cluster members')
            return master,standbys

    def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
        ''' returns master and standbys of all the devices connected to the ONOS cluster instance'''
        device_dict = {}
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_dict = {}
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
                        for i in range(len(device_dict[device['id']]['standbys'])):
                            device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
                        log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
                    self.cliExit()
                    return device_dict
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_dict
        except:
            raise Exception('Failed to get cluster members')
            return device_dict

    #identify the devices connected to the ONOS cluster, not tested
    def get_cluster_connected_devices(self,controller=None):
        '''returns all the devices connected to the ONOS cluster'''
        device_list = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_list = []
                devices = json.loads(self.cli.devices(jsonFormat = True))
                log.info("cluster 'devices' command output is %s"%devices)
                if devices:
                    for device in devices:
                        log.info('device id is %s'%device['id'])
                        device_list.append(str(device['id']))
                    self.cliExit()
                    return device_list
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_list
        except:
            raise Exception('Failed to get cluster members')
            return device_list

    def get_number_of_devices_of_master(self,controller=None):
        '''returns master-device pairs, i.e. which devices each master currently controls'''
        master_count = {}
        try:
            cli = self.cliEnter(controller = controller)
            masters = json.loads(self.cli.masters(jsonFormat = True))
            if masters:
                for master in masters:
                    master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
                return master_count
            else:
                return master_count
        except:
            raise Exception('Failed to get cluster members')
            return master_count

    def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
        if new_master is None: return False
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_equal(master,new_master)
        log.info('Cluster master changed to %s successfully'%new_master)

    def withdraw_cluster_current_mastership(self,master_ip=None,device_id=device_id,controller=None):
        '''current master loses its mastership and hence a new master will be elected'''
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_not_equal(new_master_ip,master_ip)
        log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
        log.info('Cluster new master is %s'%new_master_ip)
        return True

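    #driver for the controller restart tests: on every iteration one controller is
    #restarted (optionally after a graceful CLI shutdown), the per-iteration logs are
    #archived, and the remaining nodes are checked for StorageException timeouts and
    #failures of the 'leaders' command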
    def cluster_controller_restarts(self, graceful = False):
        controllers = get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration, controller = None):
            adjacent_controller = None
            adjacent_controllers = None
            if controller:
                adjacent_controllers = list(set(controllers) - set([controller]))
                adjacent_controller = adjacent_controllers[0]
            for node in controllers:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    failed = self.verify_leaders(controllers)
                    if failed:
                        log.info('Leaders command failed on nodes: %s' %failed)
                        log.error('Test failed on ITERATION %d' %iteration)
                        CordLogger.archive_results(self._testMethodName,
                                                   controllers = controllers,
                                                   iteration = 'FAILED',
                                                   archive_partition = self.ARCHIVE_PARTITION)
                    assert_equal(len(failed), 0)
                    return controller

            try:
                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
                log.info('ONOS cluster formed with controllers: %s' %ips)
                st = True
            except:
                st = False

            failed = self.verify_leaders(controllers)
            if failed:
                log.error('Test failed on ITERATION %d' %iteration)
                CordLogger.archive_results(self._testMethodName,
                                           controllers = controllers,
                                           iteration = 'FAILED',
                                           archive_partition = self.ARCHIVE_PARTITION)
            assert_equal(len(failed), 0)
            if st is False:
                log.info('No storage exception and ONOS cluster was not formed successfully')
            else:
                controller = None

            return controller

        next_controller = None
        tries = self.ITERATIONS
        for num in range(tries):
            index = num % ctlr_len
            #index = random.randrange(0, ctlr_len)
            controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
            controller = onos_map[controller_name]
            log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
            try:
                #enable debug log for the other controllers before restarting this controller
                adjacent_controllers = list( set(controllers) - set([controller]) )
                self.log_set(controllers = adjacent_controllers)
                self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
                if graceful is True:
                    log.info('Gracefully shutting down controller: %s' %controller)
                    self.onos_shutdown(controller)
                cord_test_onos_restart(node = controller, timeout = 0)
                self.log_set(controllers = controller)
                self.log_set(app = 'io.atomix', controllers = controller)
                time.sleep(60)
            except:
                time.sleep(5)
                continue

            #first archive the test case logs for this run
            CordLogger.archive_results(self._testMethodName,
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1),
                                       archive_partition = self.ARCHIVE_PARTITION)
            next_controller = check_exception(num, controller = controller)

    def test_cluster_controller_restarts(self):
        '''Test the cluster by repeatedly killing the controllers'''
        self.cluster_controller_restarts()

    def test_cluster_graceful_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the controllers gracefully'''
        self.cluster_controller_restarts(graceful = True)

    def test_cluster_single_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the same controller'''
        controllers = get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration, controller, inclusive = False):
            adjacent_controllers = list(set(controllers) - set([controller]))
            adjacent_controller = adjacent_controllers[0]
            controller_list = adjacent_controllers if inclusive == False else controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                log.error('Test failed on ITERATION %d' %iteration)
                CordLogger.archive_results('test_cluster_single_controller_restarts',
                                           controllers = controllers,
                                           iteration = 'FAILED',
                                           archive_partition = self.ARCHIVE_PARTITION)
                assert_equal(len(failed), 0)
                return controller

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if controller in ips and inclusive is False:
                    log.info('Controller %s still ACTIVE on Node %s after it was shutdown' %(controller, ctlr))
                if controller not in ips and inclusive is True:
                    log.info('Controller %s still INACTIVE on Node %s after it was restarted' %(controller, ctlr))

            return controller

        tries = self.ITERATIONS
        #choose a random controller for shutdown/restarts
        controller = controllers[random.randrange(0, ctlr_len)]
        controller_name = onos_map[controller]
        ##enable the log level for the controllers
        self.log_set(controllers = controllers)
        self.log_set(app = 'io.atomix', controllers = controllers)
        for num in range(tries):
            log.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
            try:
                cord_test_onos_shutdown(node = controller)
                time.sleep(20)
            except:
                time.sleep(5)
                continue
            #check for exceptions on the adjacent nodes
            check_exception(num, controller)
            #Now restart the controller back
            log.info('Restarting back the controller %s' %controller_name)
            cord_test_onos_restart(node = controller)
            self.log_set(controllers = controller)
            self.log_set(app = 'io.atomix', controllers = controller)
            time.sleep(60)
            #archive the logs for this run
            CordLogger.archive_results('test_cluster_single_controller_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1),
                                       archive_partition = self.ARCHIVE_PARTITION)
            check_exception(num, controller, inclusive = True)

    def test_cluster_restarts(self):
        '''Test the cluster by repeatedly restarting the entire cluster'''
        controllers = get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration):
            controller_list = controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                log.error('Test failed on ITERATION %d' %iteration)
                CordLogger.archive_results('test_cluster_restarts',
                                           controllers = controllers,
                                           iteration = 'FAILED',
                                           archive_partition = self.ARCHIVE_PARTITION)
                assert_equal(len(failed), 0)
                return

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if len(ips) != len(controllers):
                    log.error('Test failed on ITERATION %d' %iteration)
                    CordLogger.archive_results('test_cluster_restarts',
                                               controllers = controllers,
                                               iteration = 'FAILED',
                                               archive_partition = self.ARCHIVE_PARTITION)
                assert_equal(len(ips), len(controllers))

        tries = self.ITERATIONS
        for num in range(tries):
            log.info('ITERATION: %d. Restarting cluster with controllers at %s' %(num+1, controllers))
            try:
                cord_test_restart_cluster()
                self.log_set(controllers = controllers)
                self.log_set(app = 'io.atomix', controllers = controllers)
                log.info('Delaying before verifying cluster status')
                time.sleep(60)
            except:
                time.sleep(10)
                continue

            #archive the logs for this run before verification
            CordLogger.archive_results('test_cluster_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1),
                                       archive_partition = self.ARCHIVE_PARTITION)
            #check for exceptions on the cluster nodes
            check_exception(num)

    #pass
    def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster exists with %d ONOS instances'%onos_instances)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d nodes to the ONOS cluster' %add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Removing cluster current master %s'%(master))
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
        assert_equal(status, True)
        new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
        assert_not_equal(master,new_master)
        log.info("Successfully removed the cluster's master instance")

    def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = standbys[0])
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
        assert_equal(status, True)

    def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = standbys[0])
        log.info('Removing cluster member %s'%standbys[1])
        cord_test_onos_shutdown(node = standbys[1])
        time.sleep(60)
        onos_instances = onos_instances - 2
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
        assert_equal(status, True)

    def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(remove):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Removing onos container with name %s'%standbys[i])
            cord_test_onos_shutdown(node = standbys[i])
        time.sleep(60)
        onos_instances = onos_instances - remove
        status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
        assert_equal(status, True)

    #nottest: cluster not coming up properly if member goes down
    def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, onos_instances - i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        onos_instances = onos_instances-remove
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = onos_instances-remove
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        onos_instances = onos_instances+add
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Restarting cluster')
        cord_test_onos_restart()
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Restarting cluster master %s'%master)
        cord_test_onos_restart(node = master)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up after master restart as expected')

    #test fails; master changes after restart. Need to check the correct behavior.
    def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master1, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master1]
        log.info('Restarting cluster master %s'%master1)
        cord_test_onos_restart(node = master1)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master2, standbys = self.get_cluster_current_master_standbys()
        assert_equal(master1,master2)
        log.info('Cluster master is same before and after cluster master restart as expected')

    def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Restarting cluster member %s'%standbys[0])
        cord_test_onos_restart(node = standbys[0])
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting one member')

    def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
        cord_test_onos_restart(node = standbys[0])
        cord_test_onos_restart(node = standbys[1])
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting two members')

    def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status,True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(members):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Restarting cluster member %s'%standbys[i])
            cord_test_onos_restart(node = standbys[i])

        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting %d members'%members)

    def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        log.info('Cluster current master of devices is %s'%master)
        self.change_master_current_cluster(new_master=standbys[0])
        log.info('Cluster master changed successfully')

    #tested on single onos setup.
    def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        for onos_ip in onos_ips:
            tries = 0
            flag = False
            try:
                self.cliEnter(controller = onos_ip)
                while tries <= 5:
                    routes = json.loads(self.cli.routes(jsonFormat = True))
                    if routes:
                        assert_equal(len(routes['routes4']), networks)
                        self.cliExit()
                        flag = True
                        break
                    else:
                        tries += 1
                        time.sleep(1)
                assert_equal(flag, True)
            except:
                log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
                raise

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res,True)
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        log.info('Verifying vrouter traffic after cluster master is down')
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        cord_test_onos_restart()
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        time.sleep(15)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before master down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        log.info('Verifying vrouter traffic after app deactivated')
        time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        log.info('Verifying vrouter traffic after master down')
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before cluster member down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        log.info('Verifying vrouter after cluster member down')
        cord_test_onos_shutdown(node = standbys[0])
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter should work properly if member of cluster goes down

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[1]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster member restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart(node = standbys[1])
        log.info('Verifying vrouter traffic after cluster member restart')
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter should work properly if member of cluster restarts

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart()
        log.info('Verifying vrouter traffic after cluster restart')
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter should work properly if the cluster restarts

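    #the flow tests below install a flow through OnosFlowCtrl on the cluster master and
    #then verify it on the data plane with scapy: a sniffer thread listens on the egress
    #port while crafted packets are sent on the ingress port, both before and after the
    #master is killed, changed or restarted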
935 #test fails because flow state is in pending_add in onos
ChetanGaonker689b3862016-10-17 16:25:01 -0700936 def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700937 status = self.verify_cluster_status(onos_instances = onos_instances)
938 assert_equal(status, True)
939 master, standbys = self.get_cluster_current_master_standbys()
940 onos_names_ips = self.get_cluster_container_names_ips()
941 master_onos_name = onos_names_ips[master]
942 self.flows.setUpClass()
943 egress = 1
944 ingress = 2
945 egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
946 ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
947 flow = OnosFlowCtrl(deviceId = self.device_id,
948 egressPort = egress,
949 ingressPort = ingress,
950 udpSrc = ingress_map['udp_port'],
951 udpDst = egress_map['udp_port'],
952 controller=master
953 )
954 result = flow.addFlow()
955 assert_equal(result, True)
956 time.sleep(1)
957 self.success = False
958 def mac_recv_task():
959 def recv_cb(pkt):
960 log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
961 self.success = True
962 sniff(timeout=2,
963 lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
964 and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])
965
966 for i in [0,1]:
967 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -0800968 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -0700969 log.info('Verifying flows traffic after master killed')
970 time.sleep(45)
971 else:
972 log.info('Verifying flows traffic before master killed')
973 t = threading.Thread(target = mac_recv_task)
974 t.start()
975 L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
976 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
977 L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
978 pkt = L2/L3/L4
979 log.info('Sending packets to verify if flows are correct')
980 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
981 t.join()
982 assert_equal(self.success, True)
983
ChetanGaonker689b3862016-10-17 16:25:01 -0700984 def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700985 status = self.verify_cluster_status(onos_instances=onos_instances)
986 assert_equal(status, True)
987 master, standbys = self.get_cluster_current_master_standbys()
988 self.flows.setUpClass()
989 egress = 1
990 ingress = 2
991 egress_map = { 'ip': '192.168.30.1' }
992 ingress_map = { 'ip': '192.168.40.1' }
993 flow = OnosFlowCtrl(deviceId = self.device_id,
994 egressPort = egress,
995 ingressPort = ingress,
996 ecn = 1,
997 controller=master
998 )
999 result = flow.addFlow()
1000 assert_equal(result, True)
1001 ##wait for flows to be added to ONOS
1002 time.sleep(1)
1003 self.success = False
1004 def mac_recv_task():
1005 def recv_cb(pkt):
1006 log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
1007 self.success = True
1008 sniff(count=2, timeout=5,
1009 lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
1010 and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
1011 iface = self.flows.port_map[egress])
1012 for i in [0,1]:
1013 if i == 1:
1014 log.info('Changing cluster master to %s'%standbys[0])
1015 self.change_master_current_cluster(new_master=standbys[0])
1016 log.info('Verifying flow traffic after cluster master chnaged')
1017 else:
1018 log.info('Verifying flow traffic before cluster master changed')
1019 t = threading.Thread(target = mac_recv_task)
1020 t.start()
1021 L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
1022 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
1023 pkt = L2/L3
1024 log.info('Sending a packet to verify if flows are correct')
1025 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
1026 t.join()
1027 assert_equal(self.success, True)
1028
ChetanGaonker689b3862016-10-17 16:25:01 -07001029 #pass
1030 def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
1031 status = self.verify_cluster_status(onos_instances=onos_instances)
1032 assert_equal(status, True)
1033 master,standbys = self.get_cluster_current_master_standbys()
1034 onos_names_ips = self.get_cluster_container_names_ips()
1035 master_onos_name = onos_names_ips[master]
1036 self.flows.setUpClass()
1037 egress = 1
1038 ingress = 2
1039 egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
1040 ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
1041 flow = OnosFlowCtrl(deviceId = self.device_id,
1042 egressPort = egress,
1043 ingressPort = ingress,
1044 ipv6_extension = 0,
1045 controller=master
1046 )
1047
1048 result = flow.addFlow()
1049 assert_equal(result, True)
1050 ##wait for flows to be added to ONOS
1051 time.sleep(1)
1052 self.success = False
1053 def mac_recv_task():
1054 def recv_cb(pkt):
1055 log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
1056 self.success = True
1057 sniff(timeout=2,count=5,
1058 lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
1059 for i in [0,1]:
1060 if i == 1:
1061 log.info('Restart cluster current master %s'%master)
1062 Container(master_onos_name,Onos.IMAGE).restart()
1063 time.sleep(45)
1064 log.info('Verifying flow traffic after master restart')
1065 else:
1066 log.info('Verifying flow traffic before master restart')
1067 t = threading.Thread(target = mac_recv_task)
1068 t.start()
1069 L2 = self.flows_eth
1070 L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
1071 pkt = L2/L3
1072 log.info('Sending packets to verify if flows are correct')
1073 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
1074 t.join()
1075 assert_equal(self.success, True)
1076
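    #multicast helpers: send_multicast_data_traffic() crafts and sends data packets for a
    #group, while verify_igmp_data_traffic() sniffs for that traffic in a parallel thread
    #and reports whether it was received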
1077 def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
1078 dst_mac = self.igmp.iptomac(group)
1079 eth = Ether(dst= dst_mac)
1080 ip = IP(dst=group,src=source)
1081 data = repr(monotonic.monotonic())
1082 sendp(eth/ip/data,count=20, iface = intf)
1083 pkt = (eth/ip/data)
1084 log.info('multicast traffic packet %s'%pkt.show())
1085
1086 def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
1087 log.info('verifying multicast traffic for group %s from source %s'%(group,source))
1088 self.success = False
1089 def recv_task():
1090 def igmp_recv_cb(pkt):
1091 log.info('multicast data received for group %s from source %s'%(group,source))
1092 self.success = True
1093 sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
1094 t = threading.Thread(target = recv_task)
1095 t.start()
1096 self.send_multicast_data_traffic(group,source=source)
1097 t.join()
1098 return self.success
1099
1100 #pass
1101 def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
1102 status = self.verify_cluster_status(onos_instances=onos_instances)
1103 assert_equal(status, True)
1104 master, standbys = self.get_cluster_current_master_standbys()
1105 assert_equal(len(standbys), (onos_instances-1))
1106 onos_names_ips = self.get_cluster_container_names_ips()
1107 master_onos_name = onos_names_ips[master]
1108 self.igmp.setUp(controller=master)
1109 groups = ['224.2.3.4','230.5.6.7']
1110 src_list = ['2.2.2.2','3.3.3.3']
1111 self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
1112 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1113 iface = self.V_INF1, delay = 2)
1114 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1115 iface = self.V_INF1, delay = 2)
1116 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1117 assert_equal(status,True)
1118 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1119 assert_equal(status,False)
1120 log.info('restarting cluster master %s'%master)
1121 Container(master_onos_name,Onos.IMAGE).restart()
1122 time.sleep(60)
1123 log.info('verifying multicast data traffic after master restart')
1124 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1125 assert_equal(status,True)
1126 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1127 assert_equal(status,False)
1128
1129 #pass
1130 def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
1131 status = self.verify_cluster_status(onos_instances=onos_instances)
1132 assert_equal(status, True)
1133 master, standbys = self.get_cluster_current_master_standbys()
1134 assert_equal(len(standbys), (onos_instances-1))
1135 onos_names_ips = self.get_cluster_container_names_ips()
1136 master_onos_name = onos_names_ips[master]
1137 self.igmp.setUp(controller=master)
1138 groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
1139 src_list = [self.igmp.randomsourceip()]
1140 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1141 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1142 iface = self.V_INF1, delay = 2)
1143 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1144 iface = self.V_INF1, delay = 2)
1145 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1146 assert_equal(status,True)
1147 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1148 assert_equal(status,False)
1149 log.info('Killing cluster master %s'%master)
1150 Container(master_onos_name,Onos.IMAGE).kill()
1151 time.sleep(60)
1152 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1153 assert_equal(status, True)
1154 log.info('Verifying multicast data traffic after cluster master down')
1155 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1156 assert_equal(status,True)
1157 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1158 assert_equal(status,False)
1159
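    # Several tests below sleep for a fixed 45-60 seconds after restarting or killing the
    # master. The sketch below is a hypothetical polling alternative, assuming
    # self.verify_cluster_status is cheap enough to call repeatedly; it is an illustration,
    # not part of the original suite.
    def wait_for_cluster_sketch(self, onos_instances, controller = None, max_wait = 60, interval = 5):
        elapsed = 0
        while elapsed < max_wait:
            if self.verify_cluster_status(onos_instances = onos_instances, controller = controller):
                return True
            time.sleep(interval)
            elapsed += interval
        return False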
1160 def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
1161 status = self.verify_cluster_status(onos_instances=onos_instances)
1162 assert_equal(status, True)
1163 master, standbys = self.get_cluster_current_master_standbys()
1164 assert_equal(len(standbys), (onos_instances-1))
1165 onos_names_ips = self.get_cluster_container_names_ips()
1166 master_onos_name = onos_names_ips[master]
1167 self.igmp.setUp(controller=master)
1168 groups = [self.igmp.random_mcast_ip()]
1169 src_list = [self.igmp.randomsourceip()]
1170 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1171 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1172 iface = self.V_INF1, delay = 2)
1173 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1174 assert_equal(status,True)
1175        log.info('Killing cluster master %s'%master)
1176 Container(master_onos_name,Onos.IMAGE).kill()
1177 count = 0
1178 for i in range(60):
1179 log.info('Verifying multicast data traffic after cluster master down')
1180 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1181 if status:
1182 break
1183 else:
1184 count += 1
1185 time.sleep(1)
1186 assert_equal(status, True)
1187        log.info('Time taken to recover traffic after cluster master down is %d seconds'%count)
1188
1189
1190 #pass
1191 def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
1192 status = self.verify_cluster_status(onos_instances=onos_instances)
1193 assert_equal(status, True)
1194 master, standbys = self.get_cluster_current_master_standbys()
1195 assert_equal(len(standbys), (onos_instances-1))
1196 self.igmp.setUp(controller=master)
1197 groups = [self.igmp.random_mcast_ip()]
1198 src_list = [self.igmp.randomsourceip()]
1199 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1200 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1201 iface = self.V_INF1, delay = 2)
1202 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1203 assert_equal(status,True)
1204 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1205 self.change_cluster_current_master(new_master=standbys[0])
1206 log.info('Verifying multicast traffic after cluster master change')
1207 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1208 assert_equal(status,True)
1209 log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
1210 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
1211 iface = self.V_INF1, delay = 1)
1212 time.sleep(10)
1213 status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
1214 assert_equal(status,False)
1215
1216 #pass
1217 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1218 status = self.verify_cluster_status(onos_instances=onos_instances)
1219 assert_equal(status, True)
1220 master,standbys = self.get_cluster_current_master_standbys()
1221 assert_equal(len(standbys), (onos_instances-1))
1222 self.igmp.setUp(controller=master)
1223 groups = [self.igmp.random_mcast_ip()]
1224 src_list = [self.igmp.randomsourceip()]
1225 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1226 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1227 self.change_cluster_current_master(new_master = standbys[0])
1228 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1229 iface = self.V_INF1, delay = 2)
1230 time.sleep(1)
1231 self.change_cluster_current_master(new_master = master)
1232 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1233 assert_equal(status,True)
1234
1235 #pass
ChetanGaonker2099d722016-10-07 15:16:58 -07001236 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001237 def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001238 status = self.verify_cluster_status(onos_instances=onos_instances)
1239 assert_equal(status, True)
1240 master, standbys = self.get_cluster_current_master_standbys()
1241 assert_equal(len(standbys), (onos_instances-1))
1242 self.tls.setUp(controller=master)
1243 df = defer.Deferred()
1244 def eap_tls_verify(df):
1245 tls = TLSAuthTest()
1246 tls.runTest()
1247 df.callback(0)
1248 reactor.callLater(0, eap_tls_verify, df)
1249 return df
1250
1251 @deferred(120)
ChetanGaonker689b3862016-10-17 16:25:01 -07001252 def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001253 master, standbys = self.get_cluster_current_master_standbys()
1254 assert_equal(len(standbys), (onos_instances-1))
1255 self.tls.setUp()
1256 df = defer.Deferred()
1257        def eap_tls_verify(df):
1258 tls = TLSAuthTest()
1259 tls.runTest()
1260 df.callback(0)
1261 for i in [0,1]:
1262 if i == 1:
1263 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1264 self.change_master_current_cluster(new_master=standbys[0])
1265 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1266 else:
1267 log.info('Verifying tls authentication before cluster master change')
1268 reactor.callLater(0, eap_tls_verify, df)
1269 return df
1270
1271 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001272 def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001273 status = self.verify_cluster_status(onos_instances=onos_instances)
1274 assert_equal(status, True)
1275 master, standbys = self.get_cluster_current_master_standbys()
1276 assert_equal(len(standbys), (onos_instances-1))
1277 onos_names_ips = self.get_cluster_container_names_ips()
1278 master_onos_name = onos_names_ips[master]
1279 self.tls.setUp()
1280 df = defer.Deferred()
1281 def eap_tls_verify(df):
1282 tls = TLSAuthTest()
1283 tls.runTest()
1284 df.callback(0)
1285 for i in [0,1]:
1286 if i == 1:
1287 log.info('Killing cluster current master %s'%master)
A R Karthick3b2e0372016-12-14 17:37:43 -08001288 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001289 time.sleep(20)
1290 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1291 assert_equal(status, True)
1292 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1293 log.info('Verifying tls authentication after killing cluster master')
1294 reactor.callLater(0, eap_tls_verify, df)
1295 return df
1296
1297 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001298 def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001299 status = self.verify_cluster_status(onos_instances=onos_instances)
1300 assert_equal(status, True)
1301 master, standbys = self.get_cluster_current_master_standbys()
1302 assert_equal(len(standbys), (onos_instances-1))
1303 onos_names_ips = self.get_cluster_container_names_ips()
1304 member_onos_name = onos_names_ips[standbys[0]]
1305 self.tls.setUp()
1306 df = defer.Deferred()
1307 def eap_tls_no_cert(df):
1308 def tls_no_cert_cb():
1309 log.info('TLS authentication failed with no certificate')
1310 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1311 tls.runTest()
1312 assert_equal(tls.failTest, True)
1313 df.callback(0)
1314 for i in [0,1]:
1315 if i == 1:
1316 log.info('Restart cluster member %s'%standbys[0])
1317 Container(member_onos_name,Onos.IMAGE).restart()
1318 time.sleep(20)
1319 status = self.verify_cluster_status(onos_instances=onos_instances)
1320 assert_equal(status, True)
1321 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1322 log.info('Verifying tls authentication after member restart')
1323 reactor.callLater(0, eap_tls_no_cert, df)
1324 return df
1325
ChetanGaonker689b3862016-10-17 16:25:01 -07001326 #pass
1327 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1328 status = self.verify_cluster_status(onos_instances=onos_instances)
1329 assert_equal(status,True)
1330 master,standbys = self.get_cluster_current_master_standbys()
1331 assert_equal(len(standbys),(onos_instances-1))
1332 self.proxyarp.setUpClass()
1333 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1334 ingress = hosts+1
1335 for hostip, hostmac in hosts_config:
1336 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1337 time.sleep(1)
1338 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1339 self.change_cluster_current_master(new_master=standbys[0])
1340 log.info('verifying proxyarp after master change')
1341 for hostip, hostmac in hosts_config:
1342 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1343 time.sleep(1)
1344 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1345 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1346 time.sleep(3)
1347 for hostip, hostmac in hosts_config:
1348 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1349 time.sleep(1)
1350 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1351 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1352 time.sleep(3)
1353 for hostip, hostmac in hosts_config:
1354 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1355 time.sleep(1)
ChetanGaonker2099d722016-10-07 15:16:58 -07001356
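    # The proxyarp tests repeat the same per-host ARP reply check; the hypothetical helper
    # below sketches how that loop could be wrapped. It only calls the existing
    # proxyarp_arpreply_verify already used above and is illustrative, not the suite's API.
    def verify_all_proxyarp_replies_sketch(self, ingress, hosts_config, positive = True):
        for hostip, hostmac in hosts_config:
            self.proxyarp.proxyarp_arpreply_verify(ingress, hostip, hostmac, PositiveTest = positive)
            time.sleep(1)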
ChetanGaonker689b3862016-10-17 16:25:01 -07001357 #pass
1358 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001359 status = self.verify_cluster_status(onos_instances=onos_instances)
1360 assert_equal(status, True)
1361 master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001362 assert_equal(len(standbys), (onos_instances-1))
1363 onos_names_ips = self.get_cluster_container_names_ips()
1364 member_onos_name = onos_names_ips[standbys[1]]
1365 self.proxyarp.setUpClass()
1366 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1367 ingress = hosts+1
1368 for hostip, hostmac in hosts_config:
1369 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1370 time.sleep(1)
1371 log.info('killing cluster member %s'%standbys[1])
1372 Container(member_onos_name,Onos.IMAGE).kill()
1373 time.sleep(20)
1374 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1375 assert_equal(status, True)
1376 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1377 log.info('verifying proxy arp functionality after cluster member down')
1378 for hostip, hostmac in hosts_config:
1379 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1380 time.sleep(1)
1381
1382 #pass
1383 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1384 status = self.verify_cluster_status(onos_instances=onos_instances)
1385 assert_equal(status, True)
1386 self.proxyarp.setUpClass()
1387 master, standbys = self.get_cluster_current_master_standbys()
1388 assert_equal(len(standbys), (onos_instances-1))
1389 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1390 self.success = True
1391 ingress = hosts+1
1392 ports = range(ingress,ingress+10)
1393 hostmac = []
1394 hostip = []
1395 for ip,mac in hosts_config:
1396 hostmac.append(mac)
1397 hostip.append(ip)
1398 success_dir = {}
1399 def verify_proxyarp(*r):
1400 ingress, hostmac, hostip = r[0],r[1],r[2]
1401 def mac_recv_task():
1402 def recv_cb(pkt):
1403 log.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
1404 success_dir[current_thread().name] = True
1405 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1406 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1407 t = threading.Thread(target = mac_recv_task)
1408 t.start()
1409 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1410 log.info('Sending arp request for dest ip %s on interface %s' %
1411 (hostip,self.proxyarp.port_map[ingress]))
1412 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1413 t.join()
1414 t = []
1415 for i in range(10):
1416 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1417 for i in range(10):
1418 t[i].start()
1419 time.sleep(2)
1420 for i in range(10):
1421 t[i].join()
1422 if len(success_dir) != 10:
1423 self.success = False
1424 assert_equal(self.success, True)
1425
1426 #pass
1427 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1428 status = self.verify_cluster_status(onos_instances=onos_instances)
1429 assert_equal(status, True)
1430 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001431 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001432 self.acl.setUp()
1433 acl_rule = ACLTest()
1434 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1435 if status is False:
1436 log.info('JSON request returned status %d' %code)
1437 assert_equal(status, True)
1438 result = acl_rule.get_acl_rules(controller=master)
1439 aclRules1 = result.json()['aclRules']
1440 log.info('Added acl rules is %s'%aclRules1)
1441 acl_Id = map(lambda d: d['id'], aclRules1)
1442 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1443 self.change_cluster_current_master(new_master=standbys[0])
1444 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1445 if status is False:
1446 log.info('JSON request returned status %d' %code)
1447 assert_equal(status, True)
1448
1449 #pass
1450 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1451 status = self.verify_cluster_status(onos_instances=onos_instances)
1452 assert_equal(status, True)
1453 master,standbys = self.get_cluster_current_master_standbys()
1454 assert_equal(len(standbys),(onos_instances-1))
1455 onos_names_ips = self.get_cluster_container_names_ips()
1456 master_onos_name = onos_names_ips[master]
1457 self.acl.setUp()
1458 acl_rule = ACLTest()
1459 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1460 if status is False:
1461 log.info('JSON request returned status %d' %code)
1462 assert_equal(status, True)
1463 result1 = acl_rule.get_acl_rules(controller=master)
1464 aclRules1 = result1.json()['aclRules']
1465 log.info('Added acl rules is %s'%aclRules1)
1466 acl_Id1 = map(lambda d: d['id'], aclRules1)
1467 log.info('Killing cluster current master %s'%master)
1468 Container(master_onos_name,Onos.IMAGE).kill()
1469 time.sleep(45)
1470        status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1471 assert_equal(status, True)
1472 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1473 assert_equal(len(standbys),(onos_instances-2))
1474 assert_not_equal(new_master,master)
1475 result2 = acl_rule.get_acl_rules(controller=new_master)
1476 aclRules2 = result2.json()['aclRules']
1477 acl_Id2 = map(lambda d: d['id'], aclRules2)
1478 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1479 assert_equal(acl_Id2,acl_Id1)
1480
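    # The two ACL tests above both fetch the rules and map out their ids; a tiny helper
    # like this hypothetical sketch could avoid the duplication. It relies only on
    # ACLTest.get_acl_rules(controller=...) and the 'aclRules' field already used above.
    def get_acl_rule_ids_sketch(self, acl_rule, controller = None):
        result = acl_rule.get_acl_rules(controller = controller)
        rules = result.json()['aclRules']
        return map(lambda d: d['id'], rules)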
1481 #acl traffic scenario not working as acl rule is not getting added to onos
1482 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1483 status = self.verify_cluster_status(onos_instances=onos_instances)
1484 assert_equal(status, True)
1485 master,standbys = self.get_cluster_current_master_standbys()
1486 assert_equal(len(standbys),(onos_instances-1))
1487 onos_names_ips = self.get_cluster_container_names_ips()
1488 member1_onos_name = onos_names_ips[standbys[0]]
1489 member2_onos_name = onos_names_ips[standbys[1]]
1490 ingress = self.acl.ingress_iface
1491 egress = self.acl.CURRENT_PORT_NUM
1492 acl_rule = ACLTest()
1493 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1494 self.acl.CURRENT_PORT_NUM += 1
1495 time.sleep(5)
1496 if status is False:
1497 log.info('JSON request returned status %d' %code)
1498 assert_equal(status, True)
1499 srcMac = '00:00:00:00:00:11'
1500 dstMac = host_ip_mac[0][1]
1501 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1502 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1503 time.sleep(10)
1504 if status is False:
1505 log.info('JSON request returned status %d' %code)
1506 assert_equal(status, True)
1507 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1508 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1509 Container(member1_onos_name, Onos.IMAGE).kill()
1510 Container(member2_onos_name, Onos.IMAGE).kill()
1511 time.sleep(40)
1512 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1513 assert_equal(status, True)
1514 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1515 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1516
1517 #pass
1518 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1519 status = self.verify_cluster_status(onos_instances=onos_instances)
1520 assert_equal(status, True)
1521 master,standbys = self.get_cluster_current_master_standbys()
1522 assert_equal(len(standbys),(onos_instances-1))
1523 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001524 mac = self.dhcprelay.get_mac(iface)
1525 self.dhcprelay.host_load(iface)
1526 ##we use the defaults for this test that serves as an example for others
1527 ##You don't need to restart dhcpd server if retaining default config
1528 config = self.dhcprelay.default_config
1529 options = self.dhcprelay.default_options
1530 subnet = self.dhcprelay.default_subnet_config
1531 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1532 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1533 config = config,
1534 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001535 subnet = subnet,
1536 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001537 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1538 cip, sip = self.dhcprelay.send_recv(mac)
1539 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1540 self.change_master_current_cluster(new_master=standbys[0])
1541 log.info('Releasing ip %s to server %s' %(cip, sip))
1542 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1543 log.info('Triggering DHCP discover again after release')
1544 cip2, sip2 = self.dhcprelay.send_recv(mac)
1545 log.info('Verifying released IP was given back on rediscover')
1546 assert_equal(cip, cip2)
1547 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1548 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001549 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001550
ChetanGaonker689b3862016-10-17 16:25:01 -07001551
1552 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1553 status = self.verify_cluster_status(onos_instances=onos_instances)
1554 assert_equal(status, True)
1555 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001556 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001557 onos_names_ips = self.get_cluster_container_names_ips()
1558 master_onos_name = onos_names_ips[master]
1559 self.dhcprelay.setUpClass(controller=master)
1560 mac = self.dhcprelay.get_mac(iface)
1561 self.dhcprelay.host_load(iface)
1562 ##we use the defaults for this test that serves as an example for others
1563 ##You don't need to restart dhcpd server if retaining default config
1564 config = self.dhcprelay.default_config
1565 options = self.dhcprelay.default_options
1566 subnet = self.dhcprelay.default_subnet_config
1567 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1568 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1569 config = config,
1570 options = options,
1571 subnet = subnet,
1572 controller=master)
1573 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1574 log.info('Initiating dhcp process from client %s'%mac)
1575 cip, sip = self.dhcprelay.send_recv(mac)
1576 log.info('Killing cluster current master %s'%master)
1577 Container(master_onos_name, Onos.IMAGE).kill()
1578 time.sleep(60)
1579 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1580 assert_equal(status, True)
1581 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1582        log.info("Verifying dhcp client gets the same IP after cluster master goes down")
1583 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1584 assert_equal(new_cip, cip)
1585 self.dhcprelay.tearDownClass(controller=standbys[0])
1586
1587 #pass
1588 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1589 status = self.verify_cluster_status(onos_instances=onos_instances)
1590 assert_equal(status, True)
1591 master,standbys = self.get_cluster_current_master_standbys()
1592 assert_equal(len(standbys),(onos_instances-1))
1593 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001594 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1595 self.dhcprelay.host_load(iface)
1596 ##we use the defaults for this test that serves as an example for others
1597 ##You don't need to restart dhcpd server if retaining default config
1598 config = self.dhcprelay.default_config
1599 options = self.dhcprelay.default_options
1600 subnet = self.dhcprelay.default_subnet_config
1601 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1602 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1603 config = config,
1604 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001605 subnet = subnet,
1606 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001607 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1608 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1609 assert_not_equal(cip1,None)
1610 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1611 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1612 self.change_master_current_cluster(new_master=standbys[0])
1613 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1614 assert_not_equal(cip2,None)
1615 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1616 self.change_master_current_cluster(new_master=master)
1617        log.info('Changed cluster master from %s back to %s'%(standbys[0],master))
1618 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1619 assert_not_equal(cip3,None)
1620        log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
ChetanGaonker689b3862016-10-17 16:25:01 -07001621 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001622
ChetanGaonker689b3862016-10-17 16:25:01 -07001623 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001624 status = self.verify_cluster_status(onos_instances=onos_instances)
1625 assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001626        self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001627 self.subscriber.num_subscribers = 5
1628 self.subscriber.num_channels = 10
1629 for i in [0,1]:
1630 if i == 1:
1631 cord_test_onos_restart()
1632 time.sleep(45)
1633 status = self.verify_cluster_status(onos_instances=onos_instances)
1634 assert_equal(status, True)
1635 log.info('Verifying cord subscriber functionality after cluster restart')
1636 else:
1637 log.info('Verifying cord subscriber functionality before cluster restart')
1638 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1639 num_channels = self.subscriber.num_channels,
1640 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1641 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1642 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1643 self.subscriber.num_channels))
1644 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001645 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001646
ChetanGaonker689b3862016-10-17 16:25:01 -07001647 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1648 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1649 status = self.verify_cluster_status(onos_instances=onos_instances)
1650 assert_equal(status, True)
1651 master,standbys = self.get_cluster_current_master_standbys()
1652 assert_equal(len(standbys),(onos_instances-1))
1653 self.subscriber.setUpClass(controller=master)
1654 self.subscriber.num_subscribers = 5
1655 self.subscriber.num_channels = 10
1656 for i in [0,1]:
1657 if i == 1:
1658 status=self.withdraw_cluster_current_mastership(master_ip=master)
1659                assert_equal(status, True)
1660 master,standbys = self.get_cluster_current_master_standbys()
1661 log.info('verifying cord subscriber functionality after cluster current master withdraw mastership')
1662 else:
1663 log.info('verifying cord subscriber functionality before cluster master withdraw mastership')
1664 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1665 num_channels = self.subscriber.num_channels,
1666 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1667 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1668 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1669 self.subscriber.num_channels),controller=master)
1670 assert_equal(test_status, True)
1671 self.subscriber.tearDownClass(controller=master)
1672
1673 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1674 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001675 status = self.verify_cluster_status(onos_instances=onos_instances)
1676 assert_equal(status, True)
1677 master, standbys = self.get_cluster_current_master_standbys()
1678 assert_equal(len(standbys),(onos_instances-1))
1679 onos_names_ips = self.get_cluster_container_names_ips()
1680 member_onos_name = onos_names_ips[standbys[0]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001681 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001682 num_subscribers = 1
1683 num_channels = 10
1684 for i in [0,1]:
1685 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001686 cord_test_onos_shutdown(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001687 time.sleep(30)
ChetanGaonker689b3862016-10-17 16:25:01 -07001688 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001689 assert_equal(status, True)
1690 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1691 else:
1692 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1693 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1694 num_channels = num_channels,
1695 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1696 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1697 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
ChetanGaonker689b3862016-10-17 16:25:01 -07001698 negative_subscriber_auth = 'all',controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001699 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001700 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001701
ChetanGaonker689b3862016-10-17 16:25:01 -07001702 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001703 status = self.verify_cluster_status(onos_instances=onos_instances)
1704 assert_equal(status, True)
1705 master, standbys = self.get_cluster_current_master_standbys()
1706 assert_equal(len(standbys),(onos_instances-1))
1707 onos_names_ips = self.get_cluster_container_names_ips()
1708 member1_onos_name = onos_names_ips[standbys[0]]
1709 member2_onos_name = onos_names_ips[standbys[1]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001710 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001711 num_subscribers = 1
1712 num_channels = 10
1713 for i in [0,1]:
1714 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001715 cord_test_onos_shutdown(node = standbys[0])
1716 cord_test_onos_shutdown(node = standbys[1])
ChetanGaonker2099d722016-10-07 15:16:58 -07001717 time.sleep(60)
1718 status = self.verify_cluster_status(onos_instances=onos_instances-2)
1719 assert_equal(status, True)
1720                log.info('Verifying cord subscriber functionality after cluster two members %s and %s down'%(standbys[0],standbys[1]))
1721 else:
1722                log.info('Verifying cord subscriber functionality before cluster two members %s and %s down'%(standbys[0],standbys[1]))
1723 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1724 num_channels = num_channels,
1725 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1726 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1727 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1728 negative_subscriber_auth = 'all')
1729 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001730 self.subscriber.tearDownClass(controller=master)
1731
1732 #pass
1733 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1734 status = self.verify_cluster_status(onos_instances=onos_instances)
1735 assert_equal(status, True)
1736 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1737 for device in device_dict.keys():
1738 log.info("Device is %s"%device_dict[device])
1739 assert_not_equal(device_dict[device]['master'],'none')
1740 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1741 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1742
1743 #pass
1744 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1745 status = self.verify_cluster_status(onos_instances=onos_instances)
1746 assert_equal(status, True)
1747 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1748 cluster_ips = self.get_cluster_current_member_ips()
1749 for ip in cluster_ips:
1750 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1751 assert_equal(len(device_dict.keys()),onos_instances)
1752 for device in device_dict.keys():
1753 log.info("Device is %s"%device_dict[device])
1754 assert_not_equal(device_dict[device]['master'],'none')
1755 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1756 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1757
1758 #pass
1759 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1760 status = self.verify_cluster_status(onos_instances=onos_instances)
1761 assert_equal(status, True)
1762 onos_names_ips = self.get_cluster_container_names_ips()
1763 master_count = self.get_number_of_devices_of_master()
1764 log.info('Master count information is %s'%master_count)
1765 total_devices = 0
1766 for master in master_count.keys():
1767 total_devices += master_count[master]['size']
1768 if master_count[master]['size'] != 0:
1769 restart_ip = master
1770 assert_equal(total_devices,onos_instances)
1771 member_onos_name = onos_names_ips[restart_ip]
1772 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1773 Container(member_onos_name, Onos.IMAGE).restart()
1774 time.sleep(40)
1775 master_count = self.get_number_of_devices_of_master()
1776 log.info('Master count information after restart is %s'%master_count)
1777 total_devices = 0
1778 for master in master_count.keys():
1779 total_devices += master_count[master]['size']
1780 if master == restart_ip:
1781 assert_equal(master_count[master]['size'], 0)
1782 assert_equal(total_devices,onos_instances)
1783
1784 #pass
1785 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1786 status = self.verify_cluster_status(onos_instances=onos_instances)
1787 assert_equal(status, True)
1788 onos_names_ips = self.get_cluster_container_names_ips()
1789 master_count = self.get_number_of_devices_of_master()
1790 log.info('Master count information is %s'%master_count)
1791 total_devices = 0
1792 for master in master_count.keys():
1793 total_devices += master_count[master]['size']
1794 if master_count[master]['size'] != 0:
1795 restart_ip = master
1796 assert_equal(total_devices,onos_instances)
1797 master_onos_name = onos_names_ips[restart_ip]
1798 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1799 Container(master_onos_name, Onos.IMAGE).kill()
1800 time.sleep(40)
1801 for ip in onos_names_ips.keys():
1802 if ip != restart_ip:
1803 controller_ip = ip
1804 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1805 assert_equal(status, True)
1806 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1807 log.info('Master count information after restart is %s'%master_count)
1808 total_devices = 0
1809 for master in master_count.keys():
1810 total_devices += master_count[master]['size']
1811 if master == restart_ip:
1812 assert_equal(master_count[master]['size'], 0)
1813 assert_equal(total_devices,onos_instances)
1814
1815 #pass
1816 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1817 status = self.verify_cluster_status(onos_instances=onos_instances)
1818 assert_equal(status, True)
1819 master_count = self.get_number_of_devices_of_master()
1820 log.info('Master count information is %s'%master_count)
1821 total_devices = 0
1822 for master in master_count.keys():
1823 total_devices += int(master_count[master]['size'])
1824 if master_count[master]['size'] != 0:
1825 master_ip = master
1826                log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
1827 device_id = str(master_count[master]['devices'][0])
1828 device_count = master_count[master]['size']
1829 assert_equal(total_devices,onos_instances)
1830 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1831 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1832 assert_equal(status, True)
1833 master_count = self.get_number_of_devices_of_master()
1834 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1835 total_devices = 0
1836 for master in master_count.keys():
1837 total_devices += int(master_count[master]['size'])
1838 if master == master_ip:
1839 assert_equal(master_count[master]['size'], device_count-1)
1840 assert_equal(total_devices,onos_instances)
1841
1842 #pass
1843 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1844 status = self.verify_cluster_status(onos_instances=onos_instances)
1845 assert_equal(status, True)
1846 master_count = self.get_number_of_devices_of_master()
1847 log.info('Master count information is %s'%master_count)
1848 total_devices = 0
1849 for master in master_count.keys():
1850 total_devices += master_count[master]['size']
1851 assert_equal(total_devices,onos_instances)
1852 log.info('Restarting cluster')
1853 cord_test_onos_restart()
1854 time.sleep(60)
1855 master_count = self.get_number_of_devices_of_master()
1856 log.info('Master count information after restart is %s'%master_count)
1857 total_devices = 0
1858 for master in master_count.keys():
1859 total_devices += master_count[master]['size']
1860 assert_equal(total_devices,onos_instances)
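
    # The mastership tests above repeatedly sum master_count[...]['size']; the helper below
    # is an illustrative sketch of that tally (pure dict arithmetic, no new APIs), offered
    # as a possible refactoring rather than part of the original suite.
    def total_mastered_devices_sketch(self, master_count):
        total = 0
        for master in master_count.keys():
            total += int(master_count[master]['size'])
        return total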