blob: 88a991584fc3b5ea5d6fcb91fd99135873a2f5fe [file] [log] [blame]
ChetanGaonker2099d722016-10-07 15:16:58 -07001#copyright 2016-present Ciena Corporation
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14#
15import unittest
16from nose.tools import *
17from scapy.all import *
18from OnosCtrl import OnosCtrl, get_mac
19from OltConfig import OltConfig
20from socket import socket
21from OnosFlowCtrl import OnosFlowCtrl
22from nose.twistedtools import reactor, deferred
23from twisted.internet import defer
24from onosclidriver import OnosCliDriver
25from CordContainer import Container, Onos, Quagga
A.R Karthick2560f042016-11-30 14:38:52 -080026from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart, cord_test_restart_cluster
ChetanGaonker2099d722016-10-07 15:16:58 -070027from portmaps import g_subscriber_port_map
28from scapy.all import *
29import time, monotonic
30import threading
31from threading import current_thread
32from Cluster import *
33from EapTLS import TLSAuthTest
34from ACL import ACLTest
A R Karthick1f908202016-11-16 17:32:20 -080035from OnosLog import OnosLog
36from CordLogger import CordLogger
A.R Karthick99044822017-02-09 14:04:20 -080037from CordTestConfig import setup_module
ChetanGaonker2099d722016-10-07 15:16:58 -070038import os
39import json
40import random
41import collections
42log.setLevel('INFO')
43
A R Karthick1f908202016-11-16 17:32:20 -080044class cluster_exchange(CordLogger):
ChetanGaonker2099d722016-10-07 15:16:58 -070045 test_path = os.path.dirname(os.path.realpath(__file__))
46 onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
47 mac = RandMAC()._fix()
48 flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
49 igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
50 igmp_ip = IP(dst = '224.0.0.22')
51 ONOS_INSTANCES = 3
52 V_INF1 = 'veth0'
53 TLS_TIMEOUT = 100
54 device_id = 'of:' + get_mac()
55 igmp = cluster_igmp()
56 igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10',end_ip = '224.1.10.49')
57 igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35',end_ip='38.24.35.56')
58 tls = cluster_tls()
59 flows = cluster_flows()
60 proxyarp = cluster_proxyarp()
61 vrouter = cluster_vrouter()
62 acl = cluster_acl()
63 dhcprelay = cluster_dhcprelay()
64 subscriber = cluster_subscriber()
A R Karthick3b2e0372016-12-14 17:37:43 -080065 testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_graceful_controller_restarts',
66 'test_cluster_single_controller_restarts', 'test_cluster_restarts')
A.R Karthick99044822017-02-09 14:04:20 -080067 ITERATIONS = int(os.getenv('ITERATIONS', 10))
A.R Karthick53d92702017-03-13 10:10:38 -070068 ARCHIVE_PARTITION = False
A R Karthick1f908202016-11-16 17:32:20 -080069
70 def setUp(self):
71 if self._testMethodName not in self.testcaseLoggers:
72 super(cluster_exchange, self).setUp()
73
74 def tearDown(self):
75 if self._testMethodName not in self.testcaseLoggers:
76 super(cluster_exchange, self).tearDown()
ChetanGaonker2099d722016-10-07 15:16:58 -070077
78 def get_controller(self):
79 controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
80 controller = controller.split(',')[0]
81 return controller
82
A R Karthick1f908202016-11-16 17:32:20 -080083 @classmethod
84 def get_controllers(cls):
85 controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
86 return controllers.split(',')
87
A R Karthick6cc8b812016-12-09 10:24:40 -080088 def cliEnter(self, controller = None):
ChetanGaonker2099d722016-10-07 15:16:58 -070089 retries = 0
A R Karthick6cc8b812016-12-09 10:24:40 -080090 while retries < 30:
91 self.cli = OnosCliDriver(controller = controller, connect = True)
ChetanGaonker2099d722016-10-07 15:16:58 -070092 if self.cli.handle:
93 break
94 else:
95 retries += 1
96 time.sleep(2)
97
98 def cliExit(self):
99 self.cli.disconnect()
100
A R Karthick1f908202016-11-16 17:32:20 -0800101 def get_leader(self, controller = None):
102 self.cliEnter(controller = controller)
A R Karthickde6b9dc2016-11-29 17:46:16 -0800103 try:
104 result = json.loads(self.cli.leaders(jsonFormat = True))
105 except:
106 result = None
107
A R Karthick1f908202016-11-16 17:32:20 -0800108 if result is None:
109 log.info('Leaders command failure for controller %s' %controller)
110 else:
111 log.info('Leaders returned: %s' %result)
112 self.cliExit()
113 return result
114
A R Karthick3b2e0372016-12-14 17:37:43 -0800115 def onos_shutdown(self, controller = None):
116 status = True
117 self.cliEnter(controller = controller)
118 try:
119 self.cli.shutdown(timeout = 10)
120 except:
121 log.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
122 status = False
123
124 self.cliExit()
125 return status
126
A R Karthicke14fc022016-12-08 14:50:29 -0800127 def log_set(self, level = None, app = 'org.onosproject', controllers = None):
128 CordLogger.logSet(level = level, app = app, controllers = controllers, forced = True)
A R Karthickef1232d2016-12-07 09:18:15 -0800129
A R Karthick1f908202016-11-16 17:32:20 -0800130 def get_leaders(self, controller = None):
A.R Karthick45ab3e12016-11-30 11:25:51 -0800131 result_map = {}
132 if controller is None:
133 controller = self.get_controller()
A R Karthick1f908202016-11-16 17:32:20 -0800134 if type(controller) in [ list, tuple ]:
135 for c in controller:
136 leaders = self.get_leader(controller = c)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800137 result_map[c] = leaders
A R Karthick1f908202016-11-16 17:32:20 -0800138 else:
139 leaders = self.get_leader(controller = controller)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800140 result_map[controller] = leaders
141 return result_map
A R Karthick1f908202016-11-16 17:32:20 -0800142
A R Karthickec2db322016-11-17 15:06:01 -0800143 def verify_leaders(self, controller = None):
A.R Karthick45ab3e12016-11-30 11:25:51 -0800144 leaders_map = self.get_leaders(controller = controller)
145 failed = [ k for k,v in leaders_map.items() if v == None ]
A R Karthickec2db322016-11-17 15:06:01 -0800146 return failed
147
ChetanGaonker2099d722016-10-07 15:16:58 -0700148 def verify_cluster_status(self,controller = None,onos_instances=ONOS_INSTANCES,verify=False):
149 tries = 0
150 try:
151 self.cliEnter(controller = controller)
152 while tries <= 10:
153 cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
154 if cluster_summary:
155 log.info("cluster 'summary' command output is %s"%cluster_summary)
156 nodes = cluster_summary['nodes']
157 if verify:
158 if nodes == onos_instances:
159 self.cliExit()
160 return True
161 else:
162 tries += 1
163 time.sleep(1)
164 else:
165 if nodes >= onos_instances:
166 self.cliExit()
167 return True
168 else:
169 tries += 1
170 time.sleep(1)
171 else:
172 tries += 1
173 time.sleep(1)
174 self.cliExit()
175 return False
176 except:
ChetanGaonker689b3862016-10-17 16:25:01 -0700177 raise Exception('Failed to get cluster members')
178 return False
ChetanGaonker2099d722016-10-07 15:16:58 -0700179
A.R Karthick45ab3e12016-11-30 11:25:51 -0800180 def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
ChetanGaonker2099d722016-10-07 15:16:58 -0700181 tries = 0
182 cluster_ips = []
183 try:
184 self.cliEnter(controller = controller)
185 while tries <= 10:
186 cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
187 if cluster_nodes:
188 log.info("cluster 'nodes' output is %s"%cluster_nodes)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800189 if nodes_filter:
190 cluster_nodes = nodes_filter(cluster_nodes)
ChetanGaonker2099d722016-10-07 15:16:58 -0700191 cluster_ips = map(lambda c: c['id'], cluster_nodes)
192 self.cliExit()
193 cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
194 return cluster_ips
195 else:
196 tries += 1
197 self.cliExit()
198 return cluster_ips
199 except:
200 raise Exception('Failed to get cluster members')
201 return cluster_ips
202
ChetanGaonker689b3862016-10-17 16:25:01 -0700203 def get_cluster_container_names_ips(self,controller=None):
A R Karthick1f908202016-11-16 17:32:20 -0800204 onos_names_ips = {}
A R Karthick0f3f25b2016-12-15 09:50:57 -0800205 controllers = self.get_controllers()
206 i = 0
207 for controller in controllers:
208 if i == 0:
209 name = Onos.NAME
210 else:
211 name = '{}-{}'.format(Onos.NAME, i+1)
212 onos_names_ips[controller] = name
213 onos_names_ips[name] = controller
214 i += 1
ChetanGaonker2099d722016-10-07 15:16:58 -0700215 return onos_names_ips
A R Karthick0f3f25b2016-12-15 09:50:57 -0800216 # onos_ips = self.get_cluster_current_member_ips(controller=controller)
217 # onos_names_ips[onos_ips[0]] = Onos.NAME
218 # onos_names_ips[Onos.NAME] = onos_ips[0]
219 # for i in range(1,len(onos_ips)):
220 # name = '{0}-{1}'.format(Onos.NAME,i+1)
221 # onos_names_ips[onos_ips[i]] = name
222 # onos_names_ips[name] = onos_ips[i]
223
224 # return onos_names_ips
ChetanGaonker2099d722016-10-07 15:16:58 -0700225
226 #identifying current master of a connected device, not tested
227 def get_cluster_current_master_standbys(self,controller=None,device_id=device_id):
228 master = None
229 standbys = []
230 tries = 0
231 try:
232 cli = self.cliEnter(controller = controller)
233 while tries <= 10:
234 roles = json.loads(self.cli.roles(jsonFormat = True))
235 log.info("cluster 'roles' command output is %s"%roles)
236 if roles:
237 for device in roles:
238 log.info('Verifying device info in line %s'%device)
239 if device['id'] == device_id:
240 master = str(device['master'])
241 standbys = map(lambda d: str(d), device['standbys'])
242 log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
243 self.cliExit()
244 return master, standbys
245 self.cliExit()
246 return master, standbys
247 else:
248 tries += 1
ChetanGaonker689b3862016-10-17 16:25:01 -0700249 time.sleep(1)
250 self.cliExit()
251 return master,standbys
252 except:
253 raise Exception('Failed to get cluster members')
254 return master,standbys
255
256 def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
257 ''' returns master and standbys of all the connected devices to ONOS cluster instance'''
258 device_dict = {}
259 tries = 0
260 try:
261 cli = self.cliEnter(controller = controller)
262 while tries <= 10:
263 device_dict = {}
264 roles = json.loads(self.cli.roles(jsonFormat = True))
265 log.info("cluster 'roles' command output is %s"%roles)
266 if roles:
267 for device in roles:
268 device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
269 for i in range(len(device_dict[device['id']]['standbys'])):
270 device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
271 log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
272 self.cliExit()
273 return device_dict
274 else:
275 tries += 1
ChetanGaonker2099d722016-10-07 15:16:58 -0700276 time.sleep(1)
277 self.cliExit()
ChetanGaonker689b3862016-10-17 16:25:01 -0700278 return device_dict
279 except:
280 raise Exception('Failed to get cluster members')
281 return device_dict
282
283 #identify current master of a connected device, not tested
284 def get_cluster_connected_devices(self,controller=None):
285 '''returns all the devices connected to ONOS cluster'''
286 device_list = []
287 tries = 0
288 try:
289 cli = self.cliEnter(controller = controller)
290 while tries <= 10:
291 device_list = []
292 devices = json.loads(self.cli.devices(jsonFormat = True))
293 log.info("cluster 'devices' command output is %s"%devices)
294 if devices:
295 for device in devices:
296 log.info('device id is %s'%device['id'])
297 device_list.append(str(device['id']))
298 self.cliExit()
299 return device_list
300 else:
301 tries += 1
302 time.sleep(1)
303 self.cliExit()
304 return device_list
305 except:
306 raise Exception('Failed to get cluster members')
307 return device_list
308
309 def get_number_of_devices_of_master(self,controller=None):
310 '''returns master-device pairs, which master having what devices'''
311 master_count = {}
312 try:
313 cli = self.cliEnter(controller = controller)
314 masters = json.loads(self.cli.masters(jsonFormat = True))
315 if masters:
316 for master in masters:
317 master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
318 return master_count
319 else:
320 return master_count
ChetanGaonker2099d722016-10-07 15:16:58 -0700321 except:
ChetanGaonker689b3862016-10-17 16:25:01 -0700322 raise Exception('Failed to get cluster members')
323 return master_count
ChetanGaonker2099d722016-10-07 15:16:58 -0700324
325 def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
326 if new_master is None: return False
ChetanGaonker689b3862016-10-17 16:25:01 -0700327 self.cliEnter(controller=controller)
ChetanGaonker2099d722016-10-07 15:16:58 -0700328 cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
329 command = self.cli.command(cmd = cmd, jsonFormat = False)
330 self.cliExit()
331 time.sleep(60)
332 master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
333 assert_equal(master,new_master)
334 log.info('Cluster master changed to %s successfully'%new_master)
335
ChetanGaonker689b3862016-10-17 16:25:01 -0700336 def withdraw_cluster_current_mastership(self,master_ip=None,device_id=device_id,controller=None):
337 '''current master looses its mastership and hence new master will be elected'''
338 self.cliEnter(controller=controller)
339 cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
340 command = self.cli.command(cmd = cmd, jsonFormat = False)
341 self.cliExit()
342 time.sleep(60)
343 new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
344 assert_not_equal(new_master_ip,master_ip)
345 log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
346 log.info('Cluster new master is %s'%new_master_ip)
347 return True
348
A R Karthick3b2e0372016-12-14 17:37:43 -0800349 def cluster_controller_restarts(self, graceful = False):
A R Karthick1f908202016-11-16 17:32:20 -0800350 controllers = self.get_controllers()
351 ctlr_len = len(controllers)
352 if ctlr_len <= 1:
353 log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
354 assert_greater(ctlr_len, 1)
355
356 #this call would verify the cluster for once
357 onos_map = self.get_cluster_container_names_ips()
358
A R Karthick2a70a2f2016-12-16 14:40:16 -0800359 def check_exception(iteration, controller = None):
A R Karthick1f908202016-11-16 17:32:20 -0800360 adjacent_controller = None
361 adjacent_controllers = None
362 if controller:
A.R Karthick45ab3e12016-11-30 11:25:51 -0800363 adjacent_controllers = list(set(controllers) - set([controller]))
364 adjacent_controller = adjacent_controllers[0]
A R Karthick1f908202016-11-16 17:32:20 -0800365 for node in controllers:
366 onosLog = OnosLog(host = node)
367 ##check the logs for storage exception
368 _, output = onosLog.get_log(('ERROR', 'Exception',))
A R Karthickec2db322016-11-17 15:06:01 -0800369 if output and output.find('StorageException$Timeout') >= 0:
370 log.info('\nStorage Exception Timeout found on node: %s\n' %node)
371 log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
372 log.info('\n' + '-' * 50 + '\n')
A R Karthick1f908202016-11-16 17:32:20 -0800373 log.info('%s' %output)
A R Karthickec2db322016-11-17 15:06:01 -0800374 log.info('\n' + '-' * 50 + '\n')
375 failed = self.verify_leaders(controllers)
376 if failed:
A.R Karthick45ab3e12016-11-30 11:25:51 -0800377 log.info('Leaders command failed on nodes: %s' %failed)
A R Karthick2a70a2f2016-12-16 14:40:16 -0800378 log.error('Test failed on ITERATION %d' %iteration)
A R Karthick81ece152017-01-11 16:46:43 -0800379 CordLogger.archive_results(self._testMethodName,
380 controllers = controllers,
A.R Karthick53d92702017-03-13 10:10:38 -0700381 iteration = 'FAILED',
382 archive_partition = self.ARCHIVE_PARTITION)
A R Karthickec2db322016-11-17 15:06:01 -0800383 assert_equal(len(failed), 0)
A R Karthick1f908202016-11-16 17:32:20 -0800384 return controller
385
386 try:
A R Karthickec2db322016-11-17 15:06:01 -0800387 ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800388 log.info('ONOS cluster formed with controllers: %s' %ips)
A R Karthick1f908202016-11-16 17:32:20 -0800389 st = True
390 except:
391 st = False
392
A R Karthickec2db322016-11-17 15:06:01 -0800393 failed = self.verify_leaders(controllers)
A R Karthick2a70a2f2016-12-16 14:40:16 -0800394 if failed:
395 log.error('Test failed on ITERATION %d' %iteration)
A R Karthick3396ec42017-01-11 17:12:13 -0800396 CordLogger.archive_results(self._testMethodName,
397 controllers = controllers,
A.R Karthick53d92702017-03-13 10:10:38 -0700398 iteration = 'FAILED',
399 archive_partition = self.ARCHIVE_PARTITION)
A R Karthick1f908202016-11-16 17:32:20 -0800400 assert_equal(len(failed), 0)
A R Karthick1f908202016-11-16 17:32:20 -0800401 if st is False:
402 log.info('No storage exception and ONOS cluster was not formed successfully')
403 else:
404 controller = None
405
406 return controller
407
408 next_controller = None
A.R Karthick99044822017-02-09 14:04:20 -0800409 tries = self.ITERATIONS
A R Karthick1f908202016-11-16 17:32:20 -0800410 for num in range(tries):
411 index = num % ctlr_len
412 #index = random.randrange(0, ctlr_len)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800413 controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
414 controller = onos_map[controller_name]
415 log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
A R Karthick1f908202016-11-16 17:32:20 -0800416 try:
A R Karthickef1232d2016-12-07 09:18:15 -0800417 #enable debug log for the other controllers before restarting this controller
A R Karthicke14fc022016-12-08 14:50:29 -0800418 adjacent_controllers = list( set(controllers) - set([controller]) )
419 self.log_set(controllers = adjacent_controllers)
420 self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
A R Karthick3b2e0372016-12-14 17:37:43 -0800421 if graceful is True:
A R Karthick0f3f25b2016-12-15 09:50:57 -0800422 log.info('Gracefully shutting down controller: %s' %controller)
A R Karthick3b2e0372016-12-14 17:37:43 -0800423 self.onos_shutdown(controller)
424 cord_test_onos_restart(node = controller, timeout = 0)
A R Karthicke14fc022016-12-08 14:50:29 -0800425 self.log_set(controllers = controller)
426 self.log_set(app = 'io.atomix', controllers = controller)
A R Karthickde6b9dc2016-11-29 17:46:16 -0800427 time.sleep(60)
A R Karthick1f908202016-11-16 17:32:20 -0800428 except:
429 time.sleep(5)
430 continue
A R Karthicke8935c62016-12-08 18:17:17 -0800431
432 #first archive the test case logs for this run
A R Karthick3b2e0372016-12-14 17:37:43 -0800433 CordLogger.archive_results(self._testMethodName,
A R Karthicke8935c62016-12-08 18:17:17 -0800434 controllers = controllers,
A.R Karthick53d92702017-03-13 10:10:38 -0700435 iteration = 'iteration_{}'.format(num+1),
436 archive_partition = self.ARCHIVE_PARTITION)
A R Karthick2a70a2f2016-12-16 14:40:16 -0800437 next_controller = check_exception(num, controller = controller)
A R Karthick1f908202016-11-16 17:32:20 -0800438
A R Karthick3b2e0372016-12-14 17:37:43 -0800439 def test_cluster_controller_restarts(self):
440 '''Test the cluster by repeatedly killing the controllers'''
441 self.cluster_controller_restarts()
442
443 def test_cluster_graceful_controller_restarts(self):
444 '''Test the cluster by repeatedly restarting the controllers gracefully'''
445 self.cluster_controller_restarts(graceful = True)
446
A.R Karthick45ab3e12016-11-30 11:25:51 -0800447 def test_cluster_single_controller_restarts(self):
448 '''Test the cluster by repeatedly restarting the same controller'''
449 controllers = self.get_controllers()
450 ctlr_len = len(controllers)
451 if ctlr_len <= 1:
452 log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
453 assert_greater(ctlr_len, 1)
454
455 #this call would verify the cluster for once
456 onos_map = self.get_cluster_container_names_ips()
457
A R Karthick2a70a2f2016-12-16 14:40:16 -0800458 def check_exception(iteration, controller, inclusive = False):
A.R Karthick45ab3e12016-11-30 11:25:51 -0800459 adjacent_controllers = list(set(controllers) - set([controller]))
460 adjacent_controller = adjacent_controllers[0]
461 controller_list = adjacent_controllers if inclusive == False else controllers
462 storage_exceptions = []
463 for node in controller_list:
464 onosLog = OnosLog(host = node)
465 ##check the logs for storage exception
466 _, output = onosLog.get_log(('ERROR', 'Exception',))
467 if output and output.find('StorageException$Timeout') >= 0:
468 log.info('\nStorage Exception Timeout found on node: %s\n' %node)
469 log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
470 log.info('\n' + '-' * 50 + '\n')
471 log.info('%s' %output)
472 log.info('\n' + '-' * 50 + '\n')
473 storage_exceptions.append(node)
474
475 failed = self.verify_leaders(controller_list)
476 if failed:
477 log.info('Leaders command failed on nodes: %s' %failed)
478 if storage_exceptions:
479 log.info('Storage exception seen on nodes: %s' %storage_exceptions)
A R Karthick2a70a2f2016-12-16 14:40:16 -0800480 log.error('Test failed on ITERATION %d' %iteration)
A R Karthick81ece152017-01-11 16:46:43 -0800481 CordLogger.archive_results('test_cluster_single_controller_restarts',
482 controllers = controllers,
A.R Karthick53d92702017-03-13 10:10:38 -0700483 iteration = 'FAILED',
484 archive_partition = self.ARCHIVE_PARTITION)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800485 assert_equal(len(failed), 0)
486 return controller
487
488 for ctlr in controller_list:
489 ips = self.get_cluster_current_member_ips(controller = ctlr,
490 nodes_filter = \
491 lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
492 log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
493 if controller in ips and inclusive is False:
494 log.info('Controller %s still ACTIVE on Node %s after it was shutdown' %(controller, ctlr))
495 if controller not in ips and inclusive is True:
A R Karthick6cc8b812016-12-09 10:24:40 -0800496 log.info('Controller %s still INACTIVE on Node %s after it was restarted' %(controller, ctlr))
A.R Karthick45ab3e12016-11-30 11:25:51 -0800497
498 return controller
499
A.R Karthick99044822017-02-09 14:04:20 -0800500 tries = self.ITERATIONS
A.R Karthick45ab3e12016-11-30 11:25:51 -0800501 #chose a random controller for shutdown/restarts
502 controller = controllers[random.randrange(0, ctlr_len)]
503 controller_name = onos_map[controller]
A R Karthick6cc8b812016-12-09 10:24:40 -0800504 ##enable the log level for the controllers
505 self.log_set(controllers = controllers)
506 self.log_set(app = 'io.atomix', controllers = controllers)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800507 for num in range(tries):
A.R Karthick45ab3e12016-11-30 11:25:51 -0800508 log.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
509 try:
A R Karthick3b2e0372016-12-14 17:37:43 -0800510 cord_test_onos_shutdown(node = controller)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800511 time.sleep(20)
512 except:
513 time.sleep(5)
514 continue
515 #check for exceptions on the adjacent nodes
A R Karthick2a70a2f2016-12-16 14:40:16 -0800516 check_exception(num, controller)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800517 #Now restart the controller back
518 log.info('Restarting back the controller %s' %controller_name)
A R Karthick3b2e0372016-12-14 17:37:43 -0800519 cord_test_onos_restart(node = controller)
A R Karthick6cc8b812016-12-09 10:24:40 -0800520 self.log_set(controllers = controller)
521 self.log_set(app = 'io.atomix', controllers = controller)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800522 time.sleep(60)
A R Karthicke8935c62016-12-08 18:17:17 -0800523 #archive the logs for this run
524 CordLogger.archive_results('test_cluster_single_controller_restarts',
525 controllers = controllers,
A.R Karthick53d92702017-03-13 10:10:38 -0700526 iteration = 'iteration_{}'.format(num+1),
527 archive_partition = self.ARCHIVE_PARTITION)
A R Karthick2a70a2f2016-12-16 14:40:16 -0800528 check_exception(num, controller, inclusive = True)
A.R Karthick45ab3e12016-11-30 11:25:51 -0800529
A.R Karthick2560f042016-11-30 14:38:52 -0800530 def test_cluster_restarts(self):
531 '''Test the cluster by repeatedly restarting the entire cluster'''
532 controllers = self.get_controllers()
533 ctlr_len = len(controllers)
534 if ctlr_len <= 1:
535 log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
536 assert_greater(ctlr_len, 1)
537
538 #this call would verify the cluster for once
539 onos_map = self.get_cluster_container_names_ips()
540
A R Karthick2a70a2f2016-12-16 14:40:16 -0800541 def check_exception(iteration):
A.R Karthick2560f042016-11-30 14:38:52 -0800542 controller_list = controllers
543 storage_exceptions = []
544 for node in controller_list:
545 onosLog = OnosLog(host = node)
546 ##check the logs for storage exception
547 _, output = onosLog.get_log(('ERROR', 'Exception',))
548 if output and output.find('StorageException$Timeout') >= 0:
549 log.info('\nStorage Exception Timeout found on node: %s\n' %node)
550 log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
551 log.info('\n' + '-' * 50 + '\n')
552 log.info('%s' %output)
553 log.info('\n' + '-' * 50 + '\n')
554 storage_exceptions.append(node)
555
556 failed = self.verify_leaders(controller_list)
557 if failed:
558 log.info('Leaders command failed on nodes: %s' %failed)
559 if storage_exceptions:
560 log.info('Storage exception seen on nodes: %s' %storage_exceptions)
A R Karthick2a70a2f2016-12-16 14:40:16 -0800561 log.error('Test failed on ITERATION %d' %iteration)
A R Karthick81ece152017-01-11 16:46:43 -0800562 CordLogger.archive_results('test_cluster_restarts',
563 controllers = controllers,
A.R Karthick53d92702017-03-13 10:10:38 -0700564 iteration = 'FAILED',
565 archive_partition = self.ARCHIVE_PARTITION)
A.R Karthick2560f042016-11-30 14:38:52 -0800566 assert_equal(len(failed), 0)
567 return
568
569 for ctlr in controller_list:
570 ips = self.get_cluster_current_member_ips(controller = ctlr,
571 nodes_filter = \
572 lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
573 log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
A R Karthick2a70a2f2016-12-16 14:40:16 -0800574 if len(ips) != len(controllers):
575 log.error('Test failed on ITERATION %d' %iteration)
A R Karthick81ece152017-01-11 16:46:43 -0800576 CordLogger.archive_results('test_cluster_restarts',
577 controllers = controllers,
A.R Karthick53d92702017-03-13 10:10:38 -0700578 iteration = 'FAILED',
579 archive_partition = self.ARCHIVE_PARTITION)
A.R Karthick2560f042016-11-30 14:38:52 -0800580 assert_equal(len(ips), len(controllers))
581
A.R Karthick99044822017-02-09 14:04:20 -0800582 tries = self.ITERATIONS
A.R Karthick2560f042016-11-30 14:38:52 -0800583 for num in range(tries):
584 log.info('ITERATION: %d. Restarting cluster with controllers at %s' %(num+1, controllers))
585 try:
586 cord_test_restart_cluster()
A R Karthick6cc8b812016-12-09 10:24:40 -0800587 self.log_set(controllers = controllers)
588 self.log_set(app = 'io.atomix', controllers = controllers)
A.R Karthick2560f042016-11-30 14:38:52 -0800589 log.info('Delaying before verifying cluster status')
590 time.sleep(60)
591 except:
592 time.sleep(10)
593 continue
A R Karthicke8935c62016-12-08 18:17:17 -0800594
595 #archive the logs for this run before verification
596 CordLogger.archive_results('test_cluster_restarts',
597 controllers = controllers,
A.R Karthick53d92702017-03-13 10:10:38 -0700598 iteration = 'iteration_{}'.format(num+1),
599 archive_partition = self.ARCHIVE_PARTITION)
A.R Karthick2560f042016-11-30 14:38:52 -0800600 #check for exceptions on the adjacent nodes
A R Karthick2a70a2f2016-12-16 14:40:16 -0800601 check_exception(num)
A.R Karthick2560f042016-11-30 14:38:52 -0800602
ChetanGaonker2099d722016-10-07 15:16:58 -0700603 #pass
ChetanGaonker689b3862016-10-17 16:25:01 -0700604 def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
605 status = self.verify_cluster_status(onos_instances = onos_instances)
606 assert_equal(status, True)
607 log.info('Cluster exists with %d ONOS instances'%onos_instances)
ChetanGaonker2099d722016-10-07 15:16:58 -0700608
609 #nottest cluster not coming up properly if member goes down
ChetanGaonker689b3862016-10-17 16:25:01 -0700610 def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700611 status = self.verify_cluster_status(onos_instances = onos_instances)
612 assert_equal(status, True)
613 onos_ips = self.get_cluster_current_member_ips()
614 onos_instances = len(onos_ips)+add
615 log.info('Adding %d nodes to the ONOS cluster' %add)
616 cord_test_onos_add_cluster(count = add)
617 status = self.verify_cluster_status(onos_instances=onos_instances)
618 assert_equal(status, True)
619
ChetanGaonker689b3862016-10-17 16:25:01 -0700620 def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700621 status = self.verify_cluster_status(onos_instances = onos_instances)
622 assert_equal(status, True)
623 master, standbys = self.get_cluster_current_master_standbys()
624 assert_equal(len(standbys),(onos_instances-1))
625 onos_names_ips = self.get_cluster_container_names_ips()
626 master_onos_name = onos_names_ips[master]
627 log.info('Removing cluster current master %s'%(master))
A R Karthick3b2e0372016-12-14 17:37:43 -0800628 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -0700629 time.sleep(60)
630 onos_instances -= 1
631 status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
632 assert_equal(status, True)
633 new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
634 assert_not_equal(master,new_master)
ChetanGaonker689b3862016-10-17 16:25:01 -0700635 log.info('Successfully removed clusters master instance')
ChetanGaonker2099d722016-10-07 15:16:58 -0700636
ChetanGaonker689b3862016-10-17 16:25:01 -0700637 def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700638 status = self.verify_cluster_status(onos_instances = onos_instances)
639 assert_equal(status, True)
640 master, standbys = self.get_cluster_current_master_standbys()
641 assert_equal(len(standbys),(onos_instances-1))
642 onos_names_ips = self.get_cluster_container_names_ips()
643 member_onos_name = onos_names_ips[standbys[0]]
644 log.info('Removing cluster member %s'%standbys[0])
A R Karthick3b2e0372016-12-14 17:37:43 -0800645 cord_test_onos_shutdown(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -0700646 time.sleep(60)
647 onos_instances -= 1
648 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
649 assert_equal(status, True)
650
ChetanGaonker689b3862016-10-17 16:25:01 -0700651 def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700652 status = self.verify_cluster_status(onos_instances = onos_instances)
653 assert_equal(status, True)
654 master, standbys = self.get_cluster_current_master_standbys()
655 assert_equal(len(standbys),(onos_instances-1))
656 onos_names_ips = self.get_cluster_container_names_ips()
657 member1_onos_name = onos_names_ips[standbys[0]]
658 member2_onos_name = onos_names_ips[standbys[1]]
659 log.info('Removing cluster member %s'%standbys[0])
A R Karthick3b2e0372016-12-14 17:37:43 -0800660 cord_test_onos_shutdown(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -0700661 log.info('Removing cluster member %s'%standbys[1])
A R Karthick3b2e0372016-12-14 17:37:43 -0800662 cord_test_onos_shutdown(node = standbys[1])
ChetanGaonker2099d722016-10-07 15:16:58 -0700663 time.sleep(60)
664 onos_instances = onos_instances - 2
665 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
666 assert_equal(status, True)
667
ChetanGaonker689b3862016-10-17 16:25:01 -0700668 def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700669 status = self.verify_cluster_status(onos_instances = onos_instances)
670 assert_equal(status, True)
671 master, standbys = self.get_cluster_current_master_standbys()
672 assert_equal(len(standbys),(onos_instances-1))
673 onos_names_ips = self.get_cluster_container_names_ips()
674 for i in range(remove):
675 member_onos_name = onos_names_ips[standbys[i]]
676 log.info('Removing onos container with name %s'%standbys[i])
A R Karthick3b2e0372016-12-14 17:37:43 -0800677 cord_test_onos_shutdown(node = standbys[i])
ChetanGaonker2099d722016-10-07 15:16:58 -0700678 time.sleep(60)
679 onos_instances = onos_instances - remove
680 status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
681 assert_equal(status, True)
682
683 #nottest test cluster not coming up properly if member goes down
ChetanGaonker689b3862016-10-17 16:25:01 -0700684 def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
ChetanGaonker2099d722016-10-07 15:16:58 -0700685 status = self.verify_cluster_status(onos_instances = onos_instances)
686 assert_equal(status, True)
687 onos_ips = self.get_cluster_current_member_ips()
688 onos_instances = len(onos_ips)+add
689 log.info('Adding %d ONOS instances to the cluster'%add)
690 cord_test_onos_add_cluster(count = add)
691 status = self.verify_cluster_status(onos_instances=onos_instances)
692 assert_equal(status, True)
693 log.info('Removing %d ONOS instances from the cluster'%remove)
694 for i in range(remove):
695 name = '{}-{}'.format(Onos.NAME, onos_instances - i)
696 log.info('Removing onos container with name %s'%name)
697 cord_test_onos_shutdown(node = name)
698 time.sleep(60)
699 onos_instances = onos_instances-remove
700 status = self.verify_cluster_status(onos_instances=onos_instances)
701 assert_equal(status, True)
702
703 #nottest cluster not coming up properly if member goes down
ChetanGaonker689b3862016-10-17 16:25:01 -0700704 def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
ChetanGaonker2099d722016-10-07 15:16:58 -0700705 status = self.verify_cluster_status(onos_instances = onos_instances)
706 assert_equal(status, True)
707 onos_ips = self.get_cluster_current_member_ips()
708 onos_instances = onos_instances-remove
709 log.info('Removing %d ONOS instances from the cluster'%remove)
710 for i in range(remove):
711 name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
712 log.info('Removing onos container with name %s'%name)
713 cord_test_onos_shutdown(node = name)
714 time.sleep(60)
715 status = self.verify_cluster_status(onos_instances=onos_instances)
716 assert_equal(status, True)
717 log.info('Adding %d ONOS instances to the cluster'%add)
718 cord_test_onos_add_cluster(count = add)
719 onos_instances = onos_instances+add
720 status = self.verify_cluster_status(onos_instances=onos_instances)
721 assert_equal(status, True)
722
ChetanGaonker689b3862016-10-17 16:25:01 -0700723 def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700724 status = self.verify_cluster_status(onos_instances = onos_instances)
725 assert_equal(status, True)
726 log.info('Restarting cluster')
727 cord_test_onos_restart()
728 status = self.verify_cluster_status(onos_instances = onos_instances)
729 assert_equal(status, True)
730
ChetanGaonker689b3862016-10-17 16:25:01 -0700731 def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700732 status = self.verify_cluster_status(onos_instances = onos_instances)
733 assert_equal(status, True)
734 master, standbys = self.get_cluster_current_master_standbys()
735 onos_names_ips = self.get_cluster_container_names_ips()
736 master_onos_name = onos_names_ips[master]
737 log.info('Restarting cluster master %s'%master)
A R Karthick3b2e0372016-12-14 17:37:43 -0800738 cord_test_onos_restart(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -0700739 status = self.verify_cluster_status(onos_instances = onos_instances)
740 assert_equal(status, True)
741 log.info('Cluster came up after master restart as expected')
742
743 #test fail. master changing after restart. Need to check correct behavior.
ChetanGaonker689b3862016-10-17 16:25:01 -0700744 def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700745 status = self.verify_cluster_status(onos_instances = onos_instances)
746 assert_equal(status, True)
747 master1, standbys = self.get_cluster_current_master_standbys()
748 onos_names_ips = self.get_cluster_container_names_ips()
749 master_onos_name = onos_names_ips[master1]
A R Karthick3b2e0372016-12-14 17:37:43 -0800750 log.info('Restarting cluster master %s'%master1)
751 cord_test_onos_restart(node = master1)
ChetanGaonker2099d722016-10-07 15:16:58 -0700752 status = self.verify_cluster_status(onos_instances = onos_instances)
753 assert_equal(status, True)
754 master2, standbys = self.get_cluster_current_master_standbys()
755 assert_equal(master1,master2)
756 log.info('Cluster master is same before and after cluster master restart as expected')
757
ChetanGaonker689b3862016-10-17 16:25:01 -0700758 def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700759 status = self.verify_cluster_status(onos_instances = onos_instances)
760 assert_equal(status, True)
761 master, standbys = self.get_cluster_current_master_standbys()
762 assert_equal(len(standbys),(onos_instances-1))
763 onos_names_ips = self.get_cluster_container_names_ips()
764 member_onos_name = onos_names_ips[standbys[0]]
765 log.info('Restarting cluster member %s'%standbys[0])
A R Karthick3b2e0372016-12-14 17:37:43 -0800766 cord_test_onos_restart(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -0700767 status = self.verify_cluster_status(onos_instances = onos_instances)
768 assert_equal(status, True)
769 log.info('Cluster came up as expected after restarting one member')
770
ChetanGaonker689b3862016-10-17 16:25:01 -0700771 def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700772 status = self.verify_cluster_status(onos_instances = onos_instances)
773 assert_equal(status, True)
774 master, standbys = self.get_cluster_current_master_standbys()
775 assert_equal(len(standbys),(onos_instances-1))
776 onos_names_ips = self.get_cluster_container_names_ips()
777 member1_onos_name = onos_names_ips[standbys[0]]
778 member2_onos_name = onos_names_ips[standbys[1]]
779 log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
A R Karthick3b2e0372016-12-14 17:37:43 -0800780 cord_test_onos_restart(node = standbys[0])
781 cord_test_onos_restart(node = standbys[1])
ChetanGaonker2099d722016-10-07 15:16:58 -0700782 status = self.verify_cluster_status(onos_instances = onos_instances)
783 assert_equal(status, True)
784 log.info('Cluster came up as expected after restarting two members')
785
ChetanGaonker689b3862016-10-17 16:25:01 -0700786 def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700787 status = self.verify_cluster_status(onos_instances = onos_instances)
788 assert_equal(status,True)
789 master, standbys = self.get_cluster_current_master_standbys()
790 assert_equal(len(standbys),(onos_instances-1))
791 onos_names_ips = self.get_cluster_container_names_ips()
792 for i in range(members):
793 member_onos_name = onos_names_ips[standbys[i]]
794 log.info('Restarting cluster member %s'%standbys[i])
A R Karthick3b2e0372016-12-14 17:37:43 -0800795 cord_test_onos_restart(node = standbys[i])
ChetanGaonker2099d722016-10-07 15:16:58 -0700796
797 status = self.verify_cluster_status(onos_instances = onos_instances)
798 assert_equal(status, True)
799 log.info('Cluster came up as expected after restarting %d members'%members)
800
ChetanGaonker689b3862016-10-17 16:25:01 -0700801 def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700802 status = self.verify_cluster_status(onos_instances=onos_instances)
803 assert_equal(status, True)
804 master, standbys = self.get_cluster_current_master_standbys()
805 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -0700806 log.info('Cluster current master of devices is %s'%master)
ChetanGaonker2099d722016-10-07 15:16:58 -0700807 self.change_master_current_cluster(new_master=standbys[0])
808 log.info('Cluster master changed successfully')
809
810 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700811 def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700812 status = self.verify_cluster_status(onos_instances = onos_instances)
813 assert_equal(status, True)
814 onos_ips = self.get_cluster_current_member_ips()
815 self.vrouter.setUpClass()
816 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
817 assert_equal(res, True)
818 for onos_ip in onos_ips:
819 tries = 0
820 flag = False
821 try:
822 self.cliEnter(controller = onos_ip)
823 while tries <= 5:
824 routes = json.loads(self.cli.routes(jsonFormat = True))
825 if routes:
826 assert_equal(len(routes['routes4']), networks)
827 self.cliExit()
828 flag = True
829 break
830 else:
831 tries += 1
832 time.sleep(1)
833 assert_equal(flag, True)
834 except:
835 log.info('Exception occured while checking routes in onos instance %s'%onos_ip)
836 raise
837
838 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700839 def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700840 status = self.verify_cluster_status(onos_instances = onos_instances)
841 assert_equal(status, True)
842 onos_ips = self.get_cluster_current_member_ips()
843 master, standbys = self.get_cluster_current_master_standbys()
844 onos_names_ips = self.get_cluster_container_names_ips()
845 master_onos_name = onos_names_ips[master]
846 self.vrouter.setUpClass()
847 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
848 assert_equal(res,True)
A R Karthick3b2e0372016-12-14 17:37:43 -0800849 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -0700850 time.sleep(60)
ChetanGaonker689b3862016-10-17 16:25:01 -0700851 log.info('Verifying vrouter traffic after cluster master is down')
ChetanGaonker2099d722016-10-07 15:16:58 -0700852 self.vrouter.vrouter_traffic_verify()
853
854 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700855 def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700856 status = self.verify_cluster_status(onos_instances = onos_instances)
857 assert_equal(status, True)
858 onos_ips = self.get_cluster_current_member_ips()
859 master, standbys = self.get_cluster_current_master_standbys()
860 onos_names_ips = self.get_cluster_container_names_ips()
861 master_onos_name = onos_names_ips[master]
862 self.vrouter.setUpClass()
863 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
864 assert_equal(res, True)
865 cord_test_onos_restart()
866 self.vrouter.vrouter_traffic_verify()
867
868 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700869 def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700870 status = self.verify_cluster_status(onos_instances = onos_instances)
871 assert_equal(status, True)
872 self.vrouter.setUpClass()
873 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
874 assert_equal(res, True)
875 self.vrouter.vrouter_activate(deactivate=True)
876 time.sleep(15)
877 self.vrouter.vrouter_traffic_verify(positive_test=False)
878 self.vrouter.vrouter_activate(deactivate=False)
879
880 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700881 def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700882 status = self.verify_cluster_status(onos_instances = onos_instances)
883 assert_equal(status, True)
884 master, standbys = self.get_cluster_current_master_standbys()
885 onos_names_ips = self.get_cluster_container_names_ips()
886 master_onos_name = onos_names_ips[master]
887 self.vrouter.setUpClass()
888 log.info('Verifying vrouter before master down')
889 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
890 assert_equal(res, True)
891 self.vrouter.vrouter_activate(deactivate=True)
892 log.info('Verifying vrouter traffic after app deactivated')
893 time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
894 self.vrouter.vrouter_traffic_verify(positive_test=False)
895 log.info('Verifying vrouter traffic after master down')
A R Karthick3b2e0372016-12-14 17:37:43 -0800896 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -0700897 time.sleep(60)
898 self.vrouter.vrouter_traffic_verify(positive_test=False)
899 self.vrouter.vrouter_activate(deactivate=False)
900
901 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700902 def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700903 status = self.verify_cluster_status(onos_instances = onos_instances)
904 assert_equal(status, True)
905 master, standbys = self.get_cluster_current_master_standbys()
906 onos_names_ips = self.get_cluster_container_names_ips()
907 member_onos_name = onos_names_ips[standbys[0]]
908 self.vrouter.setUpClass()
909 log.info('Verifying vrouter before cluster member down')
910 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
911 assert_equal(res, True) # Expecting vrouter should work properly
912 log.info('Verifying vrouter after cluster member down')
A R Karthick3b2e0372016-12-14 17:37:43 -0800913 cord_test_onos_shutdown(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -0700914 time.sleep(60)
915 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster goes down
916
917 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700918 def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700919 status = self.verify_cluster_status(onos_instances = onos_instances)
920 assert_equal(status, True)
921 master, standbys = self.get_cluster_current_master_standbys()
922 onos_names_ips = self.get_cluster_container_names_ips()
923 member_onos_name = onos_names_ips[standbys[1]]
924 self.vrouter.setUpClass()
925 log.info('Verifying vrouter traffic before cluster member restart')
926 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
927 assert_equal(res, True) # Expecting vrouter should work properly
A R Karthick3b2e0372016-12-14 17:37:43 -0800928 cord_test_onos_restart(node = standbys[1])
ChetanGaonker2099d722016-10-07 15:16:58 -0700929 log.info('Verifying vrouter traffic after cluster member restart')
930 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
931
932 #tested on single onos setup.
ChetanGaonker689b3862016-10-17 16:25:01 -0700933 def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700934 status = self.verify_cluster_status(onos_instances = onos_instances)
935 assert_equal(status, True)
936 self.vrouter.setUpClass()
937 log.info('Verifying vrouter traffic before cluster restart')
938 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
939 assert_equal(res, True) # Expecting vrouter should work properly
940 cord_test_onos_restart()
941 log.info('Verifying vrouter traffic after cluster restart')
942 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
943
944
945 #test fails because flow state is in pending_add in onos
ChetanGaonker689b3862016-10-17 16:25:01 -0700946 def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700947 status = self.verify_cluster_status(onos_instances = onos_instances)
948 assert_equal(status, True)
949 master, standbys = self.get_cluster_current_master_standbys()
950 onos_names_ips = self.get_cluster_container_names_ips()
951 master_onos_name = onos_names_ips[master]
952 self.flows.setUpClass()
953 egress = 1
954 ingress = 2
955 egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
956 ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
957 flow = OnosFlowCtrl(deviceId = self.device_id,
958 egressPort = egress,
959 ingressPort = ingress,
960 udpSrc = ingress_map['udp_port'],
961 udpDst = egress_map['udp_port'],
962 controller=master
963 )
964 result = flow.addFlow()
965 assert_equal(result, True)
966 time.sleep(1)
967 self.success = False
968 def mac_recv_task():
969 def recv_cb(pkt):
970 log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
971 self.success = True
972 sniff(timeout=2,
973 lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
974 and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])
975
976 for i in [0,1]:
977 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -0800978 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -0700979 log.info('Verifying flows traffic after master killed')
980 time.sleep(45)
981 else:
982 log.info('Verifying flows traffic before master killed')
983 t = threading.Thread(target = mac_recv_task)
984 t.start()
985 L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
986 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
987 L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
988 pkt = L2/L3/L4
989 log.info('Sending packets to verify if flows are correct')
990 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
991 t.join()
992 assert_equal(self.success, True)
993
ChetanGaonker689b3862016-10-17 16:25:01 -0700994 def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -0700995 status = self.verify_cluster_status(onos_instances=onos_instances)
996 assert_equal(status, True)
997 master, standbys = self.get_cluster_current_master_standbys()
998 self.flows.setUpClass()
999 egress = 1
1000 ingress = 2
1001 egress_map = { 'ip': '192.168.30.1' }
1002 ingress_map = { 'ip': '192.168.40.1' }
1003 flow = OnosFlowCtrl(deviceId = self.device_id,
1004 egressPort = egress,
1005 ingressPort = ingress,
1006 ecn = 1,
1007 controller=master
1008 )
1009 result = flow.addFlow()
1010 assert_equal(result, True)
1011 ##wait for flows to be added to ONOS
1012 time.sleep(1)
1013 self.success = False
1014 def mac_recv_task():
1015 def recv_cb(pkt):
1016 log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
1017 self.success = True
1018 sniff(count=2, timeout=5,
1019 lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
1020 and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
1021 iface = self.flows.port_map[egress])
1022 for i in [0,1]:
1023 if i == 1:
1024 log.info('Changing cluster master to %s'%standbys[0])
1025 self.change_master_current_cluster(new_master=standbys[0])
1026 log.info('Verifying flow traffic after cluster master chnaged')
1027 else:
1028 log.info('Verifying flow traffic before cluster master changed')
1029 t = threading.Thread(target = mac_recv_task)
1030 t.start()
1031 L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
1032 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
1033 pkt = L2/L3
1034 log.info('Sending a packet to verify if flows are correct')
1035 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
1036 t.join()
1037 assert_equal(self.success, True)
1038
ChetanGaonker689b3862016-10-17 16:25:01 -07001039 #pass
1040 def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
1041 status = self.verify_cluster_status(onos_instances=onos_instances)
1042 assert_equal(status, True)
1043 master,standbys = self.get_cluster_current_master_standbys()
1044 onos_names_ips = self.get_cluster_container_names_ips()
1045 master_onos_name = onos_names_ips[master]
1046 self.flows.setUpClass()
1047 egress = 1
1048 ingress = 2
1049 egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
1050 ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
1051 flow = OnosFlowCtrl(deviceId = self.device_id,
1052 egressPort = egress,
1053 ingressPort = ingress,
1054 ipv6_extension = 0,
1055 controller=master
1056 )
1057
1058 result = flow.addFlow()
1059 assert_equal(result, True)
1060 ##wait for flows to be added to ONOS
1061 time.sleep(1)
1062 self.success = False
1063 def mac_recv_task():
1064 def recv_cb(pkt):
1065 log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
1066 self.success = True
1067 sniff(timeout=2,count=5,
1068 lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
1069 for i in [0,1]:
1070 if i == 1:
1071 log.info('Restart cluster current master %s'%master)
1072 Container(master_onos_name,Onos.IMAGE).restart()
1073 time.sleep(45)
1074 log.info('Verifying flow traffic after master restart')
1075 else:
1076 log.info('Verifying flow traffic before master restart')
1077 t = threading.Thread(target = mac_recv_task)
1078 t.start()
1079 L2 = self.flows_eth
1080 L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
1081 pkt = L2/L3
1082 log.info('Sending packets to verify if flows are correct')
1083 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
1084 t.join()
1085 assert_equal(self.success, True)
1086
1087 def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
1088 dst_mac = self.igmp.iptomac(group)
1089 eth = Ether(dst= dst_mac)
1090 ip = IP(dst=group,src=source)
1091 data = repr(monotonic.monotonic())
1092 sendp(eth/ip/data,count=20, iface = intf)
1093 pkt = (eth/ip/data)
1094 log.info('multicast traffic packet %s'%pkt.show())
1095
1096 def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
1097 log.info('verifying multicast traffic for group %s from source %s'%(group,source))
1098 self.success = False
1099 def recv_task():
1100 def igmp_recv_cb(pkt):
1101 log.info('multicast data received for group %s from source %s'%(group,source))
1102 self.success = True
1103 sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
1104 t = threading.Thread(target = recv_task)
1105 t.start()
1106 self.send_multicast_data_traffic(group,source=source)
1107 t.join()
1108 return self.success
1109
1110 #pass
1111 def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
1112 status = self.verify_cluster_status(onos_instances=onos_instances)
1113 assert_equal(status, True)
1114 master, standbys = self.get_cluster_current_master_standbys()
1115 assert_equal(len(standbys), (onos_instances-1))
1116 onos_names_ips = self.get_cluster_container_names_ips()
1117 master_onos_name = onos_names_ips[master]
1118 self.igmp.setUp(controller=master)
1119 groups = ['224.2.3.4','230.5.6.7']
1120 src_list = ['2.2.2.2','3.3.3.3']
1121 self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
1122 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1123 iface = self.V_INF1, delay = 2)
1124 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1125 iface = self.V_INF1, delay = 2)
1126 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1127 assert_equal(status,True)
1128 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1129 assert_equal(status,False)
1130 log.info('restarting cluster master %s'%master)
1131 Container(master_onos_name,Onos.IMAGE).restart()
1132 time.sleep(60)
1133 log.info('verifying multicast data traffic after master restart')
1134 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1135 assert_equal(status,True)
1136 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1137 assert_equal(status,False)
1138
1139 #pass
1140 def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
1141 status = self.verify_cluster_status(onos_instances=onos_instances)
1142 assert_equal(status, True)
1143 master, standbys = self.get_cluster_current_master_standbys()
1144 assert_equal(len(standbys), (onos_instances-1))
1145 onos_names_ips = self.get_cluster_container_names_ips()
1146 master_onos_name = onos_names_ips[master]
1147 self.igmp.setUp(controller=master)
1148 groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
1149 src_list = [self.igmp.randomsourceip()]
1150 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1151 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1152 iface = self.V_INF1, delay = 2)
1153 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1154 iface = self.V_INF1, delay = 2)
1155 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1156 assert_equal(status,True)
1157 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1158 assert_equal(status,False)
1159 log.info('Killing cluster master %s'%master)
1160 Container(master_onos_name,Onos.IMAGE).kill()
1161 time.sleep(60)
1162 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1163 assert_equal(status, True)
1164 log.info('Verifying multicast data traffic after cluster master down')
1165 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1166 assert_equal(status,True)
1167 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1168 assert_equal(status,False)
1169
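    # Measures how many seconds multicast traffic takes to recover after the cluster master is killed.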
1170 def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
1171 status = self.verify_cluster_status(onos_instances=onos_instances)
1172 assert_equal(status, True)
1173 master, standbys = self.get_cluster_current_master_standbys()
1174 assert_equal(len(standbys), (onos_instances-1))
1175 onos_names_ips = self.get_cluster_container_names_ips()
1176 master_onos_name = onos_names_ips[master]
1177 self.igmp.setUp(controller=master)
1178 groups = [self.igmp.random_mcast_ip()]
1179 src_list = [self.igmp.randomsourceip()]
1180 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1181 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1182 iface = self.V_INF1, delay = 2)
1183 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1184 assert_equal(status,True)
1185        log.info('Killing cluster master %s'%master)
1186 Container(master_onos_name,Onos.IMAGE).kill()
1187 count = 0
1188 for i in range(60):
1189 log.info('Verifying multicast data traffic after cluster master down')
1190 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1191 if status:
1192 break
1193 else:
1194 count += 1
1195 time.sleep(1)
1196 assert_equal(status, True)
1197        log.info('Time taken to recover traffic after cluster master down is %d seconds'%count)
1198
1199
1200 #pass
1201 def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
1202 status = self.verify_cluster_status(onos_instances=onos_instances)
1203 assert_equal(status, True)
1204 master, standbys = self.get_cluster_current_master_standbys()
1205 assert_equal(len(standbys), (onos_instances-1))
1206 self.igmp.setUp(controller=master)
1207 groups = [self.igmp.random_mcast_ip()]
1208 src_list = [self.igmp.randomsourceip()]
1209 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1210 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1211 iface = self.V_INF1, delay = 2)
1212 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1213 assert_equal(status,True)
1214 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1215 self.change_cluster_current_master(new_master=standbys[0])
1216 log.info('Verifying multicast traffic after cluster master change')
1217 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1218 assert_equal(status,True)
1219 log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
1220 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
1221 iface = self.V_INF1, delay = 1)
1222 time.sleep(10)
1223 status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
1224 assert_equal(status,False)
1225
1226 #pass
1227 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1228 status = self.verify_cluster_status(onos_instances=onos_instances)
1229 assert_equal(status, True)
1230 master,standbys = self.get_cluster_current_master_standbys()
1231 assert_equal(len(standbys), (onos_instances-1))
1232 self.igmp.setUp(controller=master)
1233 groups = [self.igmp.random_mcast_ip()]
1234 src_list = [self.igmp.randomsourceip()]
1235 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1236 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1237 self.change_cluster_current_master(new_master = standbys[0])
1238 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1239 iface = self.V_INF1, delay = 2)
1240 time.sleep(1)
1241 self.change_cluster_current_master(new_master = master)
1242 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1243 assert_equal(status,True)
1244
1245 #pass
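    # Verifies basic EAP-TLS authentication through the current cluster master.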
ChetanGaonker2099d722016-10-07 15:16:58 -07001246 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001247 def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001248 status = self.verify_cluster_status(onos_instances=onos_instances)
1249 assert_equal(status, True)
1250 master, standbys = self.get_cluster_current_master_standbys()
1251 assert_equal(len(standbys), (onos_instances-1))
1252 self.tls.setUp(controller=master)
1253 df = defer.Deferred()
1254 def eap_tls_verify(df):
1255 tls = TLSAuthTest()
1256 tls.runTest()
1257 df.callback(0)
1258 reactor.callLater(0, eap_tls_verify, df)
1259 return df
1260
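    # Verifies EAP-TLS authentication before and after the cluster master is changed to a standby.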
1261 @deferred(120)
ChetanGaonker689b3862016-10-17 16:25:01 -07001262 def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001263 master, standbys = self.get_cluster_current_master_standbys()
1264 assert_equal(len(standbys), (onos_instances-1))
1265 self.tls.setUp()
1266 df = defer.Deferred()
1267        def eap_tls_verify(df):
1268 tls = TLSAuthTest()
1269 tls.runTest()
1270 df.callback(0)
1271 for i in [0,1]:
1272 if i == 1:
1273 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1274 self.change_master_current_cluster(new_master=standbys[0])
1275 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1276 else:
1277 log.info('Verifying tls authentication before cluster master change')
1278 reactor.callLater(0, eap_tls_verify, df)
1279 return df
1280
1281 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001282 def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001283 status = self.verify_cluster_status(onos_instances=onos_instances)
1284 assert_equal(status, True)
1285 master, standbys = self.get_cluster_current_master_standbys()
1286 assert_equal(len(standbys), (onos_instances-1))
1287 onos_names_ips = self.get_cluster_container_names_ips()
1288 master_onos_name = onos_names_ips[master]
1289 self.tls.setUp()
1290 df = defer.Deferred()
1291 def eap_tls_verify(df):
1292 tls = TLSAuthTest()
1293 tls.runTest()
1294 df.callback(0)
1295 for i in [0,1]:
1296 if i == 1:
1297 log.info('Killing cluster current master %s'%master)
A R Karthick3b2e0372016-12-14 17:37:43 -08001298 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001299 time.sleep(20)
1300 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1301 assert_equal(status, True)
1302 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1303 log.info('Verifying tls authentication after killing cluster master')
1304 reactor.callLater(0, eap_tls_verify, df)
1305 return df
1306
1307 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001308 def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001309 status = self.verify_cluster_status(onos_instances=onos_instances)
1310 assert_equal(status, True)
1311 master, standbys = self.get_cluster_current_master_standbys()
1312 assert_equal(len(standbys), (onos_instances-1))
1313 onos_names_ips = self.get_cluster_container_names_ips()
1314 member_onos_name = onos_names_ips[standbys[0]]
1315 self.tls.setUp()
1316 df = defer.Deferred()
1317 def eap_tls_no_cert(df):
1318 def tls_no_cert_cb():
1319 log.info('TLS authentication failed with no certificate')
1320 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1321 tls.runTest()
1322 assert_equal(tls.failTest, True)
1323 df.callback(0)
1324 for i in [0,1]:
1325 if i == 1:
1326 log.info('Restart cluster member %s'%standbys[0])
1327 Container(member_onos_name,Onos.IMAGE).restart()
1328 time.sleep(20)
1329 status = self.verify_cluster_status(onos_instances=onos_instances)
1330 assert_equal(status, True)
1331 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1332 log.info('Verifying tls authentication after member restart')
1333 reactor.callLater(0, eap_tls_no_cert, df)
1334 return df
1335
ChetanGaonker689b3862016-10-17 16:25:01 -07001336 #pass
1337 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1338 status = self.verify_cluster_status(onos_instances=onos_instances)
1339 assert_equal(status,True)
1340 master,standbys = self.get_cluster_current_master_standbys()
1341 assert_equal(len(standbys),(onos_instances-1))
1342 self.proxyarp.setUpClass()
1343 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1344 ingress = hosts+1
1345 for hostip, hostmac in hosts_config:
1346 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1347 time.sleep(1)
1348 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1349 self.change_cluster_current_master(new_master=standbys[0])
1350 log.info('verifying proxyarp after master change')
1351 for hostip, hostmac in hosts_config:
1352 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1353 time.sleep(1)
1354 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1355 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1356 time.sleep(3)
1357 for hostip, hostmac in hosts_config:
1358 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1359 time.sleep(1)
1360 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1361 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1362 time.sleep(3)
1363 for hostip, hostmac in hosts_config:
1364 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1365 time.sleep(1)
ChetanGaonker2099d722016-10-07 15:16:58 -07001366
ChetanGaonker689b3862016-10-17 16:25:01 -07001367 #pass
1368 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001369 status = self.verify_cluster_status(onos_instances=onos_instances)
1370 assert_equal(status, True)
1371 master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001372 assert_equal(len(standbys), (onos_instances-1))
1373 onos_names_ips = self.get_cluster_container_names_ips()
1374 member_onos_name = onos_names_ips[standbys[1]]
1375 self.proxyarp.setUpClass()
1376 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1377 ingress = hosts+1
1378 for hostip, hostmac in hosts_config:
1379 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1380 time.sleep(1)
1381 log.info('killing cluster member %s'%standbys[1])
1382 Container(member_onos_name,Onos.IMAGE).kill()
1383 time.sleep(20)
1384 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1385 assert_equal(status, True)
1386 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1387 log.info('verifying proxy arp functionality after cluster member down')
1388 for hostip, hostmac in hosts_config:
1389 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1390 time.sleep(1)
1391
1392 #pass
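    # Sends concurrent ARP requests for 10 hosts on 10 different interfaces and expects a proxy ARP reply on each.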
1393 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1394 status = self.verify_cluster_status(onos_instances=onos_instances)
1395 assert_equal(status, True)
1396 self.proxyarp.setUpClass()
1397 master, standbys = self.get_cluster_current_master_standbys()
1398 assert_equal(len(standbys), (onos_instances-1))
1399 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1400 self.success = True
1401 ingress = hosts+1
1402 ports = range(ingress,ingress+10)
1403 hostmac = []
1404 hostip = []
1405 for ip,mac in hosts_config:
1406 hostmac.append(mac)
1407 hostip.append(ip)
1408 success_dir = {}
1409 def verify_proxyarp(*r):
1410 ingress, hostmac, hostip = r[0],r[1],r[2]
1411 def mac_recv_task():
1412 def recv_cb(pkt):
1413                    log.info('ARP reply seen with source MAC %s' %(pkt[ARP].hwsrc))
1414 success_dir[current_thread().name] = True
1415 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1416 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1417 t = threading.Thread(target = mac_recv_task)
1418 t.start()
1419 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1420 log.info('Sending arp request for dest ip %s on interface %s' %
1421 (hostip,self.proxyarp.port_map[ingress]))
1422 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1423 t.join()
1424 t = []
1425 for i in range(10):
1426 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1427 for i in range(10):
1428 t[i].start()
1429 time.sleep(2)
1430 for i in range(10):
1431 t[i].join()
1432 if len(success_dir) != 10:
1433 self.success = False
1434 assert_equal(self.success, True)
1435
1436 #pass
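    # Adds an ACL rule through the current master, changes the master, then removes the rule through the new master.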
1437 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1438 status = self.verify_cluster_status(onos_instances=onos_instances)
1439 assert_equal(status, True)
1440 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001441 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001442 self.acl.setUp()
1443 acl_rule = ACLTest()
1444 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1445 if status is False:
1446 log.info('JSON request returned status %d' %code)
1447 assert_equal(status, True)
1448 result = acl_rule.get_acl_rules(controller=master)
1449 aclRules1 = result.json()['aclRules']
1450 log.info('Added acl rules is %s'%aclRules1)
1451 acl_Id = map(lambda d: d['id'], aclRules1)
1452 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1453 self.change_cluster_current_master(new_master=standbys[0])
1454 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1455 if status is False:
1456 log.info('JSON request returned status %d' %code)
1457 assert_equal(status, True)
1458
1459 #pass
1460 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1461 status = self.verify_cluster_status(onos_instances=onos_instances)
1462 assert_equal(status, True)
1463 master,standbys = self.get_cluster_current_master_standbys()
1464 assert_equal(len(standbys),(onos_instances-1))
1465 onos_names_ips = self.get_cluster_container_names_ips()
1466 master_onos_name = onos_names_ips[master]
1467 self.acl.setUp()
1468 acl_rule = ACLTest()
1469 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1470 if status is False:
1471 log.info('JSON request returned status %d' %code)
1472 assert_equal(status, True)
1473 result1 = acl_rule.get_acl_rules(controller=master)
1474 aclRules1 = result1.json()['aclRules']
1475 log.info('Added acl rules is %s'%aclRules1)
1476 acl_Id1 = map(lambda d: d['id'], aclRules1)
1477 log.info('Killing cluster current master %s'%master)
1478 Container(master_onos_name,Onos.IMAGE).kill()
1479 time.sleep(45)
1480        status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1481 assert_equal(status, True)
1482 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1483 assert_equal(len(standbys),(onos_instances-2))
1484 assert_not_equal(new_master,master)
1485 result2 = acl_rule.get_acl_rules(controller=new_master)
1486 aclRules2 = result2.json()['aclRules']
1487 acl_Id2 = map(lambda d: d['id'], aclRules2)
1488 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1489 assert_equal(acl_Id2,acl_Id1)
1490
1491 #acl traffic scenario not working as acl rule is not getting added to onos
1492 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1493 status = self.verify_cluster_status(onos_instances=onos_instances)
1494 assert_equal(status, True)
1495 master,standbys = self.get_cluster_current_master_standbys()
1496 assert_equal(len(standbys),(onos_instances-1))
1497 onos_names_ips = self.get_cluster_container_names_ips()
1498 member1_onos_name = onos_names_ips[standbys[0]]
1499 member2_onos_name = onos_names_ips[standbys[1]]
1500 ingress = self.acl.ingress_iface
1501 egress = self.acl.CURRENT_PORT_NUM
1502 acl_rule = ACLTest()
1503 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1504 self.acl.CURRENT_PORT_NUM += 1
1505 time.sleep(5)
1506 if status is False:
1507 log.info('JSON request returned status %d' %code)
1508 assert_equal(status, True)
1509 srcMac = '00:00:00:00:00:11'
1510 dstMac = host_ip_mac[0][1]
1511 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1512 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1513 time.sleep(10)
1514 if status is False:
1515 log.info('JSON request returned status %d' %code)
1516 assert_equal(status, True)
1517 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1518 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1519 Container(member1_onos_name, Onos.IMAGE).kill()
1520 Container(member2_onos_name, Onos.IMAGE).kill()
1521 time.sleep(40)
1522 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1523 assert_equal(status, True)
1524 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1525 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1526
1527 #pass
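    # DHCP relay: obtains a lease, changes the cluster master, releases the IP and verifies the same IP is offered again.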
1528 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1529 status = self.verify_cluster_status(onos_instances=onos_instances)
1530 assert_equal(status, True)
1531 master,standbys = self.get_cluster_current_master_standbys()
1532 assert_equal(len(standbys),(onos_instances-1))
1533 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001534 mac = self.dhcprelay.get_mac(iface)
1535 self.dhcprelay.host_load(iface)
1536 ##we use the defaults for this test that serves as an example for others
1537 ##You don't need to restart dhcpd server if retaining default config
1538 config = self.dhcprelay.default_config
1539 options = self.dhcprelay.default_options
1540 subnet = self.dhcprelay.default_subnet_config
1541 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1542 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1543 config = config,
1544 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001545 subnet = subnet,
1546 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001547 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1548 cip, sip = self.dhcprelay.send_recv(mac)
1549 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1550 self.change_master_current_cluster(new_master=standbys[0])
1551 log.info('Releasing ip %s to server %s' %(cip, sip))
1552 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1553 log.info('Triggering DHCP discover again after release')
1554 cip2, sip2 = self.dhcprelay.send_recv(mac)
1555 log.info('Verifying released IP was given back on rediscover')
1556 assert_equal(cip, cip2)
1557 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1558 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001559 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001560
ChetanGaonker689b3862016-10-17 16:25:01 -07001561
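    # DHCP relay: obtains a lease, kills the cluster master, then verifies the client is re-offered the same IP.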
1562 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1563 status = self.verify_cluster_status(onos_instances=onos_instances)
1564 assert_equal(status, True)
1565 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001566 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001567 onos_names_ips = self.get_cluster_container_names_ips()
1568 master_onos_name = onos_names_ips[master]
1569 self.dhcprelay.setUpClass(controller=master)
1570 mac = self.dhcprelay.get_mac(iface)
1571 self.dhcprelay.host_load(iface)
1572 ##we use the defaults for this test that serves as an example for others
1573 ##You don't need to restart dhcpd server if retaining default config
1574 config = self.dhcprelay.default_config
1575 options = self.dhcprelay.default_options
1576 subnet = self.dhcprelay.default_subnet_config
1577 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1578 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1579 config = config,
1580 options = options,
1581 subnet = subnet,
1582 controller=master)
1583 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1584 log.info('Initiating dhcp process from client %s'%mac)
1585 cip, sip = self.dhcprelay.send_recv(mac)
1586 log.info('Killing cluster current master %s'%master)
1587 Container(master_onos_name, Onos.IMAGE).kill()
1588 time.sleep(60)
1589 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1590 assert_equal(status, True)
1591 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1592        log.info("Verifying dhcp client gets the same IP after cluster master goes down")
1593 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1594 assert_equal(new_cip, cip)
1595 self.dhcprelay.tearDownClass(controller=standbys[0])
1596
1597 #pass
1598 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1599 status = self.verify_cluster_status(onos_instances=onos_instances)
1600 assert_equal(status, True)
1601 master,standbys = self.get_cluster_current_master_standbys()
1602 assert_equal(len(standbys),(onos_instances-1))
1603 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001604 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1605 self.dhcprelay.host_load(iface)
1606 ##we use the defaults for this test that serves as an example for others
1607 ##You don't need to restart dhcpd server if retaining default config
1608 config = self.dhcprelay.default_config
1609 options = self.dhcprelay.default_options
1610 subnet = self.dhcprelay.default_subnet_config
1611 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1612 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1613 config = config,
1614 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001615 subnet = subnet,
1616 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001617 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1618 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1619 assert_not_equal(cip1,None)
1620 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1621 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1622 self.change_master_current_cluster(new_master=standbys[0])
1623 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1624 assert_not_equal(cip2,None)
1625 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1626        log.info('Changing cluster master from %s to %s'%(standbys[0],master))
1627        self.change_master_current_cluster(new_master=master)
1628        cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1629        assert_not_equal(cip3,None)
1630        log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
ChetanGaonker689b3862016-10-17 16:25:01 -07001631 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001632
ChetanGaonker689b3862016-10-17 16:25:01 -07001633 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001634 status = self.verify_cluster_status(onos_instances=onos_instances)
1635 assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001636        self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001637 self.subscriber.num_subscribers = 5
1638 self.subscriber.num_channels = 10
1639 for i in [0,1]:
1640 if i == 1:
1641 cord_test_onos_restart()
1642 time.sleep(45)
1643 status = self.verify_cluster_status(onos_instances=onos_instances)
1644 assert_equal(status, True)
1645 log.info('Verifying cord subscriber functionality after cluster restart')
1646 else:
1647 log.info('Verifying cord subscriber functionality before cluster restart')
1648 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1649 num_channels = self.subscriber.num_channels,
1650 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1651 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1652 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1653 self.subscriber.num_channels))
1654 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001655 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001656
ChetanGaonker689b3862016-10-17 16:25:01 -07001657 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1658 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1659 status = self.verify_cluster_status(onos_instances=onos_instances)
1660 assert_equal(status, True)
1661 master,standbys = self.get_cluster_current_master_standbys()
1662 assert_equal(len(standbys),(onos_instances-1))
1663 self.subscriber.setUpClass(controller=master)
1664 self.subscriber.num_subscribers = 5
1665 self.subscriber.num_channels = 10
1666 for i in [0,1]:
1667 if i == 1:
1668 status=self.withdraw_cluster_current_mastership(master_ip=master)
1669                assert_equal(status, True)
1670 master,standbys = self.get_cluster_current_master_standbys()
1671 log.info('verifying cord subscriber functionality after cluster current master withdraw mastership')
1672 else:
1673 log.info('verifying cord subscriber functionality before cluster master withdraw mastership')
1674 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1675 num_channels = self.subscriber.num_channels,
1676 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1677 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1678 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1679 self.subscriber.num_channels),controller=master)
1680 assert_equal(test_status, True)
1681 self.subscriber.tearDownClass(controller=master)
1682
1683 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1684 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001685 status = self.verify_cluster_status(onos_instances=onos_instances)
1686 assert_equal(status, True)
1687 master, standbys = self.get_cluster_current_master_standbys()
1688 assert_equal(len(standbys),(onos_instances-1))
1689 onos_names_ips = self.get_cluster_container_names_ips()
1690 member_onos_name = onos_names_ips[standbys[0]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001691 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001692 num_subscribers = 1
1693 num_channels = 10
1694 for i in [0,1]:
1695 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001696 cord_test_onos_shutdown(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001697 time.sleep(30)
ChetanGaonker689b3862016-10-17 16:25:01 -07001698 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001699 assert_equal(status, True)
1700 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1701 else:
1702 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1703 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1704 num_channels = num_channels,
1705 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1706 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1707 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
ChetanGaonker689b3862016-10-17 16:25:01 -07001708 negative_subscriber_auth = 'all',controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001709 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001710 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001711
ChetanGaonker689b3862016-10-17 16:25:01 -07001712 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001713 status = self.verify_cluster_status(onos_instances=onos_instances)
1714 assert_equal(status, True)
1715 master, standbys = self.get_cluster_current_master_standbys()
1716 assert_equal(len(standbys),(onos_instances-1))
1717 onos_names_ips = self.get_cluster_container_names_ips()
1718 member1_onos_name = onos_names_ips[standbys[0]]
1719 member2_onos_name = onos_names_ips[standbys[1]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001720 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001721 num_subscribers = 1
1722 num_channels = 10
1723 for i in [0,1]:
1724 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001725 cord_test_onos_shutdown(node = standbys[0])
1726 cord_test_onos_shutdown(node = standbys[1])
ChetanGaonker2099d722016-10-07 15:16:58 -07001727 time.sleep(60)
1728 status = self.verify_cluster_status(onos_instances=onos_instances-2)
1729 assert_equal(status, True)
1730                log.info('Verifying cord subscriber functionality after cluster members %s and %s are down'%(standbys[0],standbys[1]))
1731 else:
1732                log.info('Verifying cord subscriber functionality before cluster members %s and %s are down'%(standbys[0],standbys[1]))
1733 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1734 num_channels = num_channels,
1735 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1736 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1737 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1738 negative_subscriber_auth = 'all')
1739 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001740 self.subscriber.tearDownClass(controller=master)
1741
1742 #pass
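    # Verifies that every connected OVS device has a master and (onos_instances - 1) standbys.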
1743 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1744 status = self.verify_cluster_status(onos_instances=onos_instances)
1745 assert_equal(status, True)
1746 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1747 for device in device_dict.keys():
1748 log.info("Device is %s"%device_dict[device])
1749 assert_not_equal(device_dict[device]['master'],'none')
1750 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1751 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1752
1753 #pass
1754 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1755 status = self.verify_cluster_status(onos_instances=onos_instances)
1756 assert_equal(status, True)
1757 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1758 cluster_ips = self.get_cluster_current_member_ips()
1759 for ip in cluster_ips:
1760 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1761 assert_equal(len(device_dict.keys()),onos_instances)
1762 for device in device_dict.keys():
1763 log.info("Device is %s"%device_dict[device])
1764 assert_not_equal(device_dict[device]['master'],'none')
1765 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1766 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1767
1768 #pass
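    # Restarts an ONOS node that currently masters devices and verifies its device count drops to zero afterwards.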
1769 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1770 status = self.verify_cluster_status(onos_instances=onos_instances)
1771 assert_equal(status, True)
1772 onos_names_ips = self.get_cluster_container_names_ips()
1773 master_count = self.get_number_of_devices_of_master()
1774 log.info('Master count information is %s'%master_count)
1775 total_devices = 0
1776 for master in master_count.keys():
1777 total_devices += master_count[master]['size']
1778 if master_count[master]['size'] != 0:
1779 restart_ip = master
1780 assert_equal(total_devices,onos_instances)
1781 member_onos_name = onos_names_ips[restart_ip]
1782 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1783 Container(member_onos_name, Onos.IMAGE).restart()
1784 time.sleep(40)
1785 master_count = self.get_number_of_devices_of_master()
1786 log.info('Master count information after restart is %s'%master_count)
1787 total_devices = 0
1788 for master in master_count.keys():
1789 total_devices += master_count[master]['size']
1790 if master == restart_ip:
1791 assert_equal(master_count[master]['size'], 0)
1792 assert_equal(total_devices,onos_instances)
1793
1794 #pass
1795 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1796 status = self.verify_cluster_status(onos_instances=onos_instances)
1797 assert_equal(status, True)
1798 onos_names_ips = self.get_cluster_container_names_ips()
1799 master_count = self.get_number_of_devices_of_master()
1800 log.info('Master count information is %s'%master_count)
1801 total_devices = 0
1802 for master in master_count.keys():
1803 total_devices += master_count[master]['size']
1804 if master_count[master]['size'] != 0:
1805 restart_ip = master
1806 assert_equal(total_devices,onos_instances)
1807 master_onos_name = onos_names_ips[restart_ip]
1808 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1809 Container(master_onos_name, Onos.IMAGE).kill()
1810 time.sleep(40)
1811 for ip in onos_names_ips.keys():
1812 if ip != restart_ip:
1813 controller_ip = ip
1814 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1815 assert_equal(status, True)
1816 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1817 log.info('Master count information after restart is %s'%master_count)
1818 total_devices = 0
1819 for master in master_count.keys():
1820 total_devices += master_count[master]['size']
1821 if master == restart_ip:
1822 assert_equal(master_count[master]['size'], 0)
1823 assert_equal(total_devices,onos_instances)
1824
1825 #pass
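    # Withdraws mastership of one device from its current master and verifies that master's device count drops by one.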
1826 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1827 status = self.verify_cluster_status(onos_instances=onos_instances)
1828 assert_equal(status, True)
1829 master_count = self.get_number_of_devices_of_master()
1830 log.info('Master count information is %s'%master_count)
1831 total_devices = 0
1832 for master in master_count.keys():
1833 total_devices += int(master_count[master]['size'])
1834 if master_count[master]['size'] != 0:
1835 master_ip = master
1836                log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
1837 device_id = str(master_count[master]['devices'][0])
1838 device_count = master_count[master]['size']
1839 assert_equal(total_devices,onos_instances)
1840 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1841 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1842 assert_equal(status, True)
1843 master_count = self.get_number_of_devices_of_master()
1844 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1845 total_devices = 0
1846 for master in master_count.keys():
1847 total_devices += int(master_count[master]['size'])
1848 if master == master_ip:
1849 assert_equal(master_count[master]['size'], device_count-1)
1850 assert_equal(total_devices,onos_instances)
1851
1852 #pass
1853 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1854 status = self.verify_cluster_status(onos_instances=onos_instances)
1855 assert_equal(status, True)
1856 master_count = self.get_number_of_devices_of_master()
1857 log.info('Master count information is %s'%master_count)
1858 total_devices = 0
1859 for master in master_count.keys():
1860 total_devices += master_count[master]['size']
1861 assert_equal(total_devices,onos_instances)
1862 log.info('Restarting cluster')
1863 cord_test_onos_restart()
1864 time.sleep(60)
1865 master_count = self.get_number_of_devices_of_master()
1866 log.info('Master count information after restart is %s'%master_count)
1867 total_devices = 0
1868 for master in master_count.keys():
1869 total_devices += master_count[master]['size']
1870 assert_equal(total_devices,onos_instances)