# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl, get_mac
from OltConfig import OltConfig
from socket import socket
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart, cord_test_restart_cluster
from portmaps import g_subscriber_port_map
from scapy.all import *
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
from OnosLog import OnosLog
from CordLogger import CordLogger
import os
import json
import random
import collections
log.setLevel('INFO')

class cluster_exchange(CordLogger):
    test_path = os.path.dirname(os.path.realpath(__file__))
    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
    mac = RandMAC()._fix()
    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
    igmp_ip = IP(dst = '224.0.0.22')
    ONOS_INSTANCES = 3
    V_INF1 = 'veth0'
    TLS_TIMEOUT = 100
    device_id = 'of:' + get_mac()
    igmp = cluster_igmp()
    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10',end_ip = '224.1.10.49')
    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35',end_ip='38.24.35.56')
    tls = cluster_tls()
    flows = cluster_flows()
    proxyarp = cluster_proxyarp()
    vrouter = cluster_vrouter()
    acl = cluster_acl()
    dhcprelay = cluster_dhcprelay()
    subscriber = cluster_subscriber()
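    # The restart tests listed below manage their own per-iteration log capture and
    # archiving, so the CordLogger setUp/tearDown hooks are skipped for them (see the
    # setUp/tearDown overrides further down).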
    testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_graceful_controller_restarts',
                       'test_cluster_single_controller_restarts', 'test_cluster_restarts')

    def setUp(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).setUp()

    def tearDown(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).tearDown()

    def get_controller(self):
        controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
        controller = controller.split(',')[0]
        return controller

    @classmethod
    def get_controllers(cls):
        controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
        return controllers.split(',')

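    # Note on get_controller()/get_controllers() above: the test harness is assumed to
    # export the controller IPs as a comma-separated list, e.g.
    # ONOS_CONTROLLER_IP='172.17.0.2,172.17.0.3,172.17.0.4' (values are illustrative only);
    # get_controller() returns the first entry and get_controllers() the whole list.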
    def cliEnter(self, controller = None):
        retries = 0
        while retries < 30:
            self.cli = OnosCliDriver(controller = controller, connect = True)
            if self.cli.handle:
                break
            else:
                retries += 1
                time.sleep(2)

    def cliExit(self):
        self.cli.disconnect()

    def get_leader(self, controller = None):
        self.cliEnter(controller = controller)
        try:
            result = json.loads(self.cli.leaders(jsonFormat = True))
        except:
            result = None

        if result is None:
            log.info('Leaders command failure for controller %s' %controller)
        else:
            log.info('Leaders returned: %s' %result)
        self.cliExit()
        return result

    def onos_shutdown(self, controller = None):
        status = True
        self.cliEnter(controller = controller)
        try:
            self.cli.shutdown(timeout = 10)
        except:
            log.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
            status = False

        self.cliExit()
        return status

    def log_set(self, level = None, app = 'org.onosproject', controllers = None):
        CordLogger.logSet(level = level, app = app, controllers = controllers, forced = True)

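    # get_leaders() below fans the 'leaders' CLI query out to a single controller or to a
    # list/tuple of controllers and returns a map of controller -> parsed output (None on
    # failure); verify_leaders() then reduces that map to the list of nodes that failed.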
    def get_leaders(self, controller = None):
        result_map = {}
        if controller is None:
            controller = self.get_controller()
        if type(controller) in [ list, tuple ]:
            for c in controller:
                leaders = self.get_leader(controller = c)
                result_map[c] = leaders
        else:
            leaders = self.get_leader(controller = controller)
            result_map[controller] = leaders
        return result_map

    def verify_leaders(self, controller = None):
        leaders_map = self.get_leaders(controller = controller)
        failed = [ k for k,v in leaders_map.items() if v == None ]
        return failed

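    # verify_cluster_status() polls the ONOS 'summary' command for the reported node count:
    # with verify=True it requires an exact match against onos_instances, otherwise it
    # accepts any count greater than or equal to onos_instances.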
    def verify_cluster_status(self,controller = None,onos_instances=ONOS_INSTANCES,verify=False):
        tries = 0
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
                if cluster_summary:
                    log.info("cluster 'summary' command output is %s"%cluster_summary)
                    nodes = cluster_summary['nodes']
                    if verify:
                        if nodes == onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                    else:
                        if nodes >= onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return False
        except:
            raise Exception('Failed to get cluster members')
            return False

    def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
        tries = 0
        cluster_ips = []
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
                if cluster_nodes:
                    log.info("cluster 'nodes' output is %s"%cluster_nodes)
                    if nodes_filter:
                        cluster_nodes = nodes_filter(cluster_nodes)
                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
                    self.cliExit()
                    cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
                    return cluster_ips
                else:
                    tries += 1
            self.cliExit()
            return cluster_ips
        except:
            raise Exception('Failed to get cluster members')
            return cluster_ips

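    # get_cluster_container_names_ips() maps controller IPs to their container names and
    # back in one dictionary. The naming convention assumed here follows the cord-test
    # containers: the first controller is Onos.NAME and the rest are '<Onos.NAME>-2',
    # '<Onos.NAME>-3', and so on.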
    def get_cluster_container_names_ips(self,controller=None):
        onos_names_ips = {}
        controllers = self.get_controllers()
        i = 0
        for controller in controllers:
            if i == 0:
                name = Onos.NAME
            else:
                name = '{}-{}'.format(Onos.NAME, i+1)
            onos_names_ips[controller] = name
            onos_names_ips[name] = controller
            i += 1
        return onos_names_ips
        # onos_ips = self.get_cluster_current_member_ips(controller=controller)
        # onos_names_ips[onos_ips[0]] = Onos.NAME
        # onos_names_ips[Onos.NAME] = onos_ips[0]
        # for i in range(1,len(onos_ips)):
        #     name = '{0}-{1}'.format(Onos.NAME,i+1)
        #     onos_names_ips[onos_ips[i]] = name
        #     onos_names_ips[name] = onos_ips[i]

        # return onos_names_ips

    #identifying current master of a connected device, not tested
    def get_cluster_current_master_standbys(self,controller=None,device_id=device_id):
        master = None
        standbys = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        log.info('Verifying device info in line %s'%device)
                        if device['id'] == device_id:
                            master = str(device['master'])
                            standbys = map(lambda d: str(d), device['standbys'])
                            log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
                            self.cliExit()
                            return master, standbys
                    self.cliExit()
                    return master, standbys
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return master,standbys
        except:
            raise Exception('Failed to get cluster members')
            return master,standbys

    def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
        ''' returns master and standbys of all the devices connected to the ONOS cluster instance'''
        device_dict = {}
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_dict = {}
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
                        for i in range(len(device_dict[device['id']]['standbys'])):
                            device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
                        log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
                    self.cliExit()
                    return device_dict
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_dict
        except:
            raise Exception('Failed to get cluster members')
            return device_dict

    #identify current master of a connected device, not tested
    def get_cluster_connected_devices(self,controller=None):
        '''returns all the devices connected to the ONOS cluster'''
        device_list = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_list = []
                devices = json.loads(self.cli.devices(jsonFormat = True))
                log.info("cluster 'devices' command output is %s"%devices)
                if devices:
                    for device in devices:
                        log.info('device id is %s'%device['id'])
                        device_list.append(str(device['id']))
                    self.cliExit()
                    return device_list
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_list
        except:
            raise Exception('Failed to get cluster members')
            return device_list

    def get_number_of_devices_of_master(self,controller=None):
        '''returns master-device pairs, i.e. which master owns which devices'''
        master_count = {}
        try:
            cli = self.cliEnter(controller = controller)
            masters = json.loads(self.cli.masters(jsonFormat = True))
            if masters:
                for master in masters:
                    master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
                return master_count
            else:
                return master_count
        except:
            raise Exception('Failed to get cluster members')
            return master_count

    def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
        if new_master is None: return False
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_equal(master,new_master)
        log.info('Cluster master changed to %s successfully'%new_master)

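    # A minimal sketch of the ONOS CLI command strings built above and in the method below,
    # assuming a device id of 'of:0000000000000001' and a node IP of 172.17.0.3 (both values
    # are illustrative only, not taken from a real run):
    #
    #   device-role of:0000000000000001 172.17.0.3 master   # promote the node to master
    #   device-role of:0000000000000001 172.17.0.3 none     # withdraw its mastership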
    def withdraw_cluster_current_mastership(self,master_ip=None,device_id=device_id,controller=None):
        '''current master loses its mastership and hence a new master will be elected'''
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_not_equal(new_master_ip,master_ip)
        log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
        log.info('Cluster new master is %s'%new_master_ip)
        return True

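    # cluster_controller_restarts() drives the two controller-restart tests below: each
    # iteration restarts one controller (round-robin, optionally after a graceful CLI
    # shutdown), archives the per-iteration logs, scans every node's log for
    # StorageException$Timeout and verifies that the 'leaders' command still succeeds
    # cluster-wide.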
    def cluster_controller_restarts(self, graceful = False):
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration, controller = None):
            adjacent_controller = None
            adjacent_controllers = None
            if controller:
                adjacent_controllers = list(set(controllers) - set([controller]))
                adjacent_controller = adjacent_controllers[0]
            for node in controllers:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    failed = self.verify_leaders(controllers)
                    if failed:
                        log.info('Leaders command failed on nodes: %s' %failed)
                        log.error('Test failed on ITERATION %d' %iteration)
                    assert_equal(len(failed), 0)
                    return controller

            try:
                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
                log.info('ONOS cluster formed with controllers: %s' %ips)
                st = True
            except:
                st = False

            failed = self.verify_leaders(controllers)
            if failed:
                log.error('Test failed on ITERATION %d' %iteration)
            assert_equal(len(failed), 0)
            if st is False:
                log.info('No storage exception and ONOS cluster was not formed successfully')
            else:
                controller = None

            return controller

        next_controller = None
        tries = 10
        for num in range(tries):
            index = num % ctlr_len
            #index = random.randrange(0, ctlr_len)
            controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
            controller = onos_map[controller_name]
            log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
            try:
                #enable debug log for the other controllers before restarting this controller
                adjacent_controllers = list( set(controllers) - set([controller]) )
                self.log_set(controllers = adjacent_controllers)
                self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
                if graceful is True:
                    log.info('Gracefully shutting down controller: %s' %controller)
                    self.onos_shutdown(controller)
                cord_test_onos_restart(node = controller, timeout = 0)
                self.log_set(controllers = controller)
                self.log_set(app = 'io.atomix', controllers = controller)
                time.sleep(60)
            except:
                time.sleep(5)
                continue

            #first archive the test case logs for this run
            CordLogger.archive_results(self._testMethodName,
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            next_controller = check_exception(num, controller = controller)

    def test_cluster_controller_restarts(self):
        '''Test the cluster by repeatedly killing the controllers'''
        self.cluster_controller_restarts()

    def test_cluster_graceful_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the controllers gracefully'''
        self.cluster_controller_restarts(graceful = True)

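    # test_cluster_single_controller_restarts() below repeatedly shuts down and restarts one
    # randomly chosen controller. After the shutdown it checks only the surviving nodes
    # (inclusive = False); after the restart it re-checks all nodes (inclusive = True),
    # looking for storage exceptions, leader failures and stale membership entries.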
    def test_cluster_single_controller_restarts(self):
        '''Test the cluster by repeatedly restarting the same controller'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration, controller, inclusive = False):
            adjacent_controllers = list(set(controllers) - set([controller]))
            adjacent_controller = adjacent_controllers[0]
            controller_list = adjacent_controllers if inclusive == False else controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                log.error('Test failed on ITERATION %d' %iteration)
                assert_equal(len(failed), 0)
                return controller

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if controller in ips and inclusive is False:
                    log.info('Controller %s still ACTIVE on Node %s after it was shutdown' %(controller, ctlr))
                if controller not in ips and inclusive is True:
                    log.info('Controller %s still INACTIVE on Node %s after it was restarted' %(controller, ctlr))

            return controller

        tries = 10
        #choose a random controller for shutdown/restarts
        controller = controllers[random.randrange(0, ctlr_len)]
        controller_name = onos_map[controller]
        ##enable the log level for the controllers
        self.log_set(controllers = controllers)
        self.log_set(app = 'io.atomix', controllers = controllers)
        for num in range(tries):
            log.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
            try:
                cord_test_onos_shutdown(node = controller)
                time.sleep(20)
            except:
                time.sleep(5)
                continue
            #check for exceptions on the adjacent nodes
            check_exception(num, controller)
            #Now restart the controller back
            log.info('Restarting back the controller %s' %controller_name)
            cord_test_onos_restart(node = controller)
            self.log_set(controllers = controller)
            self.log_set(app = 'io.atomix', controllers = controller)
            time.sleep(60)
            #archive the logs for this run
            CordLogger.archive_results('test_cluster_single_controller_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            check_exception(num, controller, inclusive = True)

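    # test_cluster_restarts() below restarts the entire cluster on every iteration and then
    # requires that each node reports all controllers in the ACTIVE/READY state and that the
    # 'leaders' command succeeds everywhere.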
    def test_cluster_restarts(self):
        '''Test the cluster by repeatedly restarting the entire cluster'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(iteration):
            controller_list = controllers
            storage_exceptions = []
            for node in controller_list:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    storage_exceptions.append(node)

            failed = self.verify_leaders(controller_list)
            if failed:
                log.info('Leaders command failed on nodes: %s' %failed)
                if storage_exceptions:
                    log.info('Storage exception seen on nodes: %s' %storage_exceptions)
                log.error('Test failed on ITERATION %d' %iteration)
                assert_equal(len(failed), 0)
                return

            for ctlr in controller_list:
                ips = self.get_cluster_current_member_ips(controller = ctlr,
                                                          nodes_filter = \
                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
                log.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
                if len(ips) != len(controllers):
                    log.error('Test failed on ITERATION %d' %iteration)
                assert_equal(len(ips), len(controllers))

        tries = 10
        for num in range(tries):
            log.info('ITERATION: %d. Restarting cluster with controllers at %s' %(num+1, controllers))
            try:
                cord_test_restart_cluster()
                self.log_set(controllers = controllers)
                self.log_set(app = 'io.atomix', controllers = controllers)
                log.info('Delaying before verifying cluster status')
                time.sleep(60)
            except:
                time.sleep(10)
                continue

            #archive the logs for this run before verification
            CordLogger.archive_results('test_cluster_restarts',
                                       controllers = controllers,
                                       iteration = 'iteration_{}'.format(num+1))
            #check for exceptions on the adjacent nodes
            check_exception(num)

    #pass
    def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster exists with %d ONOS instances'%onos_instances)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d nodes to the ONOS cluster' %add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Removing cluster current master %s'%(master))
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
        assert_equal(status, True)
        new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
        assert_not_equal(master,new_master)
        log.info("Successfully removed the cluster's master instance")

    def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = standbys[0])
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
        assert_equal(status, True)

    def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = standbys[0])
        log.info('Removing cluster member %s'%standbys[1])
        cord_test_onos_shutdown(node = standbys[1])
        time.sleep(60)
        onos_instances = onos_instances - 2
        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
        assert_equal(status, True)

    def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(remove):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Removing onos container with name %s'%standbys[i])
            cord_test_onos_shutdown(node = standbys[i])
        time.sleep(60)
        onos_instances = onos_instances - remove
        status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
        assert_equal(status, True)

    #nottest test cluster not coming up properly if member goes down
    def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips)+add
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, onos_instances - i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        onos_instances = onos_instances-remove
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = onos_instances-remove
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        onos_instances = onos_instances+add
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)

    def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Restarting cluster')
        cord_test_onos_restart()
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Restarting cluster master %s'%master)
        cord_test_onos_restart(node = master)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up after master restart as expected')

    #test fails: the master changes after restart. Need to check the correct behavior.
    def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master1, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master1]
        log.info('Restarting cluster master %s'%master1)
        cord_test_onos_restart(node = master1)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master2, standbys = self.get_cluster_current_master_standbys()
        assert_equal(master1,master2)
        log.info('Cluster master is same before and after cluster master restart as expected')

    def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Restarting cluster member %s'%standbys[0])
        cord_test_onos_restart(node = standbys[0])
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting one member')

    def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
        cord_test_onos_restart(node = standbys[0])
        cord_test_onos_restart(node = standbys[1])
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting two members')

    def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status,True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(members):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Restarting cluster member %s'%standbys[i])
            cord_test_onos_restart(node = standbys[i])

        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting %d members'%members)

    def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        log.info('Cluster current master of devices is %s'%master)
        self.change_master_current_cluster(new_master=standbys[0])
        log.info('Cluster master changed successfully')

    #tested on single onos setup.
    def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        for onos_ip in onos_ips:
            tries = 0
            flag = False
            try:
                self.cliEnter(controller = onos_ip)
                while tries <= 5:
                    routes = json.loads(self.cli.routes(jsonFormat = True))
                    if routes:
                        assert_equal(len(routes['routes4']), networks)
                        self.cliExit()
                        flag = True
                        break
                    else:
                        tries += 1
                        time.sleep(1)
                assert_equal(flag, True)
            except:
                log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
                raise

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res,True)
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        log.info('Verifying vrouter traffic after cluster master is down')
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        cord_test_onos_restart()
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        time.sleep(15)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before master down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate=True)
        log.info('Verifying vrouter traffic after app deactivated')
        time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        log.info('Verifying vrouter traffic after master down')
        cord_test_onos_shutdown(node = master)
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify(positive_test=False)
        self.vrouter.vrouter_activate(deactivate=False)

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before cluster member down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        log.info('Verifying vrouter after cluster member down')
        cord_test_onos_shutdown(node = standbys[0])
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster goes down

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[1]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster member restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart(node = standbys[1])
        log.info('Verifying vrouter traffic after cluster member restart')
        self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # Expecting vrouter should work properly
        cord_test_onos_restart()
        log.info('Verifying vrouter traffic after cluster restart')
        self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts


    #test fails because flow state is in pending_add in onos
    def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
        ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            udpSrc = ingress_map['udp_port'],
                            udpDst = egress_map['udp_port'],
                            controller=master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
                self.success = True
            sniff(timeout=2,
                  lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
                  and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])

        for i in [0,1]:
            if i == 1:
                cord_test_onos_shutdown(node = master)
                log.info('Verifying flows traffic after master killed')
                time.sleep(45)
            else:
                log.info('Verifying flows traffic before master killed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
            L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
            pkt = L2/L3/L4
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1' }
        ingress_map = { 'ip': '192.168.40.1' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ecn = 1,
                            controller=master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
                self.success = True
            sniff(count=2, timeout=5,
                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
                  and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
                  iface = self.flows.port_map[egress])
        for i in [0,1]:
            if i == 1:
                log.info('Changing cluster master to %s'%standbys[0])
                self.change_master_current_cluster(new_master=standbys[0])
                log.info('Verifying flow traffic after cluster master changed')
            else:
                log.info('Verifying flow traffic before cluster master changed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
            pkt = L2/L3
            log.info('Sending a packet to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    #pass
    def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master,standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
        ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ipv6_extension = 0,
                            controller=master
                            )

        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
                self.success = True
            sniff(timeout=2,count=5,
                  lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
        for i in [0,1]:
            if i == 1:
                log.info('Restart cluster current master %s'%master)
                Container(master_onos_name,Onos.IMAGE).restart()
                time.sleep(45)
                log.info('Verifying flow traffic after master restart')
            else:
                log.info('Verifying flow traffic before master restart')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth
            L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
            pkt = L2/L3
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

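    # The two helpers below generate and verify multicast data traffic: the sender derives
    # the destination MAC from the group address via self.igmp.iptomac() (presumably the
    # standard 01:00:5e multicast mapping, matching igmp_eth above) and pushes packets on
    # the source interface, while the receiver sniffs the subscriber interface for packets
    # matching the expected group and source.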
    def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
        dst_mac = self.igmp.iptomac(group)
        eth = Ether(dst= dst_mac)
        ip = IP(dst=group,src=source)
        data = repr(monotonic.monotonic())
        sendp(eth/ip/data,count=20, iface = intf)
        pkt = (eth/ip/data)
        log.info('multicast traffic packet %s'%pkt.show())

    def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
        log.info('verifying multicast traffic for group %s from source %s'%(group,source))
        self.success = False
        def recv_task():
            def igmp_recv_cb(pkt):
                log.info('multicast data received for group %s from source %s'%(group,source))
                self.success = True
            sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
        t = threading.Thread(target = recv_task)
        t.start()
        self.send_multicast_data_traffic(group,source=source)
        t.join()
        return self.success

1084 #pass
1085 def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
1086 status = self.verify_cluster_status(onos_instances=onos_instances)
1087 assert_equal(status, True)
1088 master, standbys = self.get_cluster_current_master_standbys()
1089 assert_equal(len(standbys), (onos_instances-1))
1090 onos_names_ips = self.get_cluster_container_names_ips()
1091 master_onos_name = onos_names_ips[master]
1092 self.igmp.setUp(controller=master)
1093 groups = ['224.2.3.4','230.5.6.7']
1094 src_list = ['2.2.2.2','3.3.3.3']
1095 self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
1096 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1097 iface = self.V_INF1, delay = 2)
1098 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1099 iface = self.V_INF1, delay = 2)
1100 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1101 assert_equal(status,True)
1102 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1103 assert_equal(status,False)
1104 log.info('restarting cluster master %s'%master)
1105 Container(master_onos_name,Onos.IMAGE).restart()
1106 time.sleep(60)
1107 log.info('verifying multicast data traffic after master restart')
1108 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1109 assert_equal(status,True)
1110 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
1111 assert_equal(status,False)
1112
1113 #pass
1114 def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
1115 status = self.verify_cluster_status(onos_instances=onos_instances)
1116 assert_equal(status, True)
1117 master, standbys = self.get_cluster_current_master_standbys()
1118 assert_equal(len(standbys), (onos_instances-1))
1119 onos_names_ips = self.get_cluster_container_names_ips()
1120 master_onos_name = onos_names_ips[master]
1121 self.igmp.setUp(controller=master)
1122 groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
1123 src_list = [self.igmp.randomsourceip()]
1124 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1125 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1126 iface = self.V_INF1, delay = 2)
1127 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
1128 iface = self.V_INF1, delay = 2)
1129 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1130 assert_equal(status,True)
1131 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1132 assert_equal(status,False)
1133 log.info('Killing cluster master %s'%master)
1134 Container(master_onos_name,Onos.IMAGE).kill()
1135 time.sleep(60)
1136 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
1137 assert_equal(status, True)
1138 log.info('Verifying multicast data traffic after cluster master down')
1139 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1140 assert_equal(status,True)
1141 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
1142 assert_equal(status,False)
1143
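    # Note: the recovery loop below polls roughly once per second, but each probe also
    # spends up to ~2 seconds sniffing, so the reported count is an approximate lower
    # bound on the real traffic recovery time rather than an exact wall-clock figure.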
1144 def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
1145 status = self.verify_cluster_status(onos_instances=onos_instances)
1146 assert_equal(status, True)
1147 master, standbys = self.get_cluster_current_master_standbys()
1148 assert_equal(len(standbys), (onos_instances-1))
1149 onos_names_ips = self.get_cluster_container_names_ips()
1150 master_onos_name = onos_names_ips[master]
1151 self.igmp.setUp(controller=master)
1152 groups = [self.igmp.random_mcast_ip()]
1153 src_list = [self.igmp.randomsourceip()]
1154 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1155 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1156 iface = self.V_INF1, delay = 2)
1157 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1158 assert_equal(status,True)
1159 log.info('Killing cluster master %s'%master)
1160 Container(master_onos_name,Onos.IMAGE).kill()
1161 count = 0
1162 for i in range(60):
1163 log.info('Verifying multicast data traffic after cluster master down')
1164 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1165 if status:
1166 break
1167 else:
1168 count += 1
1169 time.sleep(1)
1170 assert_equal(status, True)
1171 log.info('Approximate time taken to recover traffic after cluster master went down is %d seconds'%count)
1172
1173
1174 #pass
1175 def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
1176 status = self.verify_cluster_status(onos_instances=onos_instances)
1177 assert_equal(status, True)
1178 master, standbys = self.get_cluster_current_master_standbys()
1179 assert_equal(len(standbys), (onos_instances-1))
1180 self.igmp.setUp(controller=master)
1181 groups = [self.igmp.random_mcast_ip()]
1182 src_list = [self.igmp.randomsourceip()]
1183 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1184 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1185 iface = self.V_INF1, delay = 2)
1186 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1187 assert_equal(status,True)
1188 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1189 self.change_cluster_current_master(new_master=standbys[0])
1190 log.info('Verifying multicast traffic after cluster master change')
1191 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1192 assert_equal(status,True)
1193 log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
1194 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
1195 iface = self.V_INF1, delay = 1)
1196 time.sleep(10)
1197 status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
1198 assert_equal(status,False)
1199
1200 #pass
1201 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1202 status = self.verify_cluster_status(onos_instances=onos_instances)
1203 assert_equal(status, True)
1204 master,standbys = self.get_cluster_current_master_standbys()
1205 assert_equal(len(standbys), (onos_instances-1))
1206 self.igmp.setUp(controller=master)
1207 groups = [self.igmp.random_mcast_ip()]
1208 src_list = [self.igmp.randomsourceip()]
1209 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1210 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1211 self.change_cluster_current_master(new_master = standbys[0])
1212 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1213 iface = self.V_INF1, delay = 2)
1214 time.sleep(1)
1215 self.change_cluster_current_master(new_master = master)
1216 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1217 assert_equal(status,True)
1218
1219 #pass
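    # The EAP-TLS tests run the 802.1x TLS handshake (TLSAuthTest) inside a twisted
    # deferred so that the nose @deferred timeout bounds the whole authentication
    # exchange across cluster master changes, failures and member restarts.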
ChetanGaonker2099d722016-10-07 15:16:58 -07001220 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001221 def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001222 status = self.verify_cluster_status(onos_instances=onos_instances)
1223 assert_equal(status, True)
1224 master, standbys = self.get_cluster_current_master_standbys()
1225 assert_equal(len(standbys), (onos_instances-1))
1226 self.tls.setUp(controller=master)
1227 df = defer.Deferred()
1228 def eap_tls_verify(df):
1229 tls = TLSAuthTest()
1230 tls.runTest()
1231 df.callback(0)
1232 reactor.callLater(0, eap_tls_verify, df)
1233 return df
1234
1235 @deferred(120)
ChetanGaonker689b3862016-10-17 16:25:01 -07001236 def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001237 master, standbys = self.get_cluster_current_master_standbys()
1238 assert_equal(len(standbys), (onos_instances-1))
1239 self.tls.setUp()
1240 df = defer.Deferred()
1241 def eap_tls_verify(df):
1242 tls = TLSAuthTest()
1243 tls.runTest()
1244 df.callback(0)
1245 for i in [0,1]:
1246 if i == 1:
1247 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1248 self.change_master_current_cluster(new_master=standbys[0])
1249 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1250 else:
1251 log.info('Verifying tls authentication before cluster master change')
1252 reactor.callLater(0, eap_tls_verify, df)
1253 return df
1254
1255 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001256 def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001257 status = self.verify_cluster_status(onos_instances=onos_instances)
1258 assert_equal(status, True)
1259 master, standbys = self.get_cluster_current_master_standbys()
1260 assert_equal(len(standbys), (onos_instances-1))
1261 onos_names_ips = self.get_cluster_container_names_ips()
1262 master_onos_name = onos_names_ips[master]
1263 self.tls.setUp()
1264 df = defer.Deferred()
1265 def eap_tls_verify(df):
1266 tls = TLSAuthTest()
1267 tls.runTest()
1268 df.callback(0)
1269 for i in [0,1]:
1270 if i == 1:
1271 log.info('Killing cluster current master %s'%master)
A R Karthick3b2e0372016-12-14 17:37:43 -08001272 cord_test_onos_shutdown(node = master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001273 time.sleep(20)
1274 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1275 assert_equal(status, True)
1276 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1277 log.info('Verifying tls authentication after killing cluster master')
1278 reactor.callLater(0, eap_tls_verify, df)
1279 return df
1280
1281 @deferred(TLS_TIMEOUT)
ChetanGaonker689b3862016-10-17 16:25:01 -07001282 def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001283 status = self.verify_cluster_status(onos_instances=onos_instances)
1284 assert_equal(status, True)
1285 master, standbys = self.get_cluster_current_master_standbys()
1286 assert_equal(len(standbys), (onos_instances-1))
1287 onos_names_ips = self.get_cluster_container_names_ips()
1288 member_onos_name = onos_names_ips[standbys[0]]
1289 self.tls.setUp()
1290 df = defer.Deferred()
1291 def eap_tls_no_cert(df):
1292 def tls_no_cert_cb():
1293 log.info('TLS authentication failed with no certificate')
1294 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1295 tls.runTest()
1296 assert_equal(tls.failTest, True)
1297 df.callback(0)
1298 for i in [0,1]:
1299 if i == 1:
1300 log.info('Restart cluster member %s'%standbys[0])
1301 Container(member_onos_name,Onos.IMAGE).restart()
1302 time.sleep(20)
1303 status = self.verify_cluster_status(onos_instances=onos_instances)
1304 assert_equal(status, True)
1305 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1306 log.info('Verifying tls authentication after member restart')
1307 reactor.callLater(0, eap_tls_no_cert, df)
1308 return df
1309
ChetanGaonker689b3862016-10-17 16:25:01 -07001310 #pass
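    # Proxy ARP tests: configure hosts through proxyarp_config() on the current master
    # and verify that ONOS keeps answering ARP requests for them across master changes,
    # member failures and app deactivation/reactivation.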
1311 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1312 status = self.verify_cluster_status(onos_instances=onos_instances)
1313 assert_equal(status,True)
1314 master,standbys = self.get_cluster_current_master_standbys()
1315 assert_equal(len(standbys),(onos_instances-1))
1316 self.proxyarp.setUpClass()
1317 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1318 ingress = hosts+1
1319 for hostip, hostmac in hosts_config:
1320 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1321 time.sleep(1)
1322 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1323 self.change_cluster_current_master(new_master=standbys[0])
1324 log.info('verifying proxyarp after master change')
1325 for hostip, hostmac in hosts_config:
1326 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1327 time.sleep(1)
1328 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1329 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1330 time.sleep(3)
1331 for hostip, hostmac in hosts_config:
1332 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1333 time.sleep(1)
1334 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1335 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1336 time.sleep(3)
1337 for hostip, hostmac in hosts_config:
1338 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1339 time.sleep(1)
ChetanGaonker2099d722016-10-07 15:16:58 -07001340
ChetanGaonker689b3862016-10-17 16:25:01 -07001341 #pass
1342 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001343 status = self.verify_cluster_status(onos_instances=onos_instances)
1344 assert_equal(status, True)
1345 master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001346 assert_equal(len(standbys), (onos_instances-1))
1347 onos_names_ips = self.get_cluster_container_names_ips()
1348 member_onos_name = onos_names_ips[standbys[1]]
1349 self.proxyarp.setUpClass()
1350 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1351 ingress = hosts+1
1352 for hostip, hostmac in hosts_config:
1353 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1354 time.sleep(1)
1355 log.info('killing cluster member %s'%standbys[1])
1356 Container(member_onos_name,Onos.IMAGE).kill()
1357 time.sleep(20)
1358 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1359 assert_equal(status, True)
1360 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1361 log.info('verifying proxy arp functionality after cluster member down')
1362 for hostip, hostmac in hosts_config:
1363 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1364 time.sleep(1)
1365
1366 #pass
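    # Concurrency check: one sender/sniffer thread per host-port pair records its result
    # in success_dir keyed by thread name; the test passes only if all 10 ARP replies
    # are observed.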
1367 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1368 status = self.verify_cluster_status(onos_instances=onos_instances)
1369 assert_equal(status, True)
1370 self.proxyarp.setUpClass()
1371 master, standbys = self.get_cluster_current_master_standbys()
1372 assert_equal(len(standbys), (onos_instances-1))
1373 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1374 self.success = True
1375 ingress = hosts+1
1376 ports = range(ingress,ingress+10)
1377 hostmac = []
1378 hostip = []
1379 for ip,mac in hosts_config:
1380 hostmac.append(mac)
1381 hostip.append(ip)
1382 success_dir = {}
1383 def verify_proxyarp(*r):
1384 ingress, hostmac, hostip = r[0],r[1],r[2]
1385 def mac_recv_task():
1386 def recv_cb(pkt):
1387 log.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
1388 success_dir[current_thread().name] = True
1389 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1390 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1391 t = threading.Thread(target = mac_recv_task)
1392 t.start()
1393 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1394 log.info('Sending arp request for dest ip %s on interface %s' %
1395 (hostip,self.proxyarp.port_map[ingress]))
1396 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1397 t.join()
1398 t = []
1399 for i in range(10):
1400 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1401 for i in range(10):
1402 t[i].start()
1403 time.sleep(2)
1404 for i in range(10):
1405 t[i].join()
1406 if len(success_dir) != 10:
1407 self.success = False
1408 assert_equal(self.success, True)
1409
1410 #pass
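    # ACL tests drive the ONOS ACL REST API through ACLTest: a rule added via the current
    # master must remain visible (and removable) from whichever node becomes master next.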
1411 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1412 status = self.verify_cluster_status(onos_instances=onos_instances)
1413 assert_equal(status, True)
1414 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001415 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001416 self.acl.setUp()
1417 acl_rule = ACLTest()
1418 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1419 if status is False:
1420 log.info('JSON request returned status %d' %code)
1421 assert_equal(status, True)
1422 result = acl_rule.get_acl_rules(controller=master)
1423 aclRules1 = result.json()['aclRules']
1424 log.info('Added acl rules is %s'%aclRules1)
1425 acl_Id = map(lambda d: d['id'], aclRules1)
1426 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1427 self.change_cluster_current_master(new_master=standbys[0])
1428 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1429 if status is False:
1430 log.info('JSON request returned status %d' %code)
1431 assert_equal(status, True)
1432
1433 #pass
1434 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1435 status = self.verify_cluster_status(onos_instances=onos_instances)
1436 assert_equal(status, True)
1437 master,standbys = self.get_cluster_current_master_standbys()
1438 assert_equal(len(standbys),(onos_instances-1))
1439 onos_names_ips = self.get_cluster_container_names_ips()
1440 master_onos_name = onos_names_ips[master]
1441 self.acl.setUp()
1442 acl_rule = ACLTest()
1443 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1444 if status is False:
1445 log.info('JSON request returned status %d' %code)
1446 assert_equal(status, True)
1447 result1 = acl_rule.get_acl_rules(controller=master)
1448 aclRules1 = result1.json()['aclRules']
1449 log.info('Added acl rules is %s'%aclRules1)
1450 acl_Id1 = map(lambda d: d['id'], aclRules1)
1451 log.info('Killing cluster current master %s'%master)
1452 Container(master_onos_name,Onos.IMAGE).kill()
1453 time.sleep(45)
1454 status = self.verify_cluster_status(onos_instances=onos_instances,controller=standbys[0])
1455 assert_equal(status, True)
1456 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1457 assert_equal(len(standbys),(onos_instances-2))
1458 assert_not_equal(new_master,master)
1459 result2 = acl_rule.get_acl_rules(controller=new_master)
1460 aclRules2 = result2.json()['aclRules']
1461 acl_Id2 = map(lambda d: d['id'], aclRules2)
1462 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1463 assert_equal(acl_Id2,acl_Id1)
1464
1465 #acl traffic scenario not working as acl rule is not getting added to onos
1466 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1467 status = self.verify_cluster_status(onos_instances=onos_instances)
1468 assert_equal(status, True)
1469 master,standbys = self.get_cluster_current_master_standbys()
1470 assert_equal(len(standbys),(onos_instances-1))
1471 onos_names_ips = self.get_cluster_container_names_ips()
1472 member1_onos_name = onos_names_ips[standbys[0]]
1473 member2_onos_name = onos_names_ips[standbys[1]]
1474 ingress = self.acl.ingress_iface
1475 egress = self.acl.CURRENT_PORT_NUM
1476 acl_rule = ACLTest()
1477 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1478 self.acl.CURRENT_PORT_NUM += 1
1479 time.sleep(5)
1480 if status is False:
1481 log.info('JSON request returned status %d' %code)
1482 assert_equal(status, True)
1483 srcMac = '00:00:00:00:00:11'
1484 dstMac = host_ip_mac[0][1]
1485 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1486 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1487 time.sleep(10)
1488 if status is False:
1489 log.info('JSON request returned status %d' %code)
1490 assert_equal(status, True)
1491 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1492 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1493 Container(member1_onos_name, Onos.IMAGE).kill()
1494 Container(member2_onos_name, Onos.IMAGE).kill()
1495 time.sleep(40)
1496 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1497 assert_equal(status, True)
1498 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1499 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1500
1501 #pass
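    # DHCP relay tests start a local dhcpd on the relay interfaces with the default
    # config/options/subnet, then exercise discover/request/release from simulated
    # clients while the cluster master is changed or brought down.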
1502 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1503 status = self.verify_cluster_status(onos_instances=onos_instances)
1504 assert_equal(status, True)
1505 master,standbys = self.get_cluster_current_master_standbys()
1506 assert_equal(len(standbys),(onos_instances-1))
1507 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001508 mac = self.dhcprelay.get_mac(iface)
1509 self.dhcprelay.host_load(iface)
1510 ##we use the defaults for this test that serves as an example for others
1511 ##You don't need to restart dhcpd server if retaining default config
1512 config = self.dhcprelay.default_config
1513 options = self.dhcprelay.default_options
1514 subnet = self.dhcprelay.default_subnet_config
1515 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1516 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1517 config = config,
1518 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001519 subnet = subnet,
1520 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001521 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1522 cip, sip = self.dhcprelay.send_recv(mac)
1523 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1524 self.change_master_current_cluster(new_master=standbys[0])
1525 log.info('Releasing ip %s to server %s' %(cip, sip))
1526 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1527 log.info('Triggering DHCP discover again after release')
1528 cip2, sip2 = self.dhcprelay.send_recv(mac)
1529 log.info('Verifying released IP was given back on rediscover')
1530 assert_equal(cip, cip2)
1531 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1532 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001533 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001534
ChetanGaonker689b3862016-10-17 16:25:01 -07001535
1536 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1537 status = self.verify_cluster_status(onos_instances=onos_instances)
1538 assert_equal(status, True)
1539 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001540 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001541 onos_names_ips = self.get_cluster_container_names_ips()
1542 master_onos_name = onos_names_ips[master]
1543 self.dhcprelay.setUpClass(controller=master)
1544 mac = self.dhcprelay.get_mac(iface)
1545 self.dhcprelay.host_load(iface)
1546 ##we use the defaults for this test that serves as an example for others
1547 ##You don't need to restart dhcpd server if retaining default config
1548 config = self.dhcprelay.default_config
1549 options = self.dhcprelay.default_options
1550 subnet = self.dhcprelay.default_subnet_config
1551 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1552 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1553 config = config,
1554 options = options,
1555 subnet = subnet,
1556 controller=master)
1557 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1558 log.info('Initiating dhcp process from client %s'%mac)
1559 cip, sip = self.dhcprelay.send_recv(mac)
1560 log.info('Killing cluster current master %s'%master)
1561 Container(master_onos_name, Onos.IMAGE).kill()
1562 time.sleep(60)
1563 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1564 assert_equal(status, True)
1565 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1566 log.info("Verifying dhcp client gets same IP after cluster master goes down")
1567 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1568 assert_equal(new_cip, cip)
1569 self.dhcprelay.tearDownClass(controller=standbys[0])
1570
1571 #pass
1572 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1573 status = self.verify_cluster_status(onos_instances=onos_instances)
1574 assert_equal(status, True)
1575 master,standbys = self.get_cluster_current_master_standbys()
1576 assert_equal(len(standbys),(onos_instances-1))
1577 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001578 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1579 self.dhcprelay.host_load(iface)
1580 ##we use the defaults for this test that serves as an example for others
1581 ##You don't need to restart dhcpd server if retaining default config
1582 config = self.dhcprelay.default_config
1583 options = self.dhcprelay.default_options
1584 subnet = self.dhcprelay.default_subnet_config
1585 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1586 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1587 config = config,
1588 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001589 subnet = subnet,
1590 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001591 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1592 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1593 assert_not_equal(cip1,None)
1594 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1595 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1596 self.change_master_current_cluster(new_master=standbys[0])
1597 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1598 assert_not_equal(cip2,None)
1599 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1600 log.info('Changing cluster master from %s to %s'%(standbys[0],master))
1601 self.change_master_current_cluster(new_master=master)
1602 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1603 assert_not_equal(cip3,None)
1604 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
ChetanGaonker689b3862016-10-17 16:25:01 -07001605 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001606
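    # CORD subscriber tests: subscriber_join_verify() drives TLS authentication, DHCP and
    # IGMP channel joins per subscriber and validates traffic through the supplied
    # callbacks, once before and once after the cluster event under test.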
ChetanGaonker689b3862016-10-17 16:25:01 -07001607 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001608 status = self.verify_cluster_status(onos_instances=onos_instances)
1609 assert_equal(status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001610 master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001611 self.subscriber.num_subscribers = 5
1612 self.subscriber.num_channels = 10
1613 for i in [0,1]:
1614 if i == 1:
1615 cord_test_onos_restart()
1616 time.sleep(45)
1617 status = self.verify_cluster_status(onos_instances=onos_instances)
1618 assert_equal(status, True)
1619 log.info('Verifying cord subscriber functionality after cluster restart')
1620 else:
1621 log.info('Verifying cord subscriber functionality before cluster restart')
1622 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1623 num_channels = self.subscriber.num_channels,
1624 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1625 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1626 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1627 self.subscriber.num_channels))
1628 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001629 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001630
ChetanGaonker689b3862016-10-17 16:25:01 -07001631 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1632 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1633 status = self.verify_cluster_status(onos_instances=onos_instances)
1634 assert_equal(status, True)
1635 master,standbys = self.get_cluster_current_master_standbys()
1636 assert_equal(len(standbys),(onos_instances-1))
1637 self.subscriber.setUpClass(controller=master)
1638 self.subscriber.num_subscribers = 5
1639 self.subscriber.num_channels = 10
1640 for i in [0,1]:
1641 if i == 1:
1642 status=self.withdraw_cluster_current_mastership(master_ip=master)
1643 assert_equal(status, True)
1644 master,standbys = self.get_cluster_current_master_standbys()
1645 log.info('verifying cord subscriber functionality after cluster current master withdraw mastership')
1646 else:
1647 log.info('verifying cord subscriber functionality before cluster master withdraw mastership')
1648 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1649 num_channels = self.subscriber.num_channels,
1650 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1651 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1652 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1653 self.subscriber.num_channels),controller=master)
1654 assert_equal(test_status, True)
1655 self.subscriber.tearDownClass(controller=master)
1656
1657 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1658 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001659 status = self.verify_cluster_status(onos_instances=onos_instances)
1660 assert_equal(status, True)
1661 master, standbys = self.get_cluster_current_master_standbys()
1662 assert_equal(len(standbys),(onos_instances-1))
1663 onos_names_ips = self.get_cluster_container_names_ips()
1664 member_onos_name = onos_names_ips[standbys[0]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001665 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001666 num_subscribers = 1
1667 num_channels = 10
1668 for i in [0,1]:
1669 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001670 cord_test_onos_shutdown(node = standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001671 time.sleep(30)
ChetanGaonker689b3862016-10-17 16:25:01 -07001672 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001673 assert_equal(status, True)
1674 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1675 else:
1676 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1677 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1678 num_channels = num_channels,
1679 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1680 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1681 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
ChetanGaonker689b3862016-10-17 16:25:01 -07001682 negative_subscriber_auth = 'all',controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001683 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001684 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001685
ChetanGaonker689b3862016-10-17 16:25:01 -07001686 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001687 status = self.verify_cluster_status(onos_instances=onos_instances)
1688 assert_equal(status, True)
1689 master, standbys = self.get_cluster_current_master_standbys()
1690 assert_equal(len(standbys),(onos_instances-1))
1691 onos_names_ips = self.get_cluster_container_names_ips()
1692 member1_onos_name = onos_names_ips[standbys[0]]
1693 member2_onos_name = onos_names_ips[standbys[1]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001694 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001695 num_subscribers = 1
1696 num_channels = 10
1697 for i in [0,1]:
1698 if i == 1:
A R Karthick3b2e0372016-12-14 17:37:43 -08001699 cord_test_onos_shutdown(node = standbys[0])
1700 cord_test_onos_shutdown(node = standbys[1])
ChetanGaonker2099d722016-10-07 15:16:58 -07001701 time.sleep(60)
1702 status = self.verify_cluster_status(onos_instances=onos_instances-2)
1703 assert_equal(status, True)
1704 log.info('Verifying cord subscriber functionality after two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1705 else:
1706 log.info('Verifying cord subscriber functionality before two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1707 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1708 num_channels = num_channels,
1709 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1710 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1711 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1712 negative_subscriber_auth = 'all')
1713 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001714 self.subscriber.tearDownClass(controller=master)
1715
1716 #pass
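    # Multi-switch tests inspect per-device mastership: every connected OVS device should
    # report exactly one master and (onos_instances - 1) standby controllers.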
1717 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1718 status = self.verify_cluster_status(onos_instances=onos_instances)
1719 assert_equal(status, True)
1720 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1721 for device in device_dict.keys():
1722 log.info("Device is %s"%device_dict[device])
1723 assert_not_equal(device_dict[device]['master'],'none')
1724 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1725 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1726
1727 #pass
1728 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1729 status = self.verify_cluster_status(onos_instances=onos_instances)
1730 assert_equal(status, True)
1731 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1732 cluster_ips = self.get_cluster_current_member_ips()
1733 for ip in cluster_ips:
1734 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1735 assert_equal(len(device_dict.keys()),onos_instances)
1736 for device in device_dict.keys():
1737 log.info("Device is %s"%device_dict[device])
1738 assert_not_equal(device_dict[device]['master'],'none')
1739 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1740 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1741
1742 #pass
1743 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1744 status = self.verify_cluster_status(onos_instances=onos_instances)
1745 assert_equal(status, True)
1746 onos_names_ips = self.get_cluster_container_names_ips()
1747 master_count = self.get_number_of_devices_of_master()
1748 log.info('Master count information is %s'%master_count)
1749 total_devices = 0
1750 for master in master_count.keys():
1751 total_devices += master_count[master]['size']
1752 if master_count[master]['size'] != 0:
1753 restart_ip = master
1754 assert_equal(total_devices,onos_instances)
1755 member_onos_name = onos_names_ips[restart_ip]
1756 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1757 Container(member_onos_name, Onos.IMAGE).restart()
1758 time.sleep(40)
1759 master_count = self.get_number_of_devices_of_master()
1760 log.info('Master count information after restart is %s'%master_count)
1761 total_devices = 0
1762 for master in master_count.keys():
1763 total_devices += master_count[master]['size']
1764 if master == restart_ip:
1765 assert_equal(master_count[master]['size'], 0)
1766 assert_equal(total_devices,onos_instances)
1767
1768 #pass
1769 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1770 status = self.verify_cluster_status(onos_instances=onos_instances)
1771 assert_equal(status, True)
1772 onos_names_ips = self.get_cluster_container_names_ips()
1773 master_count = self.get_number_of_devices_of_master()
1774 log.info('Master count information is %s'%master_count)
1775 total_devices = 0
1776 for master in master_count.keys():
1777 total_devices += master_count[master]['size']
1778 if master_count[master]['size'] != 0:
1779 restart_ip = master
1780 assert_equal(total_devices,onos_instances)
1781 master_onos_name = onos_names_ips[restart_ip]
1782 log.info('Killing cluster node %s having ip %s'%(master_onos_name,restart_ip))
1783 Container(master_onos_name, Onos.IMAGE).kill()
1784 time.sleep(40)
1785 for ip in onos_names_ips.keys():
1786 if ip != restart_ip:
1787 controller_ip = ip
1788 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1789 assert_equal(status, True)
1790 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1791 log.info('Master count information after restart is %s'%master_count)
1792 total_devices = 0
1793 for master in master_count.keys():
1794 total_devices += master_count[master]['size']
1795 if master == restart_ip:
1796 assert_equal(master_count[master]['size'], 0)
1797 assert_equal(total_devices,onos_instances)
1798
1799 #pass
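    # Mastership withdraw test: pick a device owned by some controller, withdraw that
    # controller's mastership for it via withdraw_cluster_current_mastership(), and check
    # that its device count drops by one while the cluster-wide device total is unchanged.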
1800 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1801 status = self.verify_cluster_status(onos_instances=onos_instances)
1802 assert_equal(status, True)
1803 master_count = self.get_number_of_devices_of_master()
1804 log.info('Master count information is %s'%master_count)
1805 total_devices = 0
1806 for master in master_count.keys():
1807 total_devices += int(master_count[master]['size'])
1808 if master_count[master]['size'] != 0:
1809 master_ip = master
1810 log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
1811 device_id = str(master_count[master]['devices'][0])
1812 device_count = master_count[master]['size']
1813 assert_equal(total_devices,onos_instances)
1814 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1815 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1816 assert_equal(status, True)
1817 master_count = self.get_number_of_devices_of_master()
1818 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1819 total_devices = 0
1820 for master in master_count.keys():
1821 total_devices += int(master_count[master]['size'])
1822 if master == master_ip:
1823 assert_equal(master_count[master]['size'], device_count-1)
1824 assert_equal(total_devices,onos_instances)
1825
1826 #pass
1827 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1828 status = self.verify_cluster_status(onos_instances=onos_instances)
1829 assert_equal(status, True)
1830 master_count = self.get_number_of_devices_of_master()
1831 log.info('Master count information is %s'%master_count)
1832 total_devices = 0
1833 for master in master_count.keys():
1834 total_devices += master_count[master]['size']
1835 assert_equal(total_devices,onos_instances)
1836 log.info('Restarting cluster')
1837 cord_test_onos_restart()
1838 time.sleep(60)
1839 master_count = self.get_number_of_devices_of_master()
1840 log.info('Master count information after restart is %s'%master_count)
1841 total_devices = 0
1842 for master in master_count.keys():
1843 total_devices += master_count[master]['size']
1844 assert_equal(total_devices,onos_instances)