#copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl, get_mac
from OltConfig import OltConfig
from socket import socket
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart
from portmaps import g_subscriber_port_map
from scapy.all import *
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
from OnosLog import OnosLog
from CordLogger import CordLogger
import os
import json
import random
import collections
log.setLevel('INFO')

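# Cluster test suite for ONOS: exercises cluster formation, mastership changes and
# member/master restarts, and verifies that vrouter, IGMP, EAP-TLS, proxy ARP and
# ACL functionality survive those events. The tests are driven through nose/twisted
# (see the nose.tools and nose.twistedtools imports above).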
class cluster_exchange(CordLogger):
    test_path = os.path.dirname(os.path.realpath(__file__))
    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
    mac = RandMAC()._fix()
    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
    igmp_ip = IP(dst = '224.0.0.22')
    ONOS_INSTANCES = 3
    V_INF1 = 'veth0'
    TLS_TIMEOUT = 100
    device_id = 'of:' + get_mac()
    igmp = cluster_igmp()
    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10',end_ip = '224.1.10.49')
    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35',end_ip='38.24.35.56')
    tls = cluster_tls()
    flows = cluster_flows()
    proxyarp = cluster_proxyarp()
    vrouter = cluster_vrouter()
    acl = cluster_acl()
    dhcprelay = cluster_dhcprelay()
    subscriber = cluster_subscriber()
    testcaseLoggers = ('test_cluster_controller_restarts',)

    def setUp(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).setUp()

    def tearDown(self):
        if self._testMethodName not in self.testcaseLoggers:
            super(cluster_exchange, self).tearDown()

    def get_controller(self):
        controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
        controller = controller.split(',')[0]
        return controller

    @classmethod
    def get_controllers(cls):
        controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
        return controllers.split(',')

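    # cliEnter()/cliExit() wrap an OnosCliDriver session; cliEnter() retries the CLI
    # connection up to 3 times (2 seconds apart) since an instance that is still
    # starting up may not accept CLI connections immediately.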
    def cliEnter(self,controller = None):
        retries = 0
        while retries < 3:
            self.cli = OnosCliDriver(controller = controller,connect = True)
            if self.cli.handle:
                break
            else:
                retries += 1
                time.sleep(2)

    def cliExit(self):
        self.cli.disconnect()

    def get_leader(self, controller = None):
        self.cliEnter(controller = controller)
        result = json.loads(self.cli.leaders(jsonFormat = True))
        if result is None:
            log.info('Leaders command failure for controller %s' %controller)
        else:
            log.info('Leaders returned: %s' %result)
        self.cliExit()
        return result

    def get_leaders(self, controller = None):
        result = []
        if type(controller) in [ list, tuple ]:
            for c in controller:
                leaders = self.get_leader(controller = c)
                result.append(leaders)
        else:
            leaders = self.get_leader(controller = controller)
            result.append(leaders)
        return result

    def verify_leaders(self, controller = None):
        leaders = self.get_leaders(controller = controller)
        failed = filter(lambda l: l == None, leaders)
        return failed

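    # verify_cluster_status() polls the ONOS 'summary' output for up to ~10 tries:
    # with verify=True the reported node count must equal onos_instances exactly,
    # otherwise at least onos_instances nodes are required for success.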
    def verify_cluster_status(self,controller = None,onos_instances=ONOS_INSTANCES,verify=False):
        tries = 0
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
                if cluster_summary:
                    log.info("cluster 'summary' command output is %s"%cluster_summary)
                    nodes = cluster_summary['nodes']
                    if verify:
                        if nodes == onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                    else:
                        if nodes >= onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return False
        except:
            raise Exception('Failed to get cluster members')
            return False

    def get_cluster_current_member_ips(self,controller = None):
        tries = 0
        cluster_ips = []
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
                if cluster_nodes:
                    log.info("cluster 'nodes' output is %s"%cluster_nodes)
                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
                    self.cliExit()
                    cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
                    return cluster_ips
                else:
                    tries += 1
            self.cliExit()
            return cluster_ips
        except:
            raise Exception('Failed to get cluster members')
            return cluster_ips

    def get_cluster_container_names_ips(self,controller=None):
        onos_names_ips = {}
        onos_ips = self.get_cluster_current_member_ips(controller=controller)
        onos_names_ips[onos_ips[0]] = Onos.NAME
        onos_names_ips[Onos.NAME] = onos_ips[0]
        for i in range(1,len(onos_ips)):
            name = '{0}-{1}'.format(Onos.NAME,i+1)
            onos_names_ips[onos_ips[i]] = name
            onos_names_ips[name] = onos_ips[i]

        return onos_names_ips

    #identifying current master of a connected device, not tested
    def get_cluster_current_master_standbys(self,controller=None,device_id=device_id):
        master = None
        standbys = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        log.info('Verifying device info in line %s'%device)
                        if device['id'] == device_id:
                            master = str(device['master'])
                            standbys = map(lambda d: str(d), device['standbys'])
                            log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
                            self.cliExit()
                            return master, standbys
                    self.cliExit()
                    return master, standbys
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return master,standbys
        except:
            raise Exception('Failed to get cluster members')
            return master,standbys

    def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
        ''' returns master and standbys of all the connected devices to ONOS cluster instance'''
        device_dict = {}
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_dict = {}
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
                        for i in range(len(device_dict[device['id']]['standbys'])):
                            device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
                        log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
                    self.cliExit()
                    return device_dict
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_dict
        except:
            raise Exception('Failed to get cluster members')
            return device_dict

    #identify current master of a connected device, not tested
    def get_cluster_connected_devices(self,controller=None):
        '''returns all the devices connected to ONOS cluster'''
        device_list = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_list = []
                devices = json.loads(self.cli.devices(jsonFormat = True))
                log.info("cluster 'devices' command output is %s"%devices)
                if devices:
                    for device in devices:
                        log.info('device id is %s'%device['id'])
                        device_list.append(str(device['id']))
                    self.cliExit()
                    return device_list
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_list
        except:
            raise Exception('Failed to get cluster members')
            return device_list

    def get_number_of_devices_of_master(self,controller=None):
        '''returns master-device pairs, which master having what devices'''
        master_count = {}
        try:
            cli = self.cliEnter(controller = controller)
            masters = json.loads(self.cli.masters(jsonFormat = True))
            if masters:
                for master in masters:
                    master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
                return master_count
            else:
                return master_count
        except:
            raise Exception('Failed to get cluster members')
            return master_count

    def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
        if new_master is None: return False
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_equal(master,new_master)
        log.info('Cluster master changed to %s successfully'%new_master)

    def withdraw_cluster_current_mastership(self,master_ip=None,device_id=device_id,controller=None):
        '''current master loses its mastership and hence new master will be elected'''
        self.cliEnter(controller=controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
        assert_not_equal(new_master_ip,master_ip)
        log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
        log.info('Cluster new master is %s'%new_master_ip)
        return True

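    # The test below cycles through the cluster, restarting one controller per
    # iteration; after each restart it scans every node's log for
    # StorageException$Timeout and checks the 'leaders' output on all controllers.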
    def test_cluster_controller_restarts(self):
        '''Test the cluster by repeatedly killing the controllers'''
        controllers = self.get_controllers()
        ctlr_len = len(controllers)
        if ctlr_len <= 1:
            log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
            assert_greater(ctlr_len, 1)

        #this call would verify the cluster for once
        onos_map = self.get_cluster_container_names_ips()

        def check_exception(controller = None):
            adjacent_controller = None
            adjacent_controllers = None
            if controller:
                adjacent_controllers = set(controllers) - set( [controller] )
                adjacent_controller = next(iter(adjacent_controllers))
            for node in controllers:
                onosLog = OnosLog(host = node)
                ##check the logs for storage exception
                _, output = onosLog.get_log(('ERROR', 'Exception',))
                if output and output.find('StorageException$Timeout') >= 0:
                    log.info('\nStorage Exception Timeout found on node: %s\n' %node)
                    log.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
                    log.info('\n' + '-' * 50 + '\n')
                    log.info('%s' %output)
                    log.info('\n' + '-' * 50 + '\n')
                    failed = self.verify_leaders(controllers)
                    if failed:
                        log.info('Leaders command failed on node: %s' %node)
                    assert_equal(len(failed), 0)
                    return controller

            try:
                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
                print('ONOS cluster formed with controllers: %s' %ips)
                st = True
            except:
                st = False

            failed = self.verify_leaders(controllers)
            assert_equal(len(failed), 0)
            if st is False:
                log.info('No storage exception and ONOS cluster was not formed successfully')
            else:
                controller = None

            return controller

        next_controller = None
        tries = 10
        for num in range(tries):
            index = num % ctlr_len
            #index = random.randrange(0, ctlr_len)
            controller = onos_map[controllers[index]] if next_controller is None else next_controller
            log.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller))
            try:
                cord_test_onos_restart(node = controller)
                time.sleep(30)
            except:
                time.sleep(5)
                continue
            next_controller = check_exception(controller = controller)

    #pass
    def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
377 status = self.verify_cluster_status(onos_instances = onos_instances)
378 assert_equal(status, True)
379 log.info('Cluster exists with %d ONOS instances'%onos_instances)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
384 assert_equal(status, True)
385 onos_ips = self.get_cluster_current_member_ips()
386 onos_instances = len(onos_ips)+add
387 log.info('Adding %d nodes to the ONOS cluster' %add)
388 cord_test_onos_add_cluster(count = add)
389 status = self.verify_cluster_status(onos_instances=onos_instances)
390 assert_equal(status, True)
391
    def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
394 assert_equal(status, True)
395 master, standbys = self.get_cluster_current_master_standbys()
396 assert_equal(len(standbys),(onos_instances-1))
397 onos_names_ips = self.get_cluster_container_names_ips()
398 master_onos_name = onos_names_ips[master]
399 log.info('Removing cluster current master %s'%(master))
400 cord_test_onos_shutdown(node = master_onos_name)
401 time.sleep(60)
402 onos_instances -= 1
403 status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
404 assert_equal(status, True)
405 new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
406 assert_not_equal(master,new_master)
        log.info('Successfully removed clusters master instance')

    def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
411 assert_equal(status, True)
412 master, standbys = self.get_cluster_current_master_standbys()
413 assert_equal(len(standbys),(onos_instances-1))
414 onos_names_ips = self.get_cluster_container_names_ips()
415 member_onos_name = onos_names_ips[standbys[0]]
416 log.info('Removing cluster member %s'%standbys[0])
417 cord_test_onos_shutdown(node = member_onos_name)
418 time.sleep(60)
419 onos_instances -= 1
420 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
421 assert_equal(status, True)
422
    def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
425 assert_equal(status, True)
426 master, standbys = self.get_cluster_current_master_standbys()
427 assert_equal(len(standbys),(onos_instances-1))
428 onos_names_ips = self.get_cluster_container_names_ips()
429 member1_onos_name = onos_names_ips[standbys[0]]
430 member2_onos_name = onos_names_ips[standbys[1]]
431 log.info('Removing cluster member %s'%standbys[0])
432 cord_test_onos_shutdown(node = member1_onos_name)
433 log.info('Removing cluster member %s'%standbys[1])
434 cord_test_onos_shutdown(node = member2_onos_name)
435 time.sleep(60)
436 onos_instances = onos_instances - 2
437 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
438 assert_equal(status, True)
439
    def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
442 assert_equal(status, True)
443 master, standbys = self.get_cluster_current_master_standbys()
444 assert_equal(len(standbys),(onos_instances-1))
445 onos_names_ips = self.get_cluster_container_names_ips()
446 for i in range(remove):
447 member_onos_name = onos_names_ips[standbys[i]]
448 log.info('Removing onos container with name %s'%standbys[i])
449 cord_test_onos_shutdown(node = member_onos_name)
450 time.sleep(60)
451 onos_instances = onos_instances - remove
452 status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
453 assert_equal(status, True)
454
455 #nottest test cluster not coming up properly if member goes down
    def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
        status = self.verify_cluster_status(onos_instances = onos_instances)
458 assert_equal(status, True)
459 onos_ips = self.get_cluster_current_member_ips()
460 onos_instances = len(onos_ips)+add
461 log.info('Adding %d ONOS instances to the cluster'%add)
462 cord_test_onos_add_cluster(count = add)
463 status = self.verify_cluster_status(onos_instances=onos_instances)
464 assert_equal(status, True)
465 log.info('Removing %d ONOS instances from the cluster'%remove)
466 for i in range(remove):
467 name = '{}-{}'.format(Onos.NAME, onos_instances - i)
468 log.info('Removing onos container with name %s'%name)
469 cord_test_onos_shutdown(node = name)
470 time.sleep(60)
471 onos_instances = onos_instances-remove
472 status = self.verify_cluster_status(onos_instances=onos_instances)
473 assert_equal(status, True)
474
475 #nottest cluster not coming up properly if member goes down
    def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
        status = self.verify_cluster_status(onos_instances = onos_instances)
478 assert_equal(status, True)
479 onos_ips = self.get_cluster_current_member_ips()
480 onos_instances = onos_instances-remove
481 log.info('Removing %d ONOS instances from the cluster'%remove)
482 for i in range(remove):
483 name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
484 log.info('Removing onos container with name %s'%name)
485 cord_test_onos_shutdown(node = name)
486 time.sleep(60)
487 status = self.verify_cluster_status(onos_instances=onos_instances)
488 assert_equal(status, True)
489 log.info('Adding %d ONOS instances to the cluster'%add)
490 cord_test_onos_add_cluster(count = add)
491 onos_instances = onos_instances+add
492 status = self.verify_cluster_status(onos_instances=onos_instances)
493 assert_equal(status, True)
494
    def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
497 assert_equal(status, True)
498 log.info('Restarting cluster')
499 cord_test_onos_restart()
500 status = self.verify_cluster_status(onos_instances = onos_instances)
501 assert_equal(status, True)
502
    def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
505 assert_equal(status, True)
506 master, standbys = self.get_cluster_current_master_standbys()
507 onos_names_ips = self.get_cluster_container_names_ips()
508 master_onos_name = onos_names_ips[master]
509 log.info('Restarting cluster master %s'%master)
510 cord_test_onos_restart(node = master_onos_name)
511 status = self.verify_cluster_status(onos_instances = onos_instances)
512 assert_equal(status, True)
513 log.info('Cluster came up after master restart as expected')
514
515 #test fail. master changing after restart. Need to check correct behavior.
    def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
518 assert_equal(status, True)
519 master1, standbys = self.get_cluster_current_master_standbys()
520 onos_names_ips = self.get_cluster_container_names_ips()
521 master_onos_name = onos_names_ips[master1]
        log.info('Restarting cluster master %s'%master1)
523 cord_test_onos_restart(node = master_onos_name)
524 status = self.verify_cluster_status(onos_instances = onos_instances)
525 assert_equal(status, True)
526 master2, standbys = self.get_cluster_current_master_standbys()
527 assert_equal(master1,master2)
528 log.info('Cluster master is same before and after cluster master restart as expected')
529
    def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
532 assert_equal(status, True)
533 master, standbys = self.get_cluster_current_master_standbys()
534 assert_equal(len(standbys),(onos_instances-1))
535 onos_names_ips = self.get_cluster_container_names_ips()
536 member_onos_name = onos_names_ips[standbys[0]]
537 log.info('Restarting cluster member %s'%standbys[0])
538 cord_test_onos_restart(node = member_onos_name)
539 status = self.verify_cluster_status(onos_instances = onos_instances)
540 assert_equal(status, True)
541 log.info('Cluster came up as expected after restarting one member')
542
    def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
545 assert_equal(status, True)
546 master, standbys = self.get_cluster_current_master_standbys()
547 assert_equal(len(standbys),(onos_instances-1))
548 onos_names_ips = self.get_cluster_container_names_ips()
549 member1_onos_name = onos_names_ips[standbys[0]]
550 member2_onos_name = onos_names_ips[standbys[1]]
551 log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
552 cord_test_onos_restart(node = member1_onos_name)
553 cord_test_onos_restart(node = member2_onos_name)
554 status = self.verify_cluster_status(onos_instances = onos_instances)
555 assert_equal(status, True)
556 log.info('Cluster came up as expected after restarting two members')
557
    def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
560 assert_equal(status,True)
561 master, standbys = self.get_cluster_current_master_standbys()
562 assert_equal(len(standbys),(onos_instances-1))
563 onos_names_ips = self.get_cluster_container_names_ips()
564 for i in range(members):
565 member_onos_name = onos_names_ips[standbys[i]]
566 log.info('Restarting cluster member %s'%standbys[i])
567 cord_test_onos_restart(node = member_onos_name)
568
569 status = self.verify_cluster_status(onos_instances = onos_instances)
570 assert_equal(status, True)
571 log.info('Cluster came up as expected after restarting %d members'%members)
572
    def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
575 assert_equal(status, True)
576 master, standbys = self.get_cluster_current_master_standbys()
577 assert_equal(len(standbys),(onos_instances-1))
        log.info('Cluster current master of devices is %s'%master)
        self.change_master_current_cluster(new_master=standbys[0])
580 log.info('Cluster master changed successfully')
581
582 #tested on single onos setup.
    def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
585 assert_equal(status, True)
586 onos_ips = self.get_cluster_current_member_ips()
587 self.vrouter.setUpClass()
588 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
589 assert_equal(res, True)
590 for onos_ip in onos_ips:
591 tries = 0
592 flag = False
593 try:
594 self.cliEnter(controller = onos_ip)
595 while tries <= 5:
596 routes = json.loads(self.cli.routes(jsonFormat = True))
597 if routes:
598 assert_equal(len(routes['routes4']), networks)
599 self.cliExit()
600 flag = True
601 break
602 else:
603 tries += 1
604 time.sleep(1)
605 assert_equal(flag, True)
606 except:
                log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
608 raise
609
610 #tested on single onos setup.
    def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
613 assert_equal(status, True)
614 onos_ips = self.get_cluster_current_member_ips()
615 master, standbys = self.get_cluster_current_master_standbys()
616 onos_names_ips = self.get_cluster_container_names_ips()
617 master_onos_name = onos_names_ips[master]
618 self.vrouter.setUpClass()
619 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
620 assert_equal(res,True)
621 cord_test_onos_shutdown(node = master_onos_name)
622 time.sleep(60)
        log.info('Verifying vrouter traffic after cluster master is down')
        self.vrouter.vrouter_traffic_verify()
625
626 #tested on single onos setup.
    def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
629 assert_equal(status, True)
630 onos_ips = self.get_cluster_current_member_ips()
631 master, standbys = self.get_cluster_current_master_standbys()
632 onos_names_ips = self.get_cluster_container_names_ips()
633 master_onos_name = onos_names_ips[master]
634 self.vrouter.setUpClass()
635 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
636 assert_equal(res, True)
637 cord_test_onos_restart()
638 self.vrouter.vrouter_traffic_verify()
639
640 #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
643 assert_equal(status, True)
644 self.vrouter.setUpClass()
645 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
646 assert_equal(res, True)
647 self.vrouter.vrouter_activate(deactivate=True)
648 time.sleep(15)
649 self.vrouter.vrouter_traffic_verify(positive_test=False)
650 self.vrouter.vrouter_activate(deactivate=False)
651
652 #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
655 assert_equal(status, True)
656 master, standbys = self.get_cluster_current_master_standbys()
657 onos_names_ips = self.get_cluster_container_names_ips()
658 master_onos_name = onos_names_ips[master]
659 self.vrouter.setUpClass()
660 log.info('Verifying vrouter before master down')
661 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
662 assert_equal(res, True)
663 self.vrouter.vrouter_activate(deactivate=True)
664 log.info('Verifying vrouter traffic after app deactivated')
665 time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
666 self.vrouter.vrouter_traffic_verify(positive_test=False)
667 log.info('Verifying vrouter traffic after master down')
668 cord_test_onos_shutdown(node = master_onos_name)
669 time.sleep(60)
670 self.vrouter.vrouter_traffic_verify(positive_test=False)
671 self.vrouter.vrouter_activate(deactivate=False)
672
673 #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
676 assert_equal(status, True)
677 master, standbys = self.get_cluster_current_master_standbys()
678 onos_names_ips = self.get_cluster_container_names_ips()
679 member_onos_name = onos_names_ips[standbys[0]]
680 self.vrouter.setUpClass()
681 log.info('Verifying vrouter before cluster member down')
682 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
683 assert_equal(res, True) # Expecting vrouter should work properly
684 log.info('Verifying vrouter after cluster member down')
685 cord_test_onos_shutdown(node = member_onos_name)
686 time.sleep(60)
687 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster goes down
688
689 #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
692 assert_equal(status, True)
693 master, standbys = self.get_cluster_current_master_standbys()
694 onos_names_ips = self.get_cluster_container_names_ips()
695 member_onos_name = onos_names_ips[standbys[1]]
696 self.vrouter.setUpClass()
697 log.info('Verifying vrouter traffic before cluster member restart')
698 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
699 assert_equal(res, True) # Expecting vrouter should work properly
700 cord_test_onos_restart(node = member_onos_name)
701 log.info('Verifying vrouter traffic after cluster member restart')
702 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
703
704 #tested on single onos setup.
    def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
707 assert_equal(status, True)
708 self.vrouter.setUpClass()
709 log.info('Verifying vrouter traffic before cluster restart')
710 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
711 assert_equal(res, True) # Expecting vrouter should work properly
712 cord_test_onos_restart()
713 log.info('Verifying vrouter traffic after cluster restart')
714 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
715
716
717 #test fails because flow state is in pending_add in onos
    def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
720 assert_equal(status, True)
721 master, standbys = self.get_cluster_current_master_standbys()
722 onos_names_ips = self.get_cluster_container_names_ips()
723 master_onos_name = onos_names_ips[master]
724 self.flows.setUpClass()
725 egress = 1
726 ingress = 2
727 egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
728 ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
729 flow = OnosFlowCtrl(deviceId = self.device_id,
730 egressPort = egress,
731 ingressPort = ingress,
732 udpSrc = ingress_map['udp_port'],
733 udpDst = egress_map['udp_port'],
734 controller=master
735 )
736 result = flow.addFlow()
737 assert_equal(result, True)
738 time.sleep(1)
739 self.success = False
740 def mac_recv_task():
741 def recv_cb(pkt):
742 log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
743 self.success = True
744 sniff(timeout=2,
745 lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
746 and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])
747
748 for i in [0,1]:
749 if i == 1:
750 cord_test_onos_shutdown(node = master_onos_name)
751 log.info('Verifying flows traffic after master killed')
752 time.sleep(45)
753 else:
754 log.info('Verifying flows traffic before master killed')
755 t = threading.Thread(target = mac_recv_task)
756 t.start()
757 L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
758 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
759 L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
760 pkt = L2/L3/L4
761 log.info('Sending packets to verify if flows are correct')
762 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
763 t.join()
764 assert_equal(self.success, True)
765
    def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
768 assert_equal(status, True)
769 master, standbys = self.get_cluster_current_master_standbys()
770 self.flows.setUpClass()
771 egress = 1
772 ingress = 2
773 egress_map = { 'ip': '192.168.30.1' }
774 ingress_map = { 'ip': '192.168.40.1' }
775 flow = OnosFlowCtrl(deviceId = self.device_id,
776 egressPort = egress,
777 ingressPort = ingress,
778 ecn = 1,
779 controller=master
780 )
781 result = flow.addFlow()
782 assert_equal(result, True)
783 ##wait for flows to be added to ONOS
784 time.sleep(1)
785 self.success = False
786 def mac_recv_task():
787 def recv_cb(pkt):
788 log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
789 self.success = True
790 sniff(count=2, timeout=5,
791 lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
792 and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
793 iface = self.flows.port_map[egress])
794 for i in [0,1]:
795 if i == 1:
796 log.info('Changing cluster master to %s'%standbys[0])
797 self.change_master_current_cluster(new_master=standbys[0])
                log.info('Verifying flow traffic after cluster master changed')
799 else:
800 log.info('Verifying flow traffic before cluster master changed')
801 t = threading.Thread(target = mac_recv_task)
802 t.start()
803 L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
804 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
805 pkt = L2/L3
806 log.info('Sending a packet to verify if flows are correct')
807 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
808 t.join()
809 assert_equal(self.success, True)
810
    #pass
812 def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
813 status = self.verify_cluster_status(onos_instances=onos_instances)
814 assert_equal(status, True)
815 master,standbys = self.get_cluster_current_master_standbys()
816 onos_names_ips = self.get_cluster_container_names_ips()
817 master_onos_name = onos_names_ips[master]
818 self.flows.setUpClass()
819 egress = 1
820 ingress = 2
821 egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
822 ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
823 flow = OnosFlowCtrl(deviceId = self.device_id,
824 egressPort = egress,
825 ingressPort = ingress,
826 ipv6_extension = 0,
827 controller=master
828 )
829
830 result = flow.addFlow()
831 assert_equal(result, True)
832 ##wait for flows to be added to ONOS
833 time.sleep(1)
834 self.success = False
835 def mac_recv_task():
836 def recv_cb(pkt):
837 log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
838 self.success = True
839 sniff(timeout=2,count=5,
840 lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
841 for i in [0,1]:
842 if i == 1:
843 log.info('Restart cluster current master %s'%master)
844 Container(master_onos_name,Onos.IMAGE).restart()
845 time.sleep(45)
846 log.info('Verifying flow traffic after master restart')
847 else:
848 log.info('Verifying flow traffic before master restart')
849 t = threading.Thread(target = mac_recv_task)
850 t.start()
851 L2 = self.flows_eth
852 L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
853 pkt = L2/L3
854 log.info('Sending packets to verify if flows are correct')
855 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
856 t.join()
857 assert_equal(self.success, True)
858
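    # Multicast helpers: send_multicast_data_traffic() injects data packets for a
    # group on the given interface and verify_igmp_data_traffic() sniffs for them on
    # the receiver side; both are shared by the IGMP cluster tests below.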
859 def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
860 dst_mac = self.igmp.iptomac(group)
861 eth = Ether(dst= dst_mac)
862 ip = IP(dst=group,src=source)
863 data = repr(monotonic.monotonic())
864 sendp(eth/ip/data,count=20, iface = intf)
865 pkt = (eth/ip/data)
866 log.info('multicast traffic packet %s'%pkt.show())
867
868 def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
869 log.info('verifying multicast traffic for group %s from source %s'%(group,source))
870 self.success = False
871 def recv_task():
872 def igmp_recv_cb(pkt):
873 log.info('multicast data received for group %s from source %s'%(group,source))
874 self.success = True
875 sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
876 t = threading.Thread(target = recv_task)
877 t.start()
878 self.send_multicast_data_traffic(group,source=source)
879 t.join()
880 return self.success
881
882 #pass
883 def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
884 status = self.verify_cluster_status(onos_instances=onos_instances)
885 assert_equal(status, True)
886 master, standbys = self.get_cluster_current_master_standbys()
887 assert_equal(len(standbys), (onos_instances-1))
888 onos_names_ips = self.get_cluster_container_names_ips()
889 master_onos_name = onos_names_ips[master]
890 self.igmp.setUp(controller=master)
891 groups = ['224.2.3.4','230.5.6.7']
892 src_list = ['2.2.2.2','3.3.3.3']
893 self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
894 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
895 iface = self.V_INF1, delay = 2)
896 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
897 iface = self.V_INF1, delay = 2)
898 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
899 assert_equal(status,True)
900 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
901 assert_equal(status,False)
902 log.info('restarting cluster master %s'%master)
903 Container(master_onos_name,Onos.IMAGE).restart()
904 time.sleep(60)
905 log.info('verifying multicast data traffic after master restart')
906 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
907 assert_equal(status,True)
908 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
909 assert_equal(status,False)
910
911 #pass
912 def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
913 status = self.verify_cluster_status(onos_instances=onos_instances)
914 assert_equal(status, True)
915 master, standbys = self.get_cluster_current_master_standbys()
916 assert_equal(len(standbys), (onos_instances-1))
917 onos_names_ips = self.get_cluster_container_names_ips()
918 master_onos_name = onos_names_ips[master]
919 self.igmp.setUp(controller=master)
920 groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
921 src_list = [self.igmp.randomsourceip()]
922 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
923 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
924 iface = self.V_INF1, delay = 2)
925 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
926 iface = self.V_INF1, delay = 2)
927 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
928 assert_equal(status,True)
929 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
930 assert_equal(status,False)
931 log.info('Killing cluster master %s'%master)
932 Container(master_onos_name,Onos.IMAGE).kill()
933 time.sleep(60)
934 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
935 assert_equal(status, True)
936 log.info('Verifying multicast data traffic after cluster master down')
937 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
938 assert_equal(status,True)
939 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
940 assert_equal(status,False)
941
942 def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
943 status = self.verify_cluster_status(onos_instances=onos_instances)
944 assert_equal(status, True)
945 master, standbys = self.get_cluster_current_master_standbys()
946 assert_equal(len(standbys), (onos_instances-1))
947 onos_names_ips = self.get_cluster_container_names_ips()
948 master_onos_name = onos_names_ips[master]
949 self.igmp.setUp(controller=master)
950 groups = [self.igmp.random_mcast_ip()]
951 src_list = [self.igmp.randomsourceip()]
952 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
953 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
954 iface = self.V_INF1, delay = 2)
955 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
956 assert_equal(status,True)
957 log.info('Killing clusters master %s'%master)
958 Container(master_onos_name,Onos.IMAGE).kill()
959 count = 0
960 for i in range(60):
961 log.info('Verifying multicast data traffic after cluster master down')
962 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
963 if status:
964 break
965 else:
966 count += 1
967 time.sleep(1)
968 assert_equal(status, True)
969 log.info('Time taken to recover traffic after clusters master down is %d seconds'%count)
970
971
972 #pass
973 def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
974 status = self.verify_cluster_status(onos_instances=onos_instances)
975 assert_equal(status, True)
976 master, standbys = self.get_cluster_current_master_standbys()
977 assert_equal(len(standbys), (onos_instances-1))
978 self.igmp.setUp(controller=master)
979 groups = [self.igmp.random_mcast_ip()]
980 src_list = [self.igmp.randomsourceip()]
981 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
982 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
983 iface = self.V_INF1, delay = 2)
984 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
985 assert_equal(status,True)
986 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
        self.change_master_current_cluster(new_master=standbys[0])
988 log.info('Verifying multicast traffic after cluster master change')
989 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
990 assert_equal(status,True)
991 log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
992 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
993 iface = self.V_INF1, delay = 1)
994 time.sleep(10)
995 status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
996 assert_equal(status,False)
997
998 #pass
999 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1000 status = self.verify_cluster_status(onos_instances=onos_instances)
1001 assert_equal(status, True)
1002 master,standbys = self.get_cluster_current_master_standbys()
1003 assert_equal(len(standbys), (onos_instances-1))
1004 self.igmp.setUp(controller=master)
1005 groups = [self.igmp.random_mcast_ip()]
1006 src_list = [self.igmp.randomsourceip()]
1007 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
1008 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
        self.change_master_current_cluster(new_master = standbys[0])
1010 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1011 iface = self.V_INF1, delay = 2)
1012 time.sleep(1)
        self.change_master_current_cluster(new_master = master)
1014 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1015 assert_equal(status,True)
1016
1017 #pass
    @deferred(TLS_TIMEOUT)
    def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
1021 assert_equal(status, True)
1022 master, standbys = self.get_cluster_current_master_standbys()
1023 assert_equal(len(standbys), (onos_instances-1))
1024 self.tls.setUp(controller=master)
1025 df = defer.Deferred()
1026 def eap_tls_verify(df):
1027 tls = TLSAuthTest()
1028 tls.runTest()
1029 df.callback(0)
1030 reactor.callLater(0, eap_tls_verify, df)
1031 return df
1032
1033 @deferred(120)
    def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
        master, standbys = self.get_cluster_current_master_standbys()
1036 assert_equal(len(standbys), (onos_instances-1))
1037 self.tls.setUp()
1038 df = defer.Deferred()
        def eap_tls_verify(df):
1040 tls = TLSAuthTest()
1041 tls.runTest()
1042 df.callback(0)
1043 for i in [0,1]:
1044 if i == 1:
1045 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1046 self.change_master_current_cluster(new_master=standbys[0])
1047 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1048 else:
1049 log.info('Verifying tls authentication before cluster master change')
1050 reactor.callLater(0, eap_tls_verify, df)
1051 return df
1052
1053 @deferred(TLS_TIMEOUT)
    def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
1056 assert_equal(status, True)
1057 master, standbys = self.get_cluster_current_master_standbys()
1058 assert_equal(len(standbys), (onos_instances-1))
1059 onos_names_ips = self.get_cluster_container_names_ips()
1060 master_onos_name = onos_names_ips[master]
1061 self.tls.setUp()
1062 df = defer.Deferred()
1063 def eap_tls_verify(df):
1064 tls = TLSAuthTest()
1065 tls.runTest()
1066 df.callback(0)
1067 for i in [0,1]:
1068 if i == 1:
1069 log.info('Killing cluster current master %s'%master)
1070 cord_test_onos_shutdown(node = master_onos_name)
1071 time.sleep(20)
1072 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1073 assert_equal(status, True)
1074 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1075 log.info('Verifying tls authentication after killing cluster master')
1076 reactor.callLater(0, eap_tls_verify, df)
1077 return df
1078
1079 @deferred(TLS_TIMEOUT)
    def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
1082 assert_equal(status, True)
1083 master, standbys = self.get_cluster_current_master_standbys()
1084 assert_equal(len(standbys), (onos_instances-1))
1085 onos_names_ips = self.get_cluster_container_names_ips()
1086 member_onos_name = onos_names_ips[standbys[0]]
1087 self.tls.setUp()
1088 df = defer.Deferred()
1089 def eap_tls_no_cert(df):
1090 def tls_no_cert_cb():
1091 log.info('TLS authentication failed with no certificate')
1092 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1093 tls.runTest()
1094 assert_equal(tls.failTest, True)
1095 df.callback(0)
1096 for i in [0,1]:
1097 if i == 1:
1098 log.info('Restart cluster member %s'%standbys[0])
1099 Container(member_onos_name,Onos.IMAGE).restart()
1100 time.sleep(20)
1101 status = self.verify_cluster_status(onos_instances=onos_instances)
1102 assert_equal(status, True)
1103 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1104 log.info('Verifying tls authentication after member restart')
1105 reactor.callLater(0, eap_tls_no_cert, df)
1106 return df
1107
    #pass
1109 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1110 status = self.verify_cluster_status(onos_instances=onos_instances)
1111 assert_equal(status,True)
1112 master,standbys = self.get_cluster_current_master_standbys()
1113 assert_equal(len(standbys),(onos_instances-1))
1114 self.proxyarp.setUpClass()
1115 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1116 ingress = hosts+1
1117 for hostip, hostmac in hosts_config:
1118 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1119 time.sleep(1)
1120 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
        self.change_master_current_cluster(new_master=standbys[0])
1122 log.info('verifying proxyarp after master change')
1123 for hostip, hostmac in hosts_config:
1124 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1125 time.sleep(1)
1126 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1127 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1128 time.sleep(3)
1129 for hostip, hostmac in hosts_config:
1130 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1131 time.sleep(1)
1132 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1133 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1134 time.sleep(3)
1135 for hostip, hostmac in hosts_config:
1136 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1137 time.sleep(1)

    #pass
    def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
1142 assert_equal(status, True)
1143 master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
1145 onos_names_ips = self.get_cluster_container_names_ips()
1146 member_onos_name = onos_names_ips[standbys[1]]
1147 self.proxyarp.setUpClass()
1148 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1149 ingress = hosts+1
1150 for hostip, hostmac in hosts_config:
1151 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1152 time.sleep(1)
1153 log.info('killing cluster member %s'%standbys[1])
1154 Container(member_onos_name,Onos.IMAGE).kill()
1155 time.sleep(20)
1156 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1157 assert_equal(status, True)
1158 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1159 log.info('verifying proxy arp functionality after cluster member down')
1160 for hostip, hostmac in hosts_config:
1161 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1162 time.sleep(1)
1163
1164 #pass
1165 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1166 status = self.verify_cluster_status(onos_instances=onos_instances)
1167 assert_equal(status, True)
1168 self.proxyarp.setUpClass()
1169 master, standbys = self.get_cluster_current_master_standbys()
1170 assert_equal(len(standbys), (onos_instances-1))
1171 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1172 self.success = True
1173 ingress = hosts+1
1174 ports = range(ingress,ingress+10)
1175 hostmac = []
1176 hostip = []
1177 for ip,mac in hosts_config:
1178 hostmac.append(mac)
1179 hostip.append(ip)
1180 success_dir = {}
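# Each worker thread below sniffs one ingress interface for the proxied ARP reply while an ARP
# request for its host IP is sent; a hit is recorded per thread name in success_dir, and the
# test passes only if all 10 threads see a reply.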
1181 def verify_proxyarp(*r):
1182 ingress, hostmac, hostip = r[0],r[1],r[2]
1183 def mac_recv_task():
1184 def recv_cb(pkt):
1185 log.info('ARP reply seen with source MAC %s' %(pkt[ARP].hwsrc))
1186 success_dir[current_thread().name] = True
1187 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1188 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1189 t = threading.Thread(target = mac_recv_task)
1190 t.start()
1191 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1192 log.info('Sending arp request for dest ip %s on interface %s' %
1193 (hostip,self.proxyarp.port_map[ingress]))
1194 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1195 t.join()
1196 t = []
1197 for i in range(10):
1198 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1199 for i in range(10):
1200 t[i].start()
1201 time.sleep(2)
1202 for i in range(10):
1203 t[i].join()
1204 if len(success_dir) != 10:
1205 self.success = False
1206 assert_equal(self.success, True)
1207
1208 #pass
1209 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1210 status = self.verify_cluster_status(onos_instances=onos_instances)
1211 assert_equal(status, True)
1212 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001213 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001214 self.acl.setUp()
1215 acl_rule = ACLTest()
1216 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1217 if status is False:
1218 log.info('JSON request returned status %d' %code)
1219 assert_equal(status, True)
1220 result = acl_rule.get_acl_rules(controller=master)
1221 aclRules1 = result.json()['aclRules']
1222 log.info('Added ACL rules: %s'%aclRules1)
1223 acl_Id = map(lambda d: d['id'], aclRules1)
1224 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1225 self.change_cluster_current_master(new_master=standbys[0])
1226 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1227 if status is False:
1228 log.info('JSON request returned status %d' %code)
1229 assert_equal(status, True)
1230
1231 #pass
1232 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1233 status = self.verify_cluster_status(onos_instances=onos_instances)
1234 assert_equal(status, True)
1235 master,standbys = self.get_cluster_current_master_standbys()
1236 assert_equal(len(standbys),(onos_instances-1))
1237 onos_names_ips = self.get_cluster_container_names_ips()
1238 master_onos_name = onos_names_ips[master]
1239 self.acl.setUp()
1240 acl_rule = ACLTest()
1241 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1242 if status is False:
1243 log.info('JSON request returned status %d' %code)
1244 assert_equal(status, True)
1245 result1 = acl_rule.get_acl_rules(controller=master)
1246 aclRules1 = result1.json()['aclRules']
1247 log.info('Added ACL rules: %s'%aclRules1)
1248 acl_Id1 = map(lambda d: d['id'], aclRules1)
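# Record the rule ids reported by the current master so they can be compared with the
# new master's view after failover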
1249 log.info('Killing cluster current master %s'%master)
1250 Container(master_onos_name,Onos.IMAGE).kill()
1251 time.sleep(45)
1252 status = self.verify_cluster_status(onos_instances=onos_instances,controller=standbys[0])
1253 assert_equal(status, True)
1254 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1255 assert_equal(len(standbys),(onos_instances-2))
1256 assert_not_equal(new_master,master)
1257 result2 = acl_rule.get_acl_rules(controller=new_master)
1258 aclRules2 = result2.json()['aclRules']
1259 acl_Id2 = map(lambda d: d['id'], aclRules2)
1260 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1261 assert_equal(acl_Id2,acl_Id1)
1262
1263 #acl traffic scenario not working as acl rule is not getting added to onos
1264 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1265 status = self.verify_cluster_status(onos_instances=onos_instances)
1266 assert_equal(status, True)
1267 master,standbys = self.get_cluster_current_master_standbys()
1268 assert_equal(len(standbys),(onos_instances-1))
1269 onos_names_ips = self.get_cluster_container_names_ips()
1270 member1_onos_name = onos_names_ips[standbys[0]]
1271 member2_onos_name = onos_names_ips[standbys[1]]
1272 ingress = self.acl.ingress_iface
1273 egress = self.acl.CURRENT_PORT_NUM
1274 acl_rule = ACLTest()
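# Add an egress-side host interface through ONOS, install a 'deny' ACL rule for the
# source/destination pair, and expect the UDP traffic to be dropped both before and
# after two cluster members are killed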
1275 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1276 self.acl.CURRENT_PORT_NUM += 1
1277 time.sleep(5)
1278 if status is False:
1279 log.info('JSON request returned status %d' %code)
1280 assert_equal(status, True)
1281 srcMac = '00:00:00:00:00:11'
1282 dstMac = host_ip_mac[0][1]
1283 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1284 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1285 time.sleep(10)
1286 if status is False:
1287 log.info('JSON request returned status %d' %code)
1288 assert_equal(status, True)
1289 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1290 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1291 Container(member1_onos_name, Onos.IMAGE).kill()
1292 Container(member2_onos_name, Onos.IMAGE).kill()
1293 time.sleep(40)
1294 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1295 assert_equal(status, True)
1296 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1297 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1298
1299 #pass
1300 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1301 status = self.verify_cluster_status(onos_instances=onos_instances)
1302 assert_equal(status, True)
1303 master,standbys = self.get_cluster_current_master_standbys()
1304 assert_equal(len(standbys),(onos_instances-1))
1305 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001306 mac = self.dhcprelay.get_mac(iface)
1307 self.dhcprelay.host_load(iface)
1308 ## We use the defaults for this test; it serves as an example for the others
1309 ## The dhcpd server does not need a restart when the default config is retained
1310 config = self.dhcprelay.default_config
1311 options = self.dhcprelay.default_options
1312 subnet = self.dhcprelay.default_subnet_config
1313 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1314 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1315 config = config,
1316 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001317 subnet = subnet,
1318 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001319 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1320 cip, sip = self.dhcprelay.send_recv(mac)
1321 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1322 self.change_master_current_cluster(new_master=standbys[0])
1323 log.info('Releasing ip %s to server %s' %(cip, sip))
1324 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1325 log.info('Triggering DHCP discover again after release')
1326 cip2, sip2 = self.dhcprelay.send_recv(mac)
1327 log.info('Verifying released IP was given back on rediscover')
1328 assert_equal(cip, cip2)
1329 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1330 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001331 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001332
ChetanGaonker689b3862016-10-17 16:25:01 -07001333
1334 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1335 status = self.verify_cluster_status(onos_instances=onos_instances)
1336 assert_equal(status, True)
1337 master,standbys = self.get_cluster_current_master_standbys()
ChetanGaonker2099d722016-10-07 15:16:58 -07001338 assert_equal(len(standbys),(onos_instances-1))
ChetanGaonker689b3862016-10-17 16:25:01 -07001339 onos_names_ips = self.get_cluster_container_names_ips()
1340 master_onos_name = onos_names_ips[master]
1341 self.dhcprelay.setUpClass(controller=master)
1342 mac = self.dhcprelay.get_mac(iface)
1343 self.dhcprelay.host_load(iface)
1344 ## We use the defaults for this test; it serves as an example for the others
1345 ## The dhcpd server does not need a restart when the default config is retained
1346 config = self.dhcprelay.default_config
1347 options = self.dhcprelay.default_options
1348 subnet = self.dhcprelay.default_subnet_config
1349 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1350 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1351 config = config,
1352 options = options,
1353 subnet = subnet,
1354 controller=master)
1355 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1356 log.info('Initiating dhcp process from client %s'%mac)
1357 cip, sip = self.dhcprelay.send_recv(mac)
1358 log.info('Killing cluster current master %s'%master)
1359 Container(master_onos_name, Onos.IMAGE).kill()
1360 time.sleep(60)
1361 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1362 assert_equal(status, True)
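# Re-learn the client MAC bound to the leased IP, then re-request that same IP to verify
# the lease is still honoured after the master went down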
1363 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1364 log.info("Verifying dhcp clients gets same IP after cluster master restarts")
1365 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1366 assert_equal(new_cip, cip)
1367 self.dhcprelay.tearDownClass(controller=standbys[0])
1368
1369 #pass
1370 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1371 status = self.verify_cluster_status(onos_instances=onos_instances)
1372 assert_equal(status, True)
1373 master,standbys = self.get_cluster_current_master_standbys()
1374 assert_equal(len(standbys),(onos_instances-1))
1375 self.dhcprelay.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001376 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1377 self.dhcprelay.host_load(iface)
1378 ## We use the defaults for this test; it serves as an example for the others
1379 ## The dhcpd server does not need a restart when the default config is retained
1380 config = self.dhcprelay.default_config
1381 options = self.dhcprelay.default_options
1382 subnet = self.dhcprelay.default_subnet_config
1383 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1384 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1385 config = config,
1386 options = options,
ChetanGaonker689b3862016-10-17 16:25:01 -07001387 subnet = subnet,
1388 controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001389 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
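# Three different client MACs request leases while mastership is moved between controllers;
# each request must still yield an IP through the relay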
1390 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1391 assert_not_equal(cip1,None)
1392 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1393 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1394 self.change_master_current_cluster(new_master=standbys[0])
1395 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1396 assert_not_equal(cip2,None)
1397 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1398 log.info('Changing cluster master back from %s to %s'%(standbys[0],master))
1399 self.change_master_current_cluster(new_master=master)
1400 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1401 assert_not_equal(cip3,None)
1402 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
ChetanGaonker689b3862016-10-17 16:25:01 -07001403 self.dhcprelay.tearDownClass(controller=standbys[0])
ChetanGaonker2099d722016-10-07 15:16:58 -07001404
ChetanGaonker689b3862016-10-17 16:25:01 -07001405 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001406 status = self.verify_cluster_status(onos_instances=onos_instances)
1407 assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
ChetanGaonker689b3862016-10-17 16:25:01 -07001408 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001409 self.subscriber.num_subscribers = 5
1410 self.subscriber.num_channels = 10
1411 for i in [0,1]:
1412 if i == 1:
1413 cord_test_onos_restart()
1414 time.sleep(45)
1415 status = self.verify_cluster_status(onos_instances=onos_instances)
1416 assert_equal(status, True)
1417 log.info('Verifying cord subscriber functionality after cluster restart')
1418 else:
1419 log.info('Verifying cord subscriber functionality before cluster restart')
1420 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1421 num_channels = self.subscriber.num_channels,
1422 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1423 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1424 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1425 self.subscriber.num_channels))
1426 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001427 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001428
ChetanGaonker689b3862016-10-17 16:25:01 -07001429 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1430 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1431 status = self.verify_cluster_status(onos_instances=onos_instances)
1432 assert_equal(status, True)
1433 master,standbys = self.get_cluster_current_master_standbys()
1434 assert_equal(len(standbys),(onos_instances-1))
1435 self.subscriber.setUpClass(controller=master)
1436 self.subscriber.num_subscribers = 5
1437 self.subscriber.num_channels = 10
1438 for i in [0,1]:
1439 if i == 1:
1440 status=self.withdraw_cluster_current_mastership(master_ip=master)
1441 assert_equal(status, True)
1442 master,standbys = self.get_cluster_current_master_standbys()
1443 log.info('Verifying cord subscriber functionality after the current cluster master withdraws mastership')
1444 else:
1445 log.info('Verifying cord subscriber functionality before the cluster master withdraws mastership')
1446 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1447 num_channels = self.subscriber.num_channels,
1448 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1449 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1450 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1451 self.subscriber.num_channels),controller=master)
1452 assert_equal(test_status, True)
1453 self.subscriber.tearDownClass(controller=master)
1454
1455 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1456 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001457 status = self.verify_cluster_status(onos_instances=onos_instances)
1458 assert_equal(status, True)
1459 master, standbys = self.get_cluster_current_master_standbys()
1460 assert_equal(len(standbys),(onos_instances-1))
1461 onos_names_ips = self.get_cluster_container_names_ips()
1462 member_onos_name = onos_names_ips[standbys[0]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001463 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001464 num_subscribers = 1
1465 num_channels = 10
1466 for i in [0,1]:
1467 if i == 1:
1468 cord_test_onos_shutdown(node = member_onos_name)
1469 time.sleep(30)
ChetanGaonker689b3862016-10-17 16:25:01 -07001470 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001471 assert_equal(status, True)
1472 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1473 else:
1474 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1475 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1476 num_channels = num_channels,
1477 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1478 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1479 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
ChetanGaonker689b3862016-10-17 16:25:01 -07001480 negative_subscriber_auth = 'all',controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001481 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001482 self.subscriber.tearDownClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001483
ChetanGaonker689b3862016-10-17 16:25:01 -07001484 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
ChetanGaonker2099d722016-10-07 15:16:58 -07001485 status = self.verify_cluster_status(onos_instances=onos_instances)
1486 assert_equal(status, True)
1487 master, standbys = self.get_cluster_current_master_standbys()
1488 assert_equal(len(standbys),(onos_instances-1))
1489 onos_names_ips = self.get_cluster_container_names_ips()
1490 member1_onos_name = onos_names_ips[standbys[0]]
1491 member2_onos_name = onos_names_ips[standbys[1]]
ChetanGaonker689b3862016-10-17 16:25:01 -07001492 self.subscriber.setUpClass(controller=master)
ChetanGaonker2099d722016-10-07 15:16:58 -07001493 num_subscribers = 1
1494 num_channels = 10
1495 for i in [0,1]:
1496 if i == 1:
1497 cord_test_onos_shutdown(node = member1_onos_name)
1498 cord_test_onos_shutdown(node = member2_onos_name)
1499 time.sleep(60)
1500 status = self.verify_cluster_status(onos_instances=onos_instances-2)
1501 assert_equal(status, True)
1502 log.info('Verifying cord subscriber functionality after two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1503 else:
1504 log.info('Verifying cord subscriber functionality before two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1505 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1506 num_channels = num_channels,
1507 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1508 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1509 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1510 negative_subscriber_auth = 'all')
1511 assert_equal(test_status, True)
ChetanGaonker689b3862016-10-17 16:25:01 -07001512 self.subscriber.tearDownClass(controller=master)
1513
1514 #pass
1515 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1516 status = self.verify_cluster_status(onos_instances=onos_instances)
1517 assert_equal(status, True)
1518 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1519 for device in device_dict.keys():
1520 log.info("Device is %s"%device_dict[device])
1521 assert_not_equal(device_dict[device]['master'],'none')
1522 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1523 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1524
1525 #pass
1526 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1527 status = self.verify_cluster_status(onos_instances=onos_instances)
1528 assert_equal(status, True)
1529 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1530 cluster_ips = self.get_cluster_current_member_ips()
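# Fetch the mastership view from every cluster member; each member must see all connected
# devices, each with a valid master and onos_instances-1 standbys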
1531 for ip in cluster_ips:
1532 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1533 assert_equal(len(device_dict.keys()),onos_instances)
1534 for device in device_dict.keys():
1535 log.info("Device is %s"%device_dict[device])
1536 assert_not_equal(device_dict[device]['master'],'none')
1537 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1538 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1539
1540 #pass
1541 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1542 status = self.verify_cluster_status(onos_instances=onos_instances)
1543 assert_equal(status, True)
1544 onos_names_ips = self.get_cluster_container_names_ips()
1545 master_count = self.get_number_of_devices_of_master()
1546 log.info('Master count information is %s'%master_count)
1547 total_devices = 0
1548 for master in master_count.keys():
1549 total_devices += master_count[master]['size']
1550 if master_count[master]['size'] != 0:
1551 restart_ip = master
1552 assert_equal(total_devices,onos_instances)
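# Restart one controller that currently masters at least one device; after the restart it is
# expected to master no devices while the total device count stays the same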
1553 member_onos_name = onos_names_ips[restart_ip]
1554 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1555 Container(member_onos_name, Onos.IMAGE).restart()
1556 time.sleep(40)
1557 master_count = self.get_number_of_devices_of_master()
1558 log.info('Master count information after restart is %s'%master_count)
1559 total_devices = 0
1560 for master in master_count.keys():
1561 total_devices += master_count[master]['size']
1562 if master == restart_ip:
1563 assert_equal(master_count[master]['size'], 0)
1564 assert_equal(total_devices,onos_instances)
1565
1566 #pass
1567 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1568 status = self.verify_cluster_status(onos_instances=onos_instances)
1569 assert_equal(status, True)
1570 onos_names_ips = self.get_cluster_container_names_ips()
1571 master_count = self.get_number_of_devices_of_master()
1572 log.info('Master count information is %s'%master_count)
1573 total_devices = 0
1574 for master in master_count.keys():
1575 total_devices += master_count[master]['size']
1576 if master_count[master]['size'] != 0:
1577 restart_ip = master
1578 assert_equal(total_devices,onos_instances)
1579 master_onos_name = onos_names_ips[restart_ip]
1580 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1581 Container(master_onos_name, Onos.IMAGE).kill()
1582 time.sleep(40)
1583 for ip in onos_names_ips.keys():
1584 if ip != restart_ip:
1585 controller_ip = ip
1586 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1587 assert_equal(status, True)
1588 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1589 log.info('Master count information after restart is %s'%master_count)
1590 total_devices = 0
1591 for master in master_count.keys():
1592 total_devices += master_count[master]['size']
1593 if master == restart_ip:
1594 assert_equal(master_count[master]['size'], 0)
1595 assert_equal(total_devices,onos_instances)
1596
1597 #pass
1598 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1599 status = self.verify_cluster_status(onos_instances=onos_instances)
1600 assert_equal(status, True)
1601 master_count = self.get_number_of_devices_of_master()
1602 log.info('Master count information is %s'%master_count)
1603 total_devices = 0
1604 for master in master_count.keys():
1605 total_devices += int(master_count[master]['size'])
1606 if master_count[master]['size'] != 0:
1607 master_ip = master
1608 log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
1609 device_id = str(master_count[master]['devices'][0])
1610 device_count = master_count[master]['size']
1611 assert_equal(total_devices,onos_instances)
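# Withdraw mastership of one device from its current master; that master's device count
# should drop by exactly one while the overall device count is unchanged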
1612 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1613 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1614 assert_equal(status, True)
1615 master_count = self.get_number_of_devices_of_master()
1616 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1617 total_devices = 0
1618 for master in master_count.keys():
1619 total_devices += int(master_count[master]['size'])
1620 if master == master_ip:
1621 assert_equal(master_count[master]['size'], device_count-1)
1622 assert_equal(total_devices,onos_instances)
1623
1624 #pass
1625 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1626 status = self.verify_cluster_status(onos_instances=onos_instances)
1627 assert_equal(status, True)
1628 master_count = self.get_number_of_devices_of_master()
1629 log.info('Master count information is %s'%master_count)
1630 total_devices = 0
1631 for master in master_count.keys():
1632 total_devices += master_count[master]['size']
1633 assert_equal(total_devices,onos_instances)
1634 log.info('Restarting cluster')
1635 cord_test_onos_restart()
1636 time.sleep(60)
1637 master_count = self.get_number_of_devices_of_master()
1638 log.info('Master count information after restart is %s'%master_count)
1639 total_devices = 0
1640 for master in master_count.keys():
1641 total_devices += master_count[master]['size']
1642 assert_equal(total_devices,onos_instances)
ChetanGaonker2099d722016-10-07 15:16:58 -07001643