1#copyright 2016-present Ciena Corporation
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14#
15import unittest
16from nose.tools import *
17from scapy.all import *
18from OnosCtrl import OnosCtrl, get_mac
19from OltConfig import OltConfig
20from socket import socket
21from OnosFlowCtrl import OnosFlowCtrl
22from nose.twistedtools import reactor, deferred
23from twisted.internet import defer
24from onosclidriver import OnosCliDriver
25from CordContainer import Container, Onos, Quagga
26from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart
27from portmaps import g_subscriber_port_map
28from scapy.all import *
29import time, monotonic
30import threading
31from threading import current_thread
32from Cluster import *
33from EapTLS import TLSAuthTest
34from ACL import ACLTest
35from OnosLog import OnosLog
36from CordLogger import CordLogger
37import os
38import json
39import random
40import collections
41log.setLevel('INFO')
42
43class cluster_exchange(CordLogger):
44 test_path = os.path.dirname(os.path.realpath(__file__))
45 onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
46 mac = RandMAC()._fix()
47 flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
48 igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
49 igmp_ip = IP(dst = '224.0.0.22')
50 ONOS_INSTANCES = 3
51 V_INF1 = 'veth0'
52 TLS_TIMEOUT = 100
53 device_id = 'of:' + get_mac()
54 igmp = cluster_igmp()
55 igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10',end_ip = '224.1.10.49')
56 igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35',end_ip='38.24.35.56')
57 tls = cluster_tls()
58 flows = cluster_flows()
59 proxyarp = cluster_proxyarp()
60 vrouter = cluster_vrouter()
61 acl = cluster_acl()
62 dhcprelay = cluster_dhcprelay()
63 subscriber = cluster_subscriber()
64 testcaseLoggers = ('test_cluster_controller_kills',)
65
66 def setUp(self):
67 if self._testMethodName not in self.testcaseLoggers:
68 super(cluster_exchange, self).setUp()
69
70 def tearDown(self):
71 if self._testMethodName not in self.testcaseLoggers:
72 super(cluster_exchange, self).tearDown()
73
74 def get_controller(self):
75 controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
76 controller = controller.split(',')[0]
77 return controller
78
79 @classmethod
80 def get_controllers(cls):
81 controllers = os.getenv('ONOS_CONTROLLER_IP') or ''
82 return controllers.split(',')
83
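    # Illustrative usage of the two helpers above (the addresses are hypothetical,
    # not taken from any real deployment): with
    #   export ONOS_CONTROLLER_IP=172.17.0.2,172.17.0.3,172.17.0.4
    # set before the run, get_controller() would return '172.17.0.2' and
    # get_controllers() the full three-element list.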
84 def cliEnter(self,controller = None):
85 retries = 0
86 while retries < 3:
87 self.cli = OnosCliDriver(controller = controller,connect = True)
88 if self.cli.handle:
89 break
90 else:
91 retries += 1
92 time.sleep(2)
93
94 def cliExit(self):
95 self.cli.disconnect()
96
97 def get_leader(self, controller = None):
98 self.cliEnter(controller = controller)
99 result = json.loads(self.cli.leaders(jsonFormat = True))
100 if result is None:
101 log.info('Leaders command failure for controller %s' %controller)
102 else:
103 log.info('Leaders returned: %s' %result)
104 self.cliExit()
105 return result
106
107 def get_leaders(self, controller = None):
108 result = []
109 if type(controller) in [ list, tuple ]:
110 for c in controller:
111 leaders = self.get_leader(controller = c)
112 result.append(leaders)
113 else:
114 leaders = self.get_leader(controller = controller)
115 result.append(leaders)
116 return result
117
118 def verify_cluster_status(self,controller = None,onos_instances=ONOS_INSTANCES,verify=False):
119 tries = 0
120 try:
121 self.cliEnter(controller = controller)
122 while tries <= 10:
123 cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
124 if cluster_summary:
125 log.info("cluster 'summary' command output is %s"%cluster_summary)
126 nodes = cluster_summary['nodes']
127 if verify:
128 if nodes == onos_instances:
129 self.cliExit()
130 return True
131 else:
132 tries += 1
133 time.sleep(1)
134 else:
135 if nodes >= onos_instances:
136 self.cliExit()
137 return True
138 else:
139 tries += 1
140 time.sleep(1)
141 else:
142 tries += 1
143 time.sleep(1)
144 self.cliExit()
145 return False
146 except:
147 raise Exception('Failed to get cluster members')
148 return False
149
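    # Sketch of how the tests below typically call verify_cluster_status
    # (illustrative only, mirroring the test bodies in this file):
    #   status = self.verify_cluster_status(onos_instances = self.ONOS_INSTANCES)
    #   assert_equal(status, True)
    # With verify=True the node count must match onos_instances exactly;
    # otherwise any count greater than or equal to onos_instances passes.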
150 def get_cluster_current_member_ips(self,controller = None):
151 tries = 0
152 cluster_ips = []
153 try:
154 self.cliEnter(controller = controller)
155 while tries <= 10:
156 cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
157 if cluster_nodes:
158 log.info("cluster 'nodes' output is %s"%cluster_nodes)
159 cluster_ips = map(lambda c: c['id'], cluster_nodes)
160 self.cliExit()
161 cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
162 return cluster_ips
163 else:
164 tries += 1
165 self.cliExit()
166 return cluster_ips
167 except:
168 raise Exception('Failed to get cluster members')
169 return cluster_ips
170
171 def get_cluster_container_names_ips(self,controller=None):
172 onos_names_ips = {}
173 onos_ips = self.get_cluster_current_member_ips(controller=controller)
174 onos_names_ips[onos_ips[0]] = Onos.NAME
175 onos_names_ips[Onos.NAME] = onos_ips[0]
176 for i in range(1,len(onos_ips)):
177 name = '{0}-{1}'.format(Onos.NAME,i+1)
178 onos_names_ips[onos_ips[i]] = name
179 onos_names_ips[name] = onos_ips[i]
180
181 return onos_names_ips
182
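    # The helper above returns a bidirectional map between member IPs and
    # container names. Illustrative shape only (IPs are hypothetical; names
    # follow the '{Onos.NAME}-N' pattern used in the loop above):
    #   {'172.17.0.2': 'onos', 'onos': '172.17.0.2',
    #    '172.17.0.3': 'onos-2', 'onos-2': '172.17.0.3', ...}
    # so callers can look up a container name from an IP and vice versa.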
183 #identifying current master of a connected device, not tested
184 def get_cluster_current_master_standbys(self,controller=None,device_id=device_id):
185 master = None
186 standbys = []
187 tries = 0
188 try:
189 cli = self.cliEnter(controller = controller)
190 while tries <= 10:
191 roles = json.loads(self.cli.roles(jsonFormat = True))
192 log.info("cluster 'roles' command output is %s"%roles)
193 if roles:
194 for device in roles:
195 log.info('Verifying device info in line %s'%device)
196 if device['id'] == device_id:
197 master = str(device['master'])
198 standbys = map(lambda d: str(d), device['standbys'])
199 log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
200 self.cliExit()
201 return master, standbys
202 self.cliExit()
203 return master, standbys
204 else:
205 tries += 1
206 time.sleep(1)
207 self.cliExit()
208 return master,standbys
209 except:
210 raise Exception('Failed to get cluster members')
211 return master,standbys
212
213 def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
214 ''' returns master and standbys of all the connected devices to ONOS cluster instance'''
215 device_dict = {}
216 tries = 0
217 try:
218 cli = self.cliEnter(controller = controller)
219 while tries <= 10:
220 device_dict = {}
221 roles = json.loads(self.cli.roles(jsonFormat = True))
222 log.info("cluster 'roles' command output is %s"%roles)
223 if roles:
224 for device in roles:
225 device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
226 for i in range(len(device_dict[device['id']]['standbys'])):
227 device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
228 log.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
229 self.cliExit()
230 return device_dict
231 else:
232 tries += 1
233 time.sleep(1)
234 self.cliExit()
235 return device_dict
236 except:
237 raise Exception('Failed to get cluster members')
238 return device_dict
239
240 #lists the devices connected to the ONOS cluster, not tested
241 def get_cluster_connected_devices(self,controller=None):
242 '''returns all the devices connected to ONOS cluster'''
243 device_list = []
244 tries = 0
245 try:
246 cli = self.cliEnter(controller = controller)
247 while tries <= 10:
248 device_list = []
249 devices = json.loads(self.cli.devices(jsonFormat = True))
250 log.info("cluster 'devices' command output is %s"%devices)
251 if devices:
252 for device in devices:
253 log.info('device id is %s'%device['id'])
254 device_list.append(str(device['id']))
255 self.cliExit()
256 return device_list
257 else:
258 tries += 1
259 time.sleep(1)
260 self.cliExit()
261 return device_list
262 except:
263 raise Exception('Failed to get cluster members')
264 return device_list
265
266 def get_number_of_devices_of_master(self,controller=None):
267 '''returns master-device pairs, i.e. which master controls which devices'''
268 master_count = {}
269 try:
270 cli = self.cliEnter(controller = controller)
271 masters = json.loads(self.cli.masters(jsonFormat = True))
272 if masters:
273 for master in masters:
274 master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
275 return master_count
276 else:
277 return master_count
278 except:
279 raise Exception('Failed to get cluster members')
280 return master_count
281
282 def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
283 if new_master is None: return False
284 self.cliEnter(controller=controller)
285 cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
286 command = self.cli.command(cmd = cmd, jsonFormat = False)
287 self.cliExit()
288 time.sleep(60)
289 master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
290 assert_equal(master,new_master)
291 log.info('Cluster master changed to %s successfully'%new_master)
292
293 def withdraw_cluster_current_mastership(self,master_ip=None,device_id=device_id,controller=None):
294 '''current master loses its mastership and hence a new master will be elected'''
295 self.cliEnter(controller=controller)
296 cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
297 command = self.cli.command(cmd = cmd, jsonFormat = False)
298 self.cliExit()
299 time.sleep(60)
300 new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
301 assert_not_equal(new_master_ip,master_ip)
302 log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
303 log.info('Cluster new master is %s'%new_master_ip)
304 return True
305
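    # Minimal sketch (not a test) of how the mastership helpers above are meant
    # to be combined; every value comes from the cluster itself:
    #   master, standbys = self.get_cluster_current_master_standbys()
    #   self.change_master_current_cluster(new_master = standbys[0])
    #   self.withdraw_cluster_current_mastership(master_ip = standbys[0])
    # Each helper re-reads the device roles afterwards and asserts on the new master.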
306 def test_cluster_controller_kills(self):
307 '''Test the cluster by repeatedly killing the controllers'''
308 controllers = self.get_controllers()
309 ctlr_len = len(controllers)
310 if ctlr_len <= 1:
311 log.info('ONOS is not running in cluster mode. This test only works for cluster mode')
312 assert_greater(ctlr_len, 1)
313
314 #this call would verify the cluster for once
315 onos_map = self.get_cluster_container_names_ips()
316
317 def check_storage_exception(controller = None):
318 adjacent_controller = None
319 adjacent_controllers = None
320 if controller:
321 adjacent_controllers = set(controllers) - set( [controller] )
322 adjacent_controller = next(iter(adjacent_controllers))
323 for node in controllers:
324 onosLog = OnosLog(host = node)
325 ##check the logs for storage exception
326 _, output = onosLog.get_log(('ERROR', 'Exception',))
327 if output and output.find('StorageException') >= 0:
328 log.info('Storage Exception found on node: %s' %node)
329 log.info('%s' %output)
330 assert_equal('Storage Exception on node {}'.format(node), False)
331 return controller
332
333 try:
334 ips = self.get_cluster_current_member_ips(controller = controller)
335 print('ONOS cluster formed with controllers: %s' %ips)
336 st = True
337 except:
338 st = False
339
340 leaders = self.get_leaders(controllers)
341 failed = filter(lambda l: l == None, leaders)
342 assert_equal(len(failed), 0)
343
344 if st is False:
345 log.info('No storage exception and ONOS cluster was not formed successfully')
346 else:
347 controller = None
348
349 return controller
350
351 next_controller = None
352 tries = 10
353 for num in range(tries):
354 index = num % ctlr_len
355 #index = random.randrange(0, ctlr_len)
356 controller = onos_map[controllers[index]] if next_controller is None else next_controller
357 log.info('Restarting Controller %s' %controller)
358 try:
359 cord_test_onos_restart(node = controller)
360 time.sleep(30)
361 except:
362 time.sleep(5)
363 continue
364 next_controller = check_storage_exception(controller = controller)
365
366 #pass
367 def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
368 status = self.verify_cluster_status(onos_instances = onos_instances)
369 assert_equal(status, True)
370 log.info('Cluster exists with %d ONOS instances'%onos_instances)
371
372 #nottest cluster not coming up properly if member goes down
373 def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
374 status = self.verify_cluster_status(onos_instances = onos_instances)
375 assert_equal(status, True)
376 onos_ips = self.get_cluster_current_member_ips()
377 onos_instances = len(onos_ips)+add
378 log.info('Adding %d nodes to the ONOS cluster' %add)
379 cord_test_onos_add_cluster(count = add)
380 status = self.verify_cluster_status(onos_instances=onos_instances)
381 assert_equal(status, True)
382
383 def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
384 status = self.verify_cluster_status(onos_instances = onos_instances)
385 assert_equal(status, True)
386 master, standbys = self.get_cluster_current_master_standbys()
387 assert_equal(len(standbys),(onos_instances-1))
388 onos_names_ips = self.get_cluster_container_names_ips()
389 master_onos_name = onos_names_ips[master]
390 log.info('Removing cluster current master %s'%(master))
391 cord_test_onos_shutdown(node = master_onos_name)
392 time.sleep(60)
393 onos_instances -= 1
394 status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
395 assert_equal(status, True)
396 new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
397 assert_not_equal(master,new_master)
398 log.info('Successfully removed cluster master instance')
399
400 def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
401 status = self.verify_cluster_status(onos_instances = onos_instances)
402 assert_equal(status, True)
403 master, standbys = self.get_cluster_current_master_standbys()
404 assert_equal(len(standbys),(onos_instances-1))
405 onos_names_ips = self.get_cluster_container_names_ips()
406 member_onos_name = onos_names_ips[standbys[0]]
407 log.info('Removing cluster member %s'%standbys[0])
408 cord_test_onos_shutdown(node = member_onos_name)
409 time.sleep(60)
410 onos_instances -= 1
411 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
412 assert_equal(status, True)
413
414 def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
415 status = self.verify_cluster_status(onos_instances = onos_instances)
416 assert_equal(status, True)
417 master, standbys = self.get_cluster_current_master_standbys()
418 assert_equal(len(standbys),(onos_instances-1))
419 onos_names_ips = self.get_cluster_container_names_ips()
420 member1_onos_name = onos_names_ips[standbys[0]]
421 member2_onos_name = onos_names_ips[standbys[1]]
422 log.info('Removing cluster member %s'%standbys[0])
423 cord_test_onos_shutdown(node = member1_onos_name)
424 log.info('Removing cluster member %s'%standbys[1])
425 cord_test_onos_shutdown(node = member2_onos_name)
426 time.sleep(60)
427 onos_instances = onos_instances - 2
428 status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
429 assert_equal(status, True)
430
431 def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
432 status = self.verify_cluster_status(onos_instances = onos_instances)
433 assert_equal(status, True)
434 master, standbys = self.get_cluster_current_master_standbys()
435 assert_equal(len(standbys),(onos_instances-1))
436 onos_names_ips = self.get_cluster_container_names_ips()
437 for i in range(remove):
438 member_onos_name = onos_names_ips[standbys[i]]
439 log.info('Removing onos container with name %s'%standbys[i])
440 cord_test_onos_shutdown(node = member_onos_name)
441 time.sleep(60)
442 onos_instances = onos_instances - remove
443 status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
444 assert_equal(status, True)
445
446 #nottest test cluster not coming up properly if member goes down
447 def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
448 status = self.verify_cluster_status(onos_instances = onos_instances)
449 assert_equal(status, True)
450 onos_ips = self.get_cluster_current_member_ips()
451 onos_instances = len(onos_ips)+add
452 log.info('Adding %d ONOS instances to the cluster'%add)
453 cord_test_onos_add_cluster(count = add)
454 status = self.verify_cluster_status(onos_instances=onos_instances)
455 assert_equal(status, True)
456 log.info('Removing %d ONOS instances from the cluster'%remove)
457 for i in range(remove):
458 name = '{}-{}'.format(Onos.NAME, onos_instances - i)
459 log.info('Removing onos container with name %s'%name)
460 cord_test_onos_shutdown(node = name)
461 time.sleep(60)
462 onos_instances = onos_instances-remove
463 status = self.verify_cluster_status(onos_instances=onos_instances)
464 assert_equal(status, True)
465
466 #nottest cluster not coming up properly if member goes down
467 def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
468 status = self.verify_cluster_status(onos_instances = onos_instances)
469 assert_equal(status, True)
470 onos_ips = self.get_cluster_current_member_ips()
471 onos_instances = onos_instances-remove
472 log.info('Removing %d ONOS instances from the cluster'%remove)
473 for i in range(remove):
474 name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
475 log.info('Removing onos container with name %s'%name)
476 cord_test_onos_shutdown(node = name)
477 time.sleep(60)
478 status = self.verify_cluster_status(onos_instances=onos_instances)
479 assert_equal(status, True)
480 log.info('Adding %d ONOS instances to the cluster'%add)
481 cord_test_onos_add_cluster(count = add)
482 onos_instances = onos_instances+add
483 status = self.verify_cluster_status(onos_instances=onos_instances)
484 assert_equal(status, True)
485
486 def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
487 status = self.verify_cluster_status(onos_instances = onos_instances)
488 assert_equal(status, True)
489 log.info('Restarting cluster')
490 cord_test_onos_restart()
491 status = self.verify_cluster_status(onos_instances = onos_instances)
492 assert_equal(status, True)
493
494 def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
495 status = self.verify_cluster_status(onos_instances = onos_instances)
496 assert_equal(status, True)
497 master, standbys = self.get_cluster_current_master_standbys()
498 onos_names_ips = self.get_cluster_container_names_ips()
499 master_onos_name = onos_names_ips[master]
500 log.info('Restarting cluster master %s'%master)
501 cord_test_onos_restart(node = master_onos_name)
502 status = self.verify_cluster_status(onos_instances = onos_instances)
503 assert_equal(status, True)
504 log.info('Cluster came up after master restart as expected')
505
506 #test fails: master changes after restart. Need to check correct behavior.
507 def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
508 status = self.verify_cluster_status(onos_instances = onos_instances)
509 assert_equal(status, True)
510 master1, standbys = self.get_cluster_current_master_standbys()
511 onos_names_ips = self.get_cluster_container_names_ips()
512 master_onos_name = onos_names_ips[master1]
513 log.info('Restarting cluster master %s'%master1)
514 cord_test_onos_restart(node = master_onos_name)
515 status = self.verify_cluster_status(onos_instances = onos_instances)
516 assert_equal(status, True)
517 master2, standbys = self.get_cluster_current_master_standbys()
518 assert_equal(master1,master2)
519 log.info('Cluster master is same before and after cluster master restart as expected')
520
521 def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
522 status = self.verify_cluster_status(onos_instances = onos_instances)
523 assert_equal(status, True)
524 master, standbys = self.get_cluster_current_master_standbys()
525 assert_equal(len(standbys),(onos_instances-1))
526 onos_names_ips = self.get_cluster_container_names_ips()
527 member_onos_name = onos_names_ips[standbys[0]]
528 log.info('Restarting cluster member %s'%standbys[0])
529 cord_test_onos_restart(node = member_onos_name)
530 status = self.verify_cluster_status(onos_instances = onos_instances)
531 assert_equal(status, True)
532 log.info('Cluster came up as expected after restarting one member')
533
534 def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
535 status = self.verify_cluster_status(onos_instances = onos_instances)
536 assert_equal(status, True)
537 master, standbys = self.get_cluster_current_master_standbys()
538 assert_equal(len(standbys),(onos_instances-1))
539 onos_names_ips = self.get_cluster_container_names_ips()
540 member1_onos_name = onos_names_ips[standbys[0]]
541 member2_onos_name = onos_names_ips[standbys[1]]
542 log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
543 cord_test_onos_restart(node = member1_onos_name)
544 cord_test_onos_restart(node = member2_onos_name)
545 status = self.verify_cluster_status(onos_instances = onos_instances)
546 assert_equal(status, True)
547 log.info('Cluster came up as expected after restarting two members')
548
549 def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
550 status = self.verify_cluster_status(onos_instances = onos_instances)
551 assert_equal(status,True)
552 master, standbys = self.get_cluster_current_master_standbys()
553 assert_equal(len(standbys),(onos_instances-1))
554 onos_names_ips = self.get_cluster_container_names_ips()
555 for i in range(members):
556 member_onos_name = onos_names_ips[standbys[i]]
557 log.info('Restarting cluster member %s'%standbys[i])
558 cord_test_onos_restart(node = member_onos_name)
559
560 status = self.verify_cluster_status(onos_instances = onos_instances)
561 assert_equal(status, True)
562 log.info('Cluster came up as expected after restarting %d members'%members)
563
564 def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
565 status = self.verify_cluster_status(onos_instances=onos_instances)
566 assert_equal(status, True)
567 master, standbys = self.get_cluster_current_master_standbys()
568 assert_equal(len(standbys),(onos_instances-1))
569 log.info('Cluster current master of devices is %s'%master)
570 self.change_master_current_cluster(new_master=standbys[0])
571 log.info('Cluster master changed successfully')
572
573 #tested on single onos setup.
574 def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
575 status = self.verify_cluster_status(onos_instances = onos_instances)
576 assert_equal(status, True)
577 onos_ips = self.get_cluster_current_member_ips()
578 self.vrouter.setUpClass()
579 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
580 assert_equal(res, True)
581 for onos_ip in onos_ips:
582 tries = 0
583 flag = False
584 try:
585 self.cliEnter(controller = onos_ip)
586 while tries <= 5:
587 routes = json.loads(self.cli.routes(jsonFormat = True))
588 if routes:
589 assert_equal(len(routes['routes4']), networks)
590 self.cliExit()
591 flag = True
592 break
593 else:
594 tries += 1
595 time.sleep(1)
596 assert_equal(flag, True)
597 except:
598 log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
599 raise
600
601 #tested on single onos setup.
602 def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
603 status = self.verify_cluster_status(onos_instances = onos_instances)
604 assert_equal(status, True)
605 onos_ips = self.get_cluster_current_member_ips()
606 master, standbys = self.get_cluster_current_master_standbys()
607 onos_names_ips = self.get_cluster_container_names_ips()
608 master_onos_name = onos_names_ips[master]
609 self.vrouter.setUpClass()
610 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
611 assert_equal(res,True)
612 cord_test_onos_shutdown(node = master_onos_name)
613 time.sleep(60)
614 log.info('Verifying vrouter traffic after cluster master is down')
615 self.vrouter.vrouter_traffic_verify()
616
617 #tested on single onos setup.
618 def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
619 status = self.verify_cluster_status(onos_instances = onos_instances)
620 assert_equal(status, True)
621 onos_ips = self.get_cluster_current_member_ips()
622 master, standbys = self.get_cluster_current_master_standbys()
623 onos_names_ips = self.get_cluster_container_names_ips()
624 master_onos_name = onos_names_ips[master]
625 self.vrouter.setUpClass()
626 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
627 assert_equal(res, True)
628 cord_test_onos_restart()
629 self.vrouter.vrouter_traffic_verify()
630
631 #tested on single onos setup.
632 def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
633 status = self.verify_cluster_status(onos_instances = onos_instances)
634 assert_equal(status, True)
635 self.vrouter.setUpClass()
636 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
637 assert_equal(res, True)
638 self.vrouter.vrouter_activate(deactivate=True)
639 time.sleep(15)
640 self.vrouter.vrouter_traffic_verify(positive_test=False)
641 self.vrouter.vrouter_activate(deactivate=False)
642
643 #tested on single onos setup.
644 def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
645 status = self.verify_cluster_status(onos_instances = onos_instances)
646 assert_equal(status, True)
647 master, standbys = self.get_cluster_current_master_standbys()
648 onos_names_ips = self.get_cluster_container_names_ips()
649 master_onos_name = onos_names_ips[master]
650 self.vrouter.setUpClass()
651 log.info('Verifying vrouter before master down')
652 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
653 assert_equal(res, True)
654 self.vrouter.vrouter_activate(deactivate=True)
655 log.info('Verifying vrouter traffic after app deactivated')
656 time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
657 self.vrouter.vrouter_traffic_verify(positive_test=False)
658 log.info('Verifying vrouter traffic after master down')
659 cord_test_onos_shutdown(node = master_onos_name)
660 time.sleep(60)
661 self.vrouter.vrouter_traffic_verify(positive_test=False)
662 self.vrouter.vrouter_activate(deactivate=False)
663
664 #tested on single onos setup.
665 def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
666 status = self.verify_cluster_status(onos_instances = onos_instances)
667 assert_equal(status, True)
668 master, standbys = self.get_cluster_current_master_standbys()
669 onos_names_ips = self.get_cluster_container_names_ips()
670 member_onos_name = onos_names_ips[standbys[0]]
671 self.vrouter.setUpClass()
672 log.info('Verifying vrouter before cluster member down')
673 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
674 assert_equal(res, True) # Expecting vrouter should work properly
675 log.info('Verifying vrouter after cluster member down')
676 cord_test_onos_shutdown(node = member_onos_name)
677 time.sleep(60)
678 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster goes down
679
680 #tested on single onos setup.
681 def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
682 status = self.verify_cluster_status(onos_instances = onos_instances)
683 assert_equal(status, True)
684 master, standbys = self.get_cluster_current_master_standbys()
685 onos_names_ips = self.get_cluster_container_names_ips()
686 member_onos_name = onos_names_ips[standbys[1]]
687 self.vrouter.setUpClass()
688 log.info('Verifying vrouter traffic before cluster member restart')
689 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
690 assert_equal(res, True) # Expecting vrouter should work properly
691 cord_test_onos_restart(node = member_onos_name)
692 log.info('Verifying vrouter traffic after cluster member restart')
693 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
694
695 #tested on single onos setup.
696 def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
697 status = self.verify_cluster_status(onos_instances = onos_instances)
698 assert_equal(status, True)
699 self.vrouter.setUpClass()
700 log.info('Verifying vrouter traffic before cluster restart')
701 res = self.vrouter.vrouter_network_verify(networks, peers = 1)
702 assert_equal(res, True) # Expecting vrouter should work properly
703 cord_test_onos_restart()
704 log.info('Verifying vrouter traffic after cluster restart')
705 self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
706
707
708 #test fails because flow state is in pending_add in onos
709 def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
710 status = self.verify_cluster_status(onos_instances = onos_instances)
711 assert_equal(status, True)
712 master, standbys = self.get_cluster_current_master_standbys()
713 onos_names_ips = self.get_cluster_container_names_ips()
714 master_onos_name = onos_names_ips[master]
715 self.flows.setUpClass()
716 egress = 1
717 ingress = 2
718 egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
719 ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
720 flow = OnosFlowCtrl(deviceId = self.device_id,
721 egressPort = egress,
722 ingressPort = ingress,
723 udpSrc = ingress_map['udp_port'],
724 udpDst = egress_map['udp_port'],
725 controller=master
726 )
727 result = flow.addFlow()
728 assert_equal(result, True)
729 time.sleep(1)
730 self.success = False
731 def mac_recv_task():
732 def recv_cb(pkt):
733 log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
734 self.success = True
735 sniff(timeout=2,
736 lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
737 and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])
738
739 for i in [0,1]:
740 if i == 1:
741 cord_test_onos_shutdown(node = master_onos_name)
742 log.info('Verifying flows traffic after master killed')
743 time.sleep(45)
744 else:
745 log.info('Verifying flows traffic before master killed')
746 t = threading.Thread(target = mac_recv_task)
747 t.start()
748 L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
749 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
750 L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
751 pkt = L2/L3/L4
752 log.info('Sending packets to verify if flows are correct')
753 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
754 t.join()
755 assert_equal(self.success, True)
756
757 def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
758 status = self.verify_cluster_status(onos_instances=onos_instances)
759 assert_equal(status, True)
760 master, standbys = self.get_cluster_current_master_standbys()
761 self.flows.setUpClass()
762 egress = 1
763 ingress = 2
764 egress_map = { 'ip': '192.168.30.1' }
765 ingress_map = { 'ip': '192.168.40.1' }
766 flow = OnosFlowCtrl(deviceId = self.device_id,
767 egressPort = egress,
768 ingressPort = ingress,
769 ecn = 1,
770 controller=master
771 )
772 result = flow.addFlow()
773 assert_equal(result, True)
774 ##wait for flows to be added to ONOS
775 time.sleep(1)
776 self.success = False
777 def mac_recv_task():
778 def recv_cb(pkt):
779 log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
780 self.success = True
781 sniff(count=2, timeout=5,
782 lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
783 and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
784 iface = self.flows.port_map[egress])
785 for i in [0,1]:
786 if i == 1:
787 log.info('Changing cluster master to %s'%standbys[0])
788 self.change_master_current_cluster(new_master=standbys[0])
789 log.info('Verifying flow traffic after cluster master changed')
790 else:
791 log.info('Verifying flow traffic before cluster master changed')
792 t = threading.Thread(target = mac_recv_task)
793 t.start()
794 L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
795 L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
796 pkt = L2/L3
797 log.info('Sending a packet to verify if flows are correct')
798 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
799 t.join()
800 assert_equal(self.success, True)
801
802 #pass
803 def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
804 status = self.verify_cluster_status(onos_instances=onos_instances)
805 assert_equal(status, True)
806 master,standbys = self.get_cluster_current_master_standbys()
807 onos_names_ips = self.get_cluster_container_names_ips()
808 master_onos_name = onos_names_ips[master]
809 self.flows.setUpClass()
810 egress = 1
811 ingress = 2
812 egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
813 ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
814 flow = OnosFlowCtrl(deviceId = self.device_id,
815 egressPort = egress,
816 ingressPort = ingress,
817 ipv6_extension = 0,
818 controller=master
819 )
820
821 result = flow.addFlow()
822 assert_equal(result, True)
823 ##wait for flows to be added to ONOS
824 time.sleep(1)
825 self.success = False
826 def mac_recv_task():
827 def recv_cb(pkt):
828 log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
829 self.success = True
830 sniff(timeout=2,count=5,
831 lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
832 for i in [0,1]:
833 if i == 1:
834 log.info('Restart cluster current master %s'%master)
835 Container(master_onos_name,Onos.IMAGE).restart()
836 time.sleep(45)
837 log.info('Verifying flow traffic after master restart')
838 else:
839 log.info('Verifying flow traffic before master restart')
840 t = threading.Thread(target = mac_recv_task)
841 t.start()
842 L2 = self.flows_eth
843 L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
844 pkt = L2/L3
845 log.info('Sending packets to verify if flows are correct')
846 sendp(pkt, count=50, iface = self.flows.port_map[ingress])
847 t.join()
848 assert_equal(self.success, True)
849
850 def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
851 dst_mac = self.igmp.iptomac(group)
852 eth = Ether(dst= dst_mac)
853 ip = IP(dst=group,src=source)
854 data = repr(monotonic.monotonic())
855 sendp(eth/ip/data,count=20, iface = intf)
856 pkt = (eth/ip/data)
857 log.info('multicast traffic packet %s'%pkt.show())
858
859 def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
860 log.info('verifying multicast traffic for group %s from source %s'%(group,source))
861 self.success = False
862 def recv_task():
863 def igmp_recv_cb(pkt):
864 log.info('multicast data received for group %s from source %s'%(group,source))
865 self.success = True
866 sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
867 t = threading.Thread(target = recv_task)
868 t.start()
869 self.send_multicast_data_traffic(group,source=source)
870 t.join()
871 return self.success
872
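    # The IGMP tests below all follow one pattern (sketch with illustrative
    # group/source addresses; the igmp helpers come from cluster_igmp in Cluster):
    #   groups, src_list = ['224.2.3.4'], ['2.2.2.2']
    #   self.igmp.onos_ssm_table_load(groups, src_list = src_list, controller = master)
    #   self.igmp.send_igmp_join(groups = groups, src_list = src_list,
    #                            record_type = IGMP_V3_GR_TYPE_INCLUDE,
    #                            iface = self.V_INF1, delay = 2)
    #   assert_equal(self.verify_igmp_data_traffic(groups[0], intf = self.V_INF1,
    #                                              source = src_list[0]), True)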
873 #pass
874 def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
875 status = self.verify_cluster_status(onos_instances=onos_instances)
876 assert_equal(status, True)
877 master, standbys = self.get_cluster_current_master_standbys()
878 assert_equal(len(standbys), (onos_instances-1))
879 onos_names_ips = self.get_cluster_container_names_ips()
880 master_onos_name = onos_names_ips[master]
881 self.igmp.setUp(controller=master)
882 groups = ['224.2.3.4','230.5.6.7']
883 src_list = ['2.2.2.2','3.3.3.3']
884 self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
885 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
886 iface = self.V_INF1, delay = 2)
887 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
888 iface = self.V_INF1, delay = 2)
889 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
890 assert_equal(status,True)
891 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
892 assert_equal(status,False)
893 log.info('restarting cluster master %s'%master)
894 Container(master_onos_name,Onos.IMAGE).restart()
895 time.sleep(60)
896 log.info('verifying multicast data traffic after master restart')
897 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
898 assert_equal(status,True)
899 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
900 assert_equal(status,False)
901
902 #pass
903 def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
904 status = self.verify_cluster_status(onos_instances=onos_instances)
905 assert_equal(status, True)
906 master, standbys = self.get_cluster_current_master_standbys()
907 assert_equal(len(standbys), (onos_instances-1))
908 onos_names_ips = self.get_cluster_container_names_ips()
909 master_onos_name = onos_names_ips[master]
910 self.igmp.setUp(controller=master)
911 groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
912 src_list = [self.igmp.randomsourceip()]
913 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
914 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
915 iface = self.V_INF1, delay = 2)
916 self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
917 iface = self.V_INF1, delay = 2)
918 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
919 assert_equal(status,True)
920 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
921 assert_equal(status,False)
922 log.info('Killing cluster master %s'%master)
923 Container(master_onos_name,Onos.IMAGE).kill()
924 time.sleep(60)
925 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
926 assert_equal(status, True)
927 log.info('Verifying multicast data traffic after cluster master down')
928 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
929 assert_equal(status,True)
930 status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
931 assert_equal(status,False)
932
933 def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
934 status = self.verify_cluster_status(onos_instances=onos_instances)
935 assert_equal(status, True)
936 master, standbys = self.get_cluster_current_master_standbys()
937 assert_equal(len(standbys), (onos_instances-1))
938 onos_names_ips = self.get_cluster_container_names_ips()
939 master_onos_name = onos_names_ips[master]
940 self.igmp.setUp(controller=master)
941 groups = [self.igmp.random_mcast_ip()]
942 src_list = [self.igmp.randomsourceip()]
943 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
944 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
945 iface = self.V_INF1, delay = 2)
946 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
947 assert_equal(status,True)
948 log.info('Killing cluster master %s'%master)
949 Container(master_onos_name,Onos.IMAGE).kill()
950 count = 0
951 for i in range(60):
952 log.info('Verifying multicast data traffic after cluster master down')
953 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
954 if status:
955 break
956 else:
957 count += 1
958 time.sleep(1)
959 assert_equal(status, True)
960 log.info('Time taken to recover traffic after cluster master down is %d seconds'%count)
961
962
963 #pass
964 def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
965 status = self.verify_cluster_status(onos_instances=onos_instances)
966 assert_equal(status, True)
967 master, standbys = self.get_cluster_current_master_standbys()
968 assert_equal(len(standbys), (onos_instances-1))
969 self.igmp.setUp(controller=master)
970 groups = [self.igmp.random_mcast_ip()]
971 src_list = [self.igmp.randomsourceip()]
972 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
973 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
974 iface = self.V_INF1, delay = 2)
975 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
976 assert_equal(status,True)
977 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
978 self.change_master_current_cluster(new_master=standbys[0])
979 log.info('Verifying multicast traffic after cluster master change')
980 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
981 assert_equal(status,True)
982 log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
983 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
984 iface = self.V_INF1, delay = 1)
985 time.sleep(10)
986 status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
987 assert_equal(status,False)
988
989 #pass
990 def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
991 status = self.verify_cluster_status(onos_instances=onos_instances)
992 assert_equal(status, True)
993 master,standbys = self.get_cluster_current_master_standbys()
994 assert_equal(len(standbys), (onos_instances-1))
995 self.igmp.setUp(controller=master)
996 groups = [self.igmp.random_mcast_ip()]
997 src_list = [self.igmp.randomsourceip()]
998 self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
999 log.info('Changing cluster master %s to %s'%(master,standbys[0]))
1000 self.change_master_current_cluster(new_master = standbys[0])
1001 self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
1002 iface = self.V_INF1, delay = 2)
1003 time.sleep(1)
1004 self.change_master_current_cluster(new_master = master)
1005 status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
1006 assert_equal(status,True)
1007
1008 #pass
1009 @deferred(TLS_TIMEOUT)
1010 def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
1011 status = self.verify_cluster_status(onos_instances=onos_instances)
1012 assert_equal(status, True)
1013 master, standbys = self.get_cluster_current_master_standbys()
1014 assert_equal(len(standbys), (onos_instances-1))
1015 self.tls.setUp(controller=master)
1016 df = defer.Deferred()
1017 def eap_tls_verify(df):
1018 tls = TLSAuthTest()
1019 tls.runTest()
1020 df.callback(0)
1021 reactor.callLater(0, eap_tls_verify, df)
1022 return df
1023
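    # The EAP-TLS tests above and below share one twisted pattern: nose's
    # @deferred() waits on the returned Deferred, which is fired from a callback
    # scheduled on the reactor. Sketch of the pattern (TLSAuthTest is from EapTLS):
    #   df = defer.Deferred()
    #   def eap_tls_verify(df):
    #       tls = TLSAuthTest()
    #       tls.runTest()
    #       df.callback(0)
    #   reactor.callLater(0, eap_tls_verify, df)
    #   return df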
1024 @deferred(120)
1025 def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
1026 master, standbys = self.get_cluster_current_master_standbys()
1027 assert_equal(len(standbys), (onos_instances-1))
1028 self.tls.setUp()
1029 df = defer.Deferred()
1030 def eap_tls_verify(df):
1031 tls = TLSAuthTest()
1032 tls.runTest()
1033 df.callback(0)
1034 for i in [0,1]:
1035 if i == 1:
1036 log.info('Changing cluster master %s to %s'%(master, standbys[0]))
1037 self.change_master_current_cluster(new_master=standbys[0])
1038 log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
1039 else:
1040 log.info('Verifying tls authentication before cluster master change')
1041 reactor.callLater(0, eap_tls_verify, df)
1042 return df
1043
1044 @deferred(TLS_TIMEOUT)
1045 def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
1046 status = self.verify_cluster_status(onos_instances=onos_instances)
1047 assert_equal(status, True)
1048 master, standbys = self.get_cluster_current_master_standbys()
1049 assert_equal(len(standbys), (onos_instances-1))
1050 onos_names_ips = self.get_cluster_container_names_ips()
1051 master_onos_name = onos_names_ips[master]
1052 self.tls.setUp()
1053 df = defer.Deferred()
1054 def eap_tls_verify(df):
1055 tls = TLSAuthTest()
1056 tls.runTest()
1057 df.callback(0)
1058 for i in [0,1]:
1059 if i == 1:
1060 log.info('Killing cluster current master %s'%master)
1061 cord_test_onos_shutdown(node = master_onos_name)
1062 time.sleep(20)
1063 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
1064 assert_equal(status, True)
1065 log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
1066 log.info('Verifying tls authentication after killing cluster master')
1067 reactor.callLater(0, eap_tls_verify, df)
1068 return df
1069
1070 @deferred(TLS_TIMEOUT)
1071 def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
1072 status = self.verify_cluster_status(onos_instances=onos_instances)
1073 assert_equal(status, True)
1074 master, standbys = self.get_cluster_current_master_standbys()
1075 assert_equal(len(standbys), (onos_instances-1))
1076 onos_names_ips = self.get_cluster_container_names_ips()
1077 member_onos_name = onos_names_ips[standbys[0]]
1078 self.tls.setUp()
1079 df = defer.Deferred()
1080 def eap_tls_no_cert(df):
1081 def tls_no_cert_cb():
1082 log.info('TLS authentication failed with no certificate')
1083 tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
1084 tls.runTest()
1085 assert_equal(tls.failTest, True)
1086 df.callback(0)
1087 for i in [0,1]:
1088 if i == 1:
1089 log.info('Restart cluster member %s'%standbys[0])
1090 Container(member_onos_name,Onos.IMAGE).restart()
1091 time.sleep(20)
1092 status = self.verify_cluster_status(onos_instances=onos_instances)
1093 assert_equal(status, True)
1094 log.info('Cluster came up with %d instances after member restart'%(onos_instances))
1095 log.info('Verifying tls authentication after member restart')
1096 reactor.callLater(0, eap_tls_no_cert, df)
1097 return df
1098
1099 #pass
1100 def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
1101 status = self.verify_cluster_status(onos_instances=onos_instances)
1102 assert_equal(status,True)
1103 master,standbys = self.get_cluster_current_master_standbys()
1104 assert_equal(len(standbys),(onos_instances-1))
1105 self.proxyarp.setUpClass()
1106 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1107 ingress = hosts+1
1108 for hostip, hostmac in hosts_config:
1109 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1110 time.sleep(1)
1111 log.info('changing cluster current master from %s to %s'%(master,standbys[0]))
1112 self.change_master_current_cluster(new_master=standbys[0])
1113 log.info('verifying proxyarp after master change')
1114 for hostip, hostmac in hosts_config:
1115 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1116 time.sleep(1)
1117 log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
1118 self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
1119 time.sleep(3)
1120 for hostip, hostmac in hosts_config:
1121 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
1122 time.sleep(1)
1123 log.info('activating proxyarp app and expecting to get arp reply from ONOS')
1124 self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
1125 time.sleep(3)
1126 for hostip, hostmac in hosts_config:
1127 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1128 time.sleep(1)
1129
1130 #pass
1131 def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
1132 status = self.verify_cluster_status(onos_instances=onos_instances)
1133 assert_equal(status, True)
1134 master, standbys = self.get_cluster_current_master_standbys()
1135 assert_equal(len(standbys), (onos_instances-1))
1136 onos_names_ips = self.get_cluster_container_names_ips()
1137 member_onos_name = onos_names_ips[standbys[1]]
1138 self.proxyarp.setUpClass()
1139 ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
1140 ingress = hosts+1
1141 for hostip, hostmac in hosts_config:
1142 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1143 time.sleep(1)
1144 log.info('killing cluster member %s'%standbys[1])
1145 Container(member_onos_name,Onos.IMAGE).kill()
1146 time.sleep(20)
1147 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
1148 assert_equal(status, True)
1149 log.info('cluster came up with %d instances after member down'%(onos_instances-1))
1150 log.info('verifying proxy arp functionality after cluster member down')
1151 for hostip, hostmac in hosts_config:
1152 self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
1153 time.sleep(1)
1154
1155 #pass
1156 def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
1157 status = self.verify_cluster_status(onos_instances=onos_instances)
1158 assert_equal(status, True)
1159 self.proxyarp.setUpClass()
1160 master, standbys = self.get_cluster_current_master_standbys()
1161 assert_equal(len(standbys), (onos_instances-1))
1162 ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
1163 self.success = True
1164 ingress = hosts+1
1165 ports = range(ingress,ingress+10)
1166 hostmac = []
1167 hostip = []
1168 for ip,mac in hosts_config:
1169 hostmac.append(mac)
1170 hostip.append(ip)
1171 success_dir = {}
1172 def verify_proxyarp(*r):
1173 ingress, hostmac, hostip = r[0],r[1],r[2]
1174 def mac_recv_task():
1175 def recv_cb(pkt):
1176 log.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
1177 success_dir[current_thread().name] = True
1178 sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
1179 prn = recv_cb, iface = self.proxyarp.port_map[ingress])
1180 t = threading.Thread(target = mac_recv_task)
1181 t.start()
1182 pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
1183 log.info('Sending arp request for dest ip %s on interface %s' %
1184 (hostip,self.proxyarp.port_map[ingress]))
1185 sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
1186 t.join()
1187 t = []
1188 for i in range(10):
1189 t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
1190 for i in range(10):
1191 t[i].start()
1192 time.sleep(2)
1193 for i in range(10):
1194 t[i].join()
1195 if len(success_dir) != 10:
1196 self.success = False
1197 assert_equal(self.success, True)
1198
1199 #pass
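# ACL mastership-change test; the ACLTest helper calls used below are:
#   adding_acl_rule('v4', srcIp=..., dstIp=..., action='allow', controller=master)
#   get_acl_rules(controller=master)            -> read back the rule ids
#   remove_acl_rule(rule_id, controller=new_master)
# The rule is added through the old master and removed through the new one.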
1200 def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
1201 status = self.verify_cluster_status(onos_instances=onos_instances)
1202 assert_equal(status, True)
1203 master,standbys = self.get_cluster_current_master_standbys()
1204 assert_equal(len(standbys),(onos_instances-1))
1205 self.acl.setUp()
1206 acl_rule = ACLTest()
1207 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1208 if status is False:
1209 log.info('JSON request returned status %d' %code)
1210 assert_equal(status, True)
1211 result = acl_rule.get_acl_rules(controller=master)
1212 aclRules1 = result.json()['aclRules']
1213 log.info('Added acl rules is %s'%aclRules1)
1214 acl_Id = map(lambda d: d['id'], aclRules1)
1215 log.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
1216 self.change_cluster_current_master(new_master=standbys[0])
1217 status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
1218 if status is False:
1219 log.info('JSON request returned status %d' %code)
1220 assert_equal(status, True)
1221
1222 #pass
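# Verifies ACL state survives a master failure: the rule ids read back from the
# newly elected master after the old master container is killed are expected to
# match the ids recorded before the failure.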
1223 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1224 status = self.verify_cluster_status(onos_instances=onos_instances)
1225 assert_equal(status, True)
1226 master,standbys = self.get_cluster_current_master_standbys()
1227 assert_equal(len(standbys),(onos_instances-1))
1228 onos_names_ips = self.get_cluster_container_names_ips()
1229 master_onos_name = onos_names_ips[master]
1230 self.acl.setUp()
1231 acl_rule = ACLTest()
1232 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1233 if status is False:
1234 log.info('JSON request returned status %d' %code)
1235 assert_equal(status, True)
1236 result1 = acl_rule.get_acl_rules(controller=master)
1237 aclRules1 = result1.json()['aclRules']
1238 log.info('Added acl rules is %s'%aclRules1)
1239 acl_Id1 = map(lambda d: d['id'], aclRules1)
1240 log.info('Killing cluster current master %s'%master)
1241 Container(master_onos_name,Onos.IMAGE).kill()
1242 time.sleep(45)
1243 status = self.verify_cluster_status(onos_instances=onos_instances,controller=standbys[0])
1244 assert_equal(status, True)
1245 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1246 assert_equal(len(standbys),(onos_instances-2))
1247 assert_not_equal(new_master,master)
1248 result2 = acl_rule.get_acl_rules(controller=new_master)
1249 aclRules2 = result2.json()['aclRules']
1250 acl_Id2 = map(lambda d: d['id'], aclRules2)
1251 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1252 assert_equal(acl_Id2,acl_Id1)
1253
1254 #ACL traffic scenario not working because the ACL rule is not getting added to ONOS
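# Intended flow: add a 'deny' ACL rule via the master, send matching UDP traffic
# and expect it to be dropped, then kill two standby members and repeat the
# negative traffic check against the surviving master.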
1255 def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
1256 status = self.verify_cluster_status(onos_instances=onos_instances)
1257 assert_equal(status, True)
1258 master,standbys = self.get_cluster_current_master_standbys()
1259 assert_equal(len(standbys),(onos_instances-1))
1260 onos_names_ips = self.get_cluster_container_names_ips()
1261 member1_onos_name = onos_names_ips[standbys[0]]
1262 member2_onos_name = onos_names_ips[standbys[1]]
1263 ingress = self.acl.ingress_iface
1264 egress = self.acl.CURRENT_PORT_NUM
1265 acl_rule = ACLTest()
1266 status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
1267 self.acl.CURRENT_PORT_NUM += 1
1268 time.sleep(5)
1269 if status is False:
1270 log.info('JSON request returned status %d' %code)
1271 assert_equal(status, True)
1272 srcMac = '00:00:00:00:00:11'
1273 dstMac = host_ip_mac[0][1]
1274 self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
1275 status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
1276 time.sleep(10)
1277 if status is False:
1278 log.info('JSON request returned status %d' %code)
1279 assert_equal(status, True)
1280 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1281 log.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
1282 Container(member1_onos_name, Onos.IMAGE).kill()
1283 Container(member2_onos_name, Onos.IMAGE).kill()
1284 time.sleep(40)
1285 status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
1286 assert_equal(status, True)
1287 self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
1288 self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
1289
1290 #pass
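# DHCP relay release/re-acquire across a mastership change; DHCPTest helper flow
# used below:
#   cip, sip = send_recv(mac)     -> obtain a lease via the relay
#   dhcp.release(cip)             -> release it through the new master
#   send_recv(mac)                -> rediscover and expect the same address back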
1291 def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1292 status = self.verify_cluster_status(onos_instances=onos_instances)
1293 assert_equal(status, True)
1294 master,standbys = self.get_cluster_current_master_standbys()
1295 assert_equal(len(standbys),(onos_instances-1))
1296 self.dhcprelay.setUpClass(controller=master)
1297 mac = self.dhcprelay.get_mac(iface)
1298 self.dhcprelay.host_load(iface)
1299 ##We use the defaults for this test, which also serves as an example for others
1300 ##No need to restart the dhcpd server if retaining the default config
1301 config = self.dhcprelay.default_config
1302 options = self.dhcprelay.default_options
1303 subnet = self.dhcprelay.default_subnet_config
1304 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1305 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1306 config = config,
1307 options = options,
1308 subnet = subnet,
1309 controller=master)
1310 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
1311 cip, sip = self.dhcprelay.send_recv(mac)
1312 log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
1313 self.change_master_current_cluster(new_master=standbys[0])
1314 log.info('Releasing ip %s to server %s' %(cip, sip))
1315 assert_equal(self.dhcprelay.dhcp.release(cip), True)
1316 log.info('Triggering DHCP discover again after release')
1317 cip2, sip2 = self.dhcprelay.send_recv(mac)
1318 log.info('Verifying released IP was given back on rediscover')
1319 assert_equal(cip, cip2)
1320 log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
1321 assert_equal(self.dhcprelay.dhcp.release(cip2), True)
1322 self.dhcprelay.tearDownClass(controller=standbys[0])
1323
1324
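# Obtains a lease while the original master is up, kills the master container,
# then re-requests with only_request() and expects the client to keep the same
# IP from the remaining cluster members.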
1325 def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1326 status = self.verify_cluster_status(onos_instances=onos_instances)
1327 assert_equal(status, True)
1328 master,standbys = self.get_cluster_current_master_standbys()
1329 assert_equal(len(standbys),(onos_instances-1))
1330 onos_names_ips = self.get_cluster_container_names_ips()
1331 master_onos_name = onos_names_ips[master]
1332 self.dhcprelay.setUpClass(controller=master)
1333 mac = self.dhcprelay.get_mac(iface)
1334 self.dhcprelay.host_load(iface)
1335 ##We use the defaults for this test, which also serves as an example for others
1336 ##No need to restart the dhcpd server if retaining the default config
1337 config = self.dhcprelay.default_config
1338 options = self.dhcprelay.default_options
1339 subnet = self.dhcprelay.default_subnet_config
1340 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1341 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1342 config = config,
1343 options = options,
1344 subnet = subnet,
1345 controller=master)
1346 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1347 log.info('Initiating dhcp process from client %s'%mac)
1348 cip, sip = self.dhcprelay.send_recv(mac)
1349 log.info('Killing cluster current master %s'%master)
1350 Container(master_onos_name, Onos.IMAGE).kill()
1351 time.sleep(60)
1352 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
1353 assert_equal(status, True)
1354 mac = self.dhcprelay.dhcp.get_mac(cip)[0]
1355 log.info("Verifying dhcp clients gets same IP after cluster master restarts")
1356 new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
1357 assert_equal(new_cip, cip)
1358 self.dhcprelay.tearDownClass(controller=standbys[0])
1359
1360 #pass
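# Simulates three DHCP clients with distinct MACs, toggling cluster mastership
# between leases to confirm address assignment works under either master.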
1361 def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
1362 status = self.verify_cluster_status(onos_instances=onos_instances)
1363 assert_equal(status, True)
1364 master,standbys = self.get_cluster_current_master_standbys()
1365 assert_equal(len(standbys),(onos_instances-1))
1366 self.dhcprelay.setUpClass(controller=master)
1367 macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
1368 self.dhcprelay.host_load(iface)
1369 ##We use the defaults for this test, which also serves as an example for others
1370 ##No need to restart the dhcpd server if retaining the default config
1371 config = self.dhcprelay.default_config
1372 options = self.dhcprelay.default_options
1373 subnet = self.dhcprelay.default_subnet_config
1374 dhcpd_interface_list = self.dhcprelay.relay_interfaces
1375 self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
1376 config = config,
1377 options = options,
1378 subnet = subnet,
1379 controller=master)
1380 self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
1381 cip1, sip1 = self.dhcprelay.send_recv(macs[0])
1382 assert_not_equal(cip1,None)
1383 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
1384 log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
1385 self.change_master_current_cluster(new_master=standbys[0])
1386 cip2, sip2 = self.dhcprelay.send_recv(macs[1])
1387 assert_not_equal(cip2,None)
1388 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
1389 log.info('Changing cluster master from %s to %s'%(standbys[0],master))
1390 self.change_master_current_cluster(new_master=master)
1391 cip3, sip3 = self.dhcprelay.send_recv(macs[2])
1392 assert_not_equal(cip3,None)
1393 log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
1394 self.dhcprelay.tearDownClass(controller=standbys[0])
1395
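# Cord subscriber channel-join test run twice: once on the healthy cluster and
# once after a full ONOS cluster restart, using the tls/dhcp/igmp/traffic
# verification callbacks.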
1396 def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
1397 status = self.verify_cluster_status(onos_instances=onos_instances)
1398 assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
1399 self.subscriber.setUpClass(controller=master)
1400 self.subscriber.num_subscribers = 5
1401 self.subscriber.num_channels = 10
1402 for i in [0,1]:
1403 if i == 1:
1404 cord_test_onos_restart()
1405 time.sleep(45)
1406 status = self.verify_cluster_status(onos_instances=onos_instances)
1407 assert_equal(status, True)
1408 log.info('Verifying cord subscriber functionality after cluster restart')
1409 else:
1410 log.info('Verifying cord subscriber functionality before cluster restart')
1411 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1412 num_channels = self.subscriber.num_channels,
1413 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1414 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1415 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1416 self.subscriber.num_channels))
1417 assert_equal(test_status, True)
1418 self.subscriber.tearDownClass(controller=master)
1419
1420 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
1421 def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
1422 status = self.verify_cluster_status(onos_instances=onos_instances)
1423 assert_equal(status, True)
1424 master,standbys = self.get_cluster_current_master_standbys()
1425 assert_equal(len(standbys),(onos_instances-1))
1426 self.subscriber.setUpClass(controller=master)
1427 self.subscriber.num_subscribers = 5
1428 self.subscriber.num_channels = 10
1429 for i in [0,1]:
1430 if i == 1:
1431 status=self.withdraw_cluster_current_mastership(master_ip=master)
1432 assert_equal(status, True)
1433 master,standbys = self.get_cluster_current_master_standbys()
1434 log.info('verifying cord subscriber functionality after cluster current master withdraw mastership')
1435 else:
1436 log.info('verifying cord subscriber functionality before cluster master withdraw mastership')
1437 test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
1438 num_channels = self.subscriber.num_channels,
1439 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1440 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1441 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
1442 self.subscriber.num_channels),controller=master)
1443 assert_equal(test_status, True)
1444 self.subscriber.tearDownClass(controller=master)
1445
1446 #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
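# Single subscriber receiving traffic on 10 channels, verified before and after
# one standby member is shut down.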
1447 def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
1448 status = self.verify_cluster_status(onos_instances=onos_instances)
1449 assert_equal(status, True)
1450 master, standbys = self.get_cluster_current_master_standbys()
1451 assert_equal(len(standbys),(onos_instances-1))
1452 onos_names_ips = self.get_cluster_container_names_ips()
1453 member_onos_name = onos_names_ips[standbys[0]]
1454 self.subscriber.setUpClass(controller=master)
1455 num_subscribers = 1
1456 num_channels = 10
1457 for i in [0,1]:
1458 if i == 1:
1459 cord_test_onos_shutdown(node = member_onos_name)
1460 time.sleep(30)
1461 status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
1462 assert_equal(status, True)
1463 log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
1464 else:
1465 log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
1466 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1467 num_channels = num_channels,
1468 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
1469 self.subscriber.igmp_verify, self.subscriber.traffic_verify),
1470 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1471 negative_subscriber_auth = 'all',controller=master)
1472 assert_equal(test_status, True)
1473 self.subscriber.tearDownClass(controller=master)
1474
1475 def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
1476 status = self.verify_cluster_status(onos_instances=onos_instances)
1477 assert_equal(status, True)
1478 master, standbys = self.get_cluster_current_master_standbys()
1479 assert_equal(len(standbys),(onos_instances-1))
1480 onos_names_ips = self.get_cluster_container_names_ips()
1481 member1_onos_name = onos_names_ips[standbys[0]]
1482 member2_onos_name = onos_names_ips[standbys[1]]
1483 self.subscriber.setUpClass(controller=master)
1484 num_subscribers = 1
1485 num_channels = 10
1486 for i in [0,1]:
1487 if i == 1:
1488 cord_test_onos_shutdown(node = member1_onos_name)
1489 cord_test_onos_shutdown(node = member2_onos_name)
1490 time.sleep(60)
1491 status = self.verify_cluster_status(onos_instances=onos_instances-2)
1492 assert_equal(status, True)
1493 log.info('Verifying cord subscriber functionality after two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1494 else:
1495 log.info('Verifying cord subscriber functionality before two cluster members %s and %s are down'%(standbys[0],standbys[1]))
1496 test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
1497 num_channels = num_channels,
1498 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
1499 self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
1500 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
1501 negative_subscriber_auth = 'all')
1502 assert_equal(test_status, True)
1503 self.subscriber.tearDownClass(controller=master)
1504
1505 #pass
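# With multiple OVS switches connected, every device is expected to report a
# master plus onos_instances-1 standbys in the mastership view.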
1506 def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1507 status = self.verify_cluster_status(onos_instances=onos_instances)
1508 assert_equal(status, True)
1509 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1510 for device in device_dict.keys():
1511 log.info("Device is %s"%device_dict[device])
1512 assert_not_equal(device_dict[device]['master'],'none')
1513 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1514 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1515
1516 #pass
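# Repeats the per-device mastership check from the point of view of every
# cluster member to confirm all members share a consistent view.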
1517 def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
1518 status = self.verify_cluster_status(onos_instances=onos_instances)
1519 assert_equal(status, True)
1520 device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
1521 cluster_ips = self.get_cluster_current_member_ips()
1522 for ip in cluster_ips:
1523 device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
1524 assert_equal(len(device_dict.keys()),onos_instances)
1525 for device in device_dict.keys():
1526 log.info("Device is %s"%device_dict[device])
1527 assert_not_equal(device_dict[device]['master'],'none')
1528 log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
1529 assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
1530
1531 #pass
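# Restarts a member that currently masters at least one device and expects its
# device count to drop to zero while the overall device total is preserved.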
1532 def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
1533 status = self.verify_cluster_status(onos_instances=onos_instances)
1534 assert_equal(status, True)
1535 onos_names_ips = self.get_cluster_container_names_ips()
1536 master_count = self.get_number_of_devices_of_master()
1537 log.info('Master count information is %s'%master_count)
1538 total_devices = 0
1539 for master in master_count.keys():
1540 total_devices += master_count[master]['size']
1541 if master_count[master]['size'] != 0:
1542 restart_ip = master
1543 assert_equal(total_devices,onos_instances)
1544 member_onos_name = onos_names_ips[restart_ip]
1545 log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
1546 Container(member_onos_name, Onos.IMAGE).restart()
1547 time.sleep(40)
1548 master_count = self.get_number_of_devices_of_master()
1549 log.info('Master count information after restart is %s'%master_count)
1550 total_devices = 0
1551 for master in master_count.keys():
1552 total_devices += master_count[master]['size']
1553 if master == restart_ip:
1554 assert_equal(master_count[master]['size'], 0)
1555 assert_equal(total_devices,onos_instances)
1556
1557 #pass
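# Kills a member that currently masters devices and verifies the surviving
# members re-master its devices, keeping the total device count unchanged.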
1558 def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
1559 status = self.verify_cluster_status(onos_instances=onos_instances)
1560 assert_equal(status, True)
1561 onos_names_ips = self.get_cluster_container_names_ips()
1562 master_count = self.get_number_of_devices_of_master()
1563 log.info('Master count information is %s'%master_count)
1564 total_devices = 0
1565 for master in master_count.keys():
1566 total_devices += master_count[master]['size']
1567 if master_count[master]['size'] != 0:
1568 restart_ip = master
1569 assert_equal(total_devices,onos_instances)
1570 master_onos_name = onos_names_ips[restart_ip]
1571 log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
1572 Container(master_onos_name, Onos.IMAGE).kill()
1573 time.sleep(40)
1574 for ip in onos_names_ips.keys():
1575 if ip != restart_ip:
1576 controller_ip = ip
1577 status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
1578 assert_equal(status, True)
1579 master_count = self.get_number_of_devices_of_master(controller=controller_ip)
1580 log.info('Master count information after restart is %s'%master_count)
1581 total_devices = 0
1582 for master in master_count.keys():
1583 total_devices += master_count[master]['size']
1584 if master == restart_ip:
1585 assert_equal(master_count[master]['size'], 0)
1586 assert_equal(total_devices,onos_instances)
1587
1588 #pass
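# Withdraws mastership of one device from its current master and expects that
# master's device count to drop by exactly one.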
1589 def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
1590 status = self.verify_cluster_status(onos_instances=onos_instances)
1591 assert_equal(status, True)
1592 master_count = self.get_number_of_devices_of_master()
1593 log.info('Master count information is %s'%master_count)
1594 total_devices = 0
1595 for master in master_count.keys():
1596 total_devices += int(master_count[master]['size'])
1597 if master_count[master]['size'] != 0:
1598 master_ip = master
1599 log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
1600 device_id = str(master_count[master]['devices'][0])
1601 device_count = master_count[master]['size']
1602 assert_equal(total_devices,onos_instances)
1603 log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
1604 status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
1605 assert_equal(status, True)
1606 master_count = self.get_number_of_devices_of_master()
1607 log.info('Master count information after cluster mastership withdraw is %s'%master_count)
1608 total_devices = 0
1609 for master in master_count.keys():
1610 total_devices += int(master_count[master]['size'])
1611 if master == master_ip:
1612 assert_equal(master_count[master]['size'], device_count-1)
1613 assert_equal(total_devices,onos_instances)
1614
1615 #pass
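# Full cluster restart: the summed per-master device counts are expected to be
# the same before and after cord_test_onos_restart().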
1616 def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
1617 status = self.verify_cluster_status(onos_instances=onos_instances)
1618 assert_equal(status, True)
1619 master_count = self.get_number_of_devices_of_master()
1620 log.info('Master count information is %s'%master_count)
1621 total_devices = 0
1622 for master in master_count.keys():
1623 total_devices += master_count[master]['size']
1624 assert_equal(total_devices,onos_instances)
1625 log.info('Restarting cluster')
1626 cord_test_onos_restart()
1627 time.sleep(60)
1628 master_count = self.get_number_of_devices_of_master()
1629 log.info('Master count information after restart is %s'%master_count)
1630 total_devices = 0
1631 for master in master_count.keys():
1632 total_devices += master_count[master]['size']
1633 assert_equal(total_devices,onos_instances)
1634