# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl, get_mac
from OltConfig import OltConfig
from socket import socket
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart
from portmaps import g_subscriber_port_map
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
import os
import json
import random
import collections
log.setLevel('INFO')

class cluster_exchange(unittest.TestCase):
    test_path = os.path.dirname(os.path.realpath(__file__))
    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
    mac = RandMAC()._fix()
    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
    igmp_ip = IP(dst = '224.0.0.22')
    ONOS_INSTANCES = 3
    V_INF1 = 'veth0'
    TLS_TIMEOUT = 100
    device_id = 'of:' + get_mac()
    igmp = cluster_igmp()
    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10', end_ip = '224.1.10.49')
    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35', end_ip = '38.24.35.56')
    tls = cluster_tls()
    flows = cluster_flows()
    proxyarp = cluster_proxyarp()
    vrouter = cluster_vrouter()
    acl = cluster_acl()
    dhcprelay = cluster_dhcprelay()
    subscriber = cluster_subscriber()

    def get_controller(self):
        controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
        controller = controller.split(',')[0]
        return controller

    def cliEnter(self, controller = None):
        retries = 0
        while retries < 3:
            self.cli = OnosCliDriver(controller = controller, connect = True)
            if self.cli.handle:
                break
            else:
                retries += 1
                time.sleep(2)

    def cliExit(self):
        self.cli.disconnect()

    def verify_cluster_status(self, controller = None, onos_instances = ONOS_INSTANCES, verify = False):
        tries = 0
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
                if cluster_summary:
                    log.info("cluster 'summary' command output is %s"%cluster_summary)
                    nodes = cluster_summary['nodes']
                    if verify:
                        if nodes == onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                    else:
                        if nodes >= onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return False
        except:
            raise Exception('Failed to get cluster members')
            return False

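    # Rough sketch of the 'summary' JSON consumed above; only the 'nodes'
    # count is relied upon, and the remaining fields are an assumption that
    # may differ across ONOS releases:
    #   {"node": "172.17.0.2", "version": "1.6.0", "nodes": 3, "devices": 1,
    #    "links": 0, "ports": 5, "flows": 8, "intents": 0}
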
    def get_cluster_current_member_ips(self, controller = None):
        tries = 0
        cluster_ips = []
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
                if cluster_nodes:
                    log.info("cluster 'nodes' output is %s"%cluster_nodes)
                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
                    self.cliExit()
                    cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
                    return cluster_ips
                else:
                    tries += 1
            self.cliExit()
            return cluster_ips
        except:
            raise Exception('Failed to get cluster members')
            return cluster_ips

    def get_cluster_container_names_ips(self, controller = None):
        onos_names_ips = {}
        onos_ips = self.get_cluster_current_member_ips(controller = controller)
        #onos_names = [Onos.NAME]
        onos_names_ips[onos_ips[0]] = Onos.NAME
        for i in range(1, len(onos_ips)):
            name = '{0}-{1}'.format(Onos.NAME, i+1)
            onos_names_ips[onos_ips[i]] = name
            #onos_names.append(name)

        return onos_names_ips

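    # Illustrative result of the IP-to-container-name mapping built above for
    # a 3-node cluster whose base container is Onos.NAME == 'onos' (the IP
    # addresses here are made up):
    #   {'172.17.0.2': 'onos', '172.17.0.3': 'onos-2', '172.17.0.4': 'onos-3'}
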
    #identifying current master of a connected device, not tested
    def get_cluster_current_master_standbys(self, controller = None, device_id = device_id):
        master = None
        standbys = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        log.info('Verifying device info in line %s'%device)
                        if device['id'] == device_id:
                            master = str(device['master'])
                            standbys = map(lambda d: str(d), device['standbys'])
                            log.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
                            self.cliExit()
                            return master, standbys
                    self.cliExit()
                    return master, standbys
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return master, standbys
        except:
            raise Exception('Failed to get cluster members')
            return master, standbys

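    # Shape assumed for each entry of the 'roles' output parsed above (the
    # 'id', 'master' and 'standbys' keys are the ones the code relies on;
    # the values are illustrative):
    #   [{"id": "of:0000000000000001", "master": "172.17.0.2",
    #     "standbys": ["172.17.0.3", "172.17.0.4"]}]
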
    def get_cluster_current_master_standbys_of_connected_devices(self, controller = None):
        '''returns master and standbys of all devices connected to the ONOS cluster instance'''
        device_dict = {}
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_dict = {}
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s"%roles)
                if roles:
                    for device in roles:
                        device_dict[str(device['id'])] = {'master': str(device['master']), 'standbys': device['standbys']}
                        for i in range(len(device_dict[device['id']]['standbys'])):
                            device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
                        log.info('master and standbys for device %s are %s and %s'%(device['id'], device_dict[device['id']]['master'], device_dict[device['id']]['standbys']))
                    self.cliExit()
                    return device_dict
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_dict
        except:
            raise Exception('Failed to get cluster members')
            return device_dict

    #identify current master of a connected device, not tested
    def get_cluster_connected_devices(self, controller = None):
        '''returns all the devices connected to ONOS cluster'''
        device_list = []
        tries = 0
        try:
            cli = self.cliEnter(controller = controller)
            while tries <= 10:
                device_list = []
                devices = json.loads(self.cli.devices(jsonFormat = True))
                log.info("cluster 'devices' command output is %s"%devices)
                if devices:
                    for device in devices:
                        log.info('device id is %s'%device['id'])
                        device_list.append(str(device['id']))
                    self.cliExit()
                    return device_list
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return device_list
        except:
            raise Exception('Failed to get cluster members')
            return device_list

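    # Only the 'id' key of each 'devices' entry is used above; any other
    # fields, e.g. as sketched below, are an assumption about the ONOS output:
    #   [{"id": "of:0000000000000001", "available": true, "role": "MASTER"}]
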
    def get_number_of_devices_of_master(self, controller = None):
        '''returns master-device pairs, i.e. which master owns which devices'''
        master_count = {}
        try:
            cli = self.cliEnter(controller = controller)
            masters = json.loads(self.cli.masters(jsonFormat = True))
            if masters:
                for master in masters:
                    master_count[str(master['id'])] = {'size': int(master['size']), 'devices': master['devices']}
                return master_count
            else:
                return master_count
        except:
            raise Exception('Failed to get cluster members')
            return master_count

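    # Shape assumed for the 'masters' output parsed above ('id', 'size' and
    # 'devices' are the keys the code uses; values are illustrative):
    #   [{"id": "172.17.0.2", "size": 1, "devices": ["of:0000000000000001"]},
    #    {"id": "172.17.0.3", "size": 0, "devices": []}]
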
    def change_master_current_cluster(self, new_master = None, device_id = device_id, controller = None):
        if new_master is None: return False
        self.cliEnter(controller = controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        master, standbys = self.get_cluster_current_master_standbys(controller = controller, device_id = device_id)
        assert_equal(master, new_master)
        log.info('Cluster master changed to %s successfully'%new_master)

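    # Minimal usage sketch (assuming a formed cluster and a connected device):
    #   master, standbys = self.get_cluster_current_master_standbys()
    #   self.change_master_current_cluster(new_master = standbys[0])
    # This drives the ONOS CLI command 'device-role <device-id> <node-ip> master'
    # built above, then re-reads mastership to confirm the change took effect.
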
    def withdraw_cluster_current_mastership(self, master_ip = None, device_id = device_id, controller = None):
        '''the current master loses its mastership and hence a new master will be elected'''
        self.cliEnter(controller = controller)
        cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        new_master_ip, standbys = self.get_cluster_current_master_standbys(controller = controller, device_id = device_id)
        assert_not_equal(new_master_ip, master_ip)
        log.info('Device-role of device %s successfully changed to none for controller %s'%(device_id, master_ip))
        log.info('Cluster new master is %s'%new_master_ip)
        return True

    #pass
    def test_cluster_formation_and_verification(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster exists with %d ONOS instances'%onos_instances)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips) + add
        log.info('Adding %d nodes to the ONOS cluster'%add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Removing cluster current master %s'%(master))
        cord_test_onos_shutdown(node = master_onos_name)
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances, controller = standbys[0])
        assert_equal(status, True)
        new_master, standbys = self.get_cluster_current_master_standbys(controller = standbys[0])
        assert_not_equal(master, new_master)
        log.info("Successfully removed the cluster's master instance")

    def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = member_onos_name)
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances, controller = master)
        assert_equal(status, True)

    def test_cluster_removing_two_members(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Removing cluster member %s'%standbys[0])
        cord_test_onos_shutdown(node = member1_onos_name)
        log.info('Removing cluster member %s'%standbys[1])
        cord_test_onos_shutdown(node = member2_onos_name)
        time.sleep(60)
        onos_instances = onos_instances - 2
        status = self.verify_cluster_status(onos_instances = onos_instances, controller = master)
        assert_equal(status, True)

    def test_cluster_removing_N_members(self, remove = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(remove):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Removing onos container with name %s'%standbys[i])
            cord_test_onos_shutdown(node = member_onos_name)
        time.sleep(60)
        onos_instances = onos_instances - remove
        status = self.verify_cluster_status(onos_instances = onos_instances, controller = master)
        assert_equal(status, True)

    #nottest test cluster not coming up properly if member goes down
    def test_cluster_adding_and_removing_members(self, onos_instances = ONOS_INSTANCES, add = 2, remove = 2):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips) + add
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, onos_instances - i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        onos_instances = onos_instances - remove
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    #nottest cluster not coming up properly if member goes down
    def test_cluster_removing_and_adding_member(self, onos_instances = ONOS_INSTANCES, add = 1, remove = 1):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = onos_instances - remove
        log.info('Removing %d ONOS instances from the cluster'%remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, len(onos_ips) - i)
            log.info('Removing onos container with name %s'%name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Adding %d ONOS instances to the cluster'%add)
        cord_test_onos_add_cluster(count = add)
        onos_instances = onos_instances + add
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Restarting cluster')
        cord_test_onos_restart()
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    def test_cluster_master_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Restarting cluster master %s'%master)
        cord_test_onos_restart(node = master_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up after master restart as expected')

    #test fail. master changing after restart. Need to check correct behavior.
    def test_cluster_master_ip_after_master_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master1, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master1]
        log.info('Restarting cluster master %s'%master1)
        cord_test_onos_restart(node = master_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master2, standbys = self.get_cluster_current_master_standbys()
        assert_equal(master1, master2)
        log.info('Cluster master is the same before and after cluster master restart as expected')

    def test_cluster_one_member_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Restarting cluster member %s'%standbys[0])
        cord_test_onos_restart(node = member_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting one member')

    def test_cluster_two_members_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Restarting cluster members %s and %s'%(standbys[0], standbys[1]))
        cord_test_onos_restart(node = member1_onos_name)
        cord_test_onos_restart(node = member2_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting two members')

    def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(members):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Restarting cluster member %s'%standbys[i])
            cord_test_onos_restart(node = member_onos_name)

        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting %d members'%members)

    def test_cluster_state_with_master_change(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        log.info('Cluster current master of devices is %s'%master)
        self.change_master_current_cluster(new_master = standbys[0])
        log.info('Cluster master changed successfully')

    #tested on single onos setup.
    def test_cluster_with_vrouter_routes_in_cluster_members(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        for onos_ip in onos_ips:
            tries = 0
            flag = False
            try:
                self.cliEnter(controller = onos_ip)
                while tries <= 5:
                    routes = json.loads(self.cli.routes(jsonFormat = True))
                    if routes:
                        assert_equal(len(routes['routes4']), networks)
                        self.cliExit()
                        flag = True
                        break
                    else:
                        tries += 1
                        time.sleep(1)
                assert_equal(flag, True)
            except:
                log.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
                raise

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_master_down(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        cord_test_onos_shutdown(node = master_onos_name)
        time.sleep(60)
        log.info('Verifying vrouter traffic after cluster master is down')
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_with_vrouter_and_restarting_master(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        cord_test_onos_restart()
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate = True)
        time.sleep(15)
        self.vrouter.vrouter_traffic_verify(positive_test = False)
        self.vrouter.vrouter_activate(deactivate = False)

    #tested on single onos setup.
    def test_cluster_deactivating_vrouter_app_and_making_master_down(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before master down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate = True)
        log.info('Verifying vrouter traffic after app deactivated')
        time.sleep(15) ## let the app deactivation take effect before verifying traffic
        self.vrouter.vrouter_traffic_verify(positive_test = False)
        log.info('Verifying vrouter traffic after master down')
        cord_test_onos_shutdown(node = master_onos_name)
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify(positive_test = False)
        self.vrouter.vrouter_activate(deactivate = False)

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before cluster member down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # expecting vrouter to work properly
        log.info('Verifying vrouter after cluster member down')
        cord_test_onos_shutdown(node = member_onos_name)
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify() # expecting vrouter to keep working if a cluster member goes down

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_and_restarting_member(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[1]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster member restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # expecting vrouter to work properly
        cord_test_onos_restart(node = member_onos_name)
        log.info('Verifying vrouter traffic after cluster member restart')
        self.vrouter.vrouter_traffic_verify() # expecting vrouter to keep working if a cluster member restarts

    #tested on single onos setup.
    def test_cluster_for_vrouter_app_restarting_cluster(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True) # expecting vrouter to work properly
        cord_test_onos_restart()
        log.info('Verifying vrouter traffic after cluster restart')
        self.vrouter.vrouter_traffic_verify() # expecting vrouter to keep working after the cluster restarts

    #test fails because flow state is in pending_add in onos
    def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
        ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            udpSrc = ingress_map['udp_port'],
                            udpDst = egress_map['udp_port'],
                            controller = master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
                self.success = True
            sniff(timeout=2,
                  lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
                  and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])

        for i in [0,1]:
            if i == 1:
                cord_test_onos_shutdown(node = master_onos_name)
                log.info('Verifying flows traffic after master killed')
                time.sleep(45)
            else:
                log.info('Verifying flows traffic before master killed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
            L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
            pkt = L2/L3/L4
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    def test_cluster_state_changing_master_and_flows_of_ecn(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1' }
        ingress_map = { 'ip': '192.168.40.1' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ecn = 1,
                            controller = master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
                self.success = True
            sniff(count=2, timeout=5,
                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
                  and int(bin(p[IP].tos).split('b')[1][-2:], 2) == 1, prn = recv_cb,
                  iface = self.flows.port_map[egress])
        for i in [0,1]:
            if i == 1:
                log.info('Changing cluster master to %s'%standbys[0])
                self.change_master_current_cluster(new_master = standbys[0])
                log.info('Verifying flow traffic after cluster master changed')
            else:
                log.info('Verifying flow traffic before cluster master changed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
            pkt = L2/L3
            log.info('Sending a packet to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    #pass
    def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
        ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ipv6_extension = 0,
                            controller = master
                            )

        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
                self.success = True
            sniff(timeout=2, count=5,
                  lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
        for i in [0,1]:
            if i == 1:
                log.info('Restart cluster current master %s'%master)
                Container(master_onos_name, Onos.IMAGE).restart()
                time.sleep(45)
                log.info('Verifying flow traffic after master restart')
            else:
                log.info('Verifying flow traffic before master restart')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth
            L3 = IPv6(src = ingress_map['ipv6'], dst = egress_map['ipv6'], nh = 0)
            pkt = L2/L3
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    def send_multicast_data_traffic(self, group, intf = 'veth2', source = '1.2.3.4'):
        dst_mac = self.igmp.iptomac(group)
        eth = Ether(dst = dst_mac)
        ip = IP(dst = group, src = source)
        data = repr(monotonic.monotonic())
        sendp(eth/ip/data, count = 20, iface = intf)
        pkt = (eth/ip/data)
        log.info('multicast traffic packet %s'%pkt.show())

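    # send_multicast_data_traffic() leans on self.igmp.iptomac() to derive the
    # destination MAC from the group address. The real helper lives in the
    # Cluster igmp support code; a minimal sketch of the standard IPv4
    # multicast mapping (low 23 bits of the group IP folded into
    # 01:00:5e:00:00:00), assuming that is what the helper implements:
    #   def iptomac(self, group):
    #       o = map(int, group.split('.'))
    #       return '01:00:5e:%02x:%02x:%02x' % (o[1] & 0x7f, o[2], o[3])
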
    def verify_igmp_data_traffic(self, group, intf = 'veth0', source = '1.2.3.4'):
        log.info('verifying multicast traffic for group %s from source %s'%(group, source))
        self.success = False
        def recv_task():
            def igmp_recv_cb(pkt):
                log.info('multicast data received for group %s from source %s'%(group, source))
                self.success = True
            sniff(prn = igmp_recv_cb, lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count = 1, timeout = 2, iface = intf)
        t = threading.Thread(target = recv_task)
        t.start()
        self.send_multicast_data_traffic(group, source = source)
        t.join()
        return self.success

    #pass
    def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.igmp.setUp(controller = master)
        groups = ['224.2.3.4', '230.5.6.7']
        src_list = ['2.2.2.2', '3.3.3.3']
        self.igmp.onos_ssm_table_load(groups, src_list = src_list, controller = master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_EXCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0], intf = self.V_INF1, source = src_list[0])
        assert_equal(status, True)
        status = self.verify_igmp_data_traffic(groups[1], intf = self.V_INF1, source = src_list[1])
        assert_equal(status, False)
        log.info('restarting cluster master %s'%master)
        Container(master_onos_name, Onos.IMAGE).restart()
        time.sleep(60)
        log.info('verifying multicast data traffic after master restart')
        status = self.verify_igmp_data_traffic(groups[0], intf = self.V_INF1, source = src_list[0])
        assert_equal(status, True)
        status = self.verify_igmp_data_traffic(groups[1], intf = self.V_INF1, source = src_list[1])
        assert_equal(status, False)

    #pass
    def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.igmp.setUp(controller = master)
        groups = [self.igmp.random_mcast_ip(), self.igmp.random_mcast_ip()]
        src_list = [self.igmp.randomsourceip()]
        self.igmp.onos_ssm_table_load(groups, src_list = src_list, controller = master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_EXCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0], intf = self.V_INF1, source = src_list[0])
        assert_equal(status, True)
        status = self.verify_igmp_data_traffic(groups[1], intf = self.V_INF1, source = src_list[0])
        assert_equal(status, False)
        log.info('Killing cluster master %s'%master)
        Container(master_onos_name, Onos.IMAGE).kill()
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances = onos_instances-1, controller = standbys[0])
        assert_equal(status, True)
        log.info('Verifying multicast data traffic after cluster master down')
        status = self.verify_igmp_data_traffic(groups[0], intf = self.V_INF1, source = src_list[0])
        assert_equal(status, True)
        status = self.verify_igmp_data_traffic(groups[1], intf = self.V_INF1, source = src_list[0])
        assert_equal(status, False)

    def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.igmp.setUp(controller = master)
        groups = [self.igmp.random_mcast_ip()]
        src_list = [self.igmp.randomsourceip()]
        self.igmp.onos_ssm_table_load(groups, src_list = src_list, controller = master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0], intf = self.V_INF1, source = src_list[0])
        assert_equal(status, True)
        log.info("Killing the cluster's master %s"%master)
        Container(master_onos_name, Onos.IMAGE).kill()
        count = 0
        for i in range(60):
            log.info('Verifying multicast data traffic after cluster master down')
            status = self.verify_igmp_data_traffic(groups[0], intf = self.V_INF1, source = src_list[0])
            if status:
                break
            else:
                count += 1
                time.sleep(1)
        assert_equal(status, True)
        log.info("Time taken to recover traffic after the cluster's master went down is %d seconds"%count)

    #pass
    def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        self.igmp.setUp(controller = master)
        groups = [self.igmp.random_mcast_ip()]
        src_list = [self.igmp.randomsourceip()]
        self.igmp.onos_ssm_table_load(groups, src_list = src_list, controller = master)
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        status = self.verify_igmp_data_traffic(groups[0], intf = self.V_INF1, source = src_list[0])
        assert_equal(status, True)
        log.info('Changing cluster master %s to %s'%(master, standbys[0]))
        self.change_master_current_cluster(new_master = standbys[0])
        log.info('Verifying multicast traffic after cluster master change')
        status = self.verify_igmp_data_traffic(groups[0], intf = self.V_INF1, source = src_list[0])
        assert_equal(status, True)
        log.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
                                 iface = self.V_INF1, delay = 1)
        time.sleep(10)
        status = self.verify_igmp_data_traffic(groups[0], intf = self.V_INF1, source = src_list[0])
        assert_equal(status, False)

    #pass
    def test_cluster_state_with_igmp_join_before_and_after_master_change(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        self.igmp.setUp(controller = master)
        groups = [self.igmp.random_mcast_ip()]
        src_list = [self.igmp.randomsourceip()]
        self.igmp.onos_ssm_table_load(groups, src_list = src_list, controller = master)
        log.info('Changing cluster master %s to %s'%(master, standbys[0]))
        self.change_master_current_cluster(new_master = standbys[0])
        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list, record_type = IGMP_V3_GR_TYPE_INCLUDE,
                                 iface = self.V_INF1, delay = 2)
        time.sleep(1)
        self.change_master_current_cluster(new_master = master)
        status = self.verify_igmp_data_traffic(groups[0], intf = self.V_INF1, source = src_list[0])
        assert_equal(status, True)

    #pass
    @deferred(TLS_TIMEOUT)
    def test_cluster_with_eap_tls_traffic(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        self.tls.setUp(controller = master)
        df = defer.Deferred()
        def eap_tls_verify(df):
            tls = TLSAuthTest()
            tls.runTest()
            df.callback(0)
        reactor.callLater(0, eap_tls_verify, df)
        return df

    @deferred(120)
    def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self, onos_instances = ONOS_INSTANCES):
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        self.tls.setUp()
        df = defer.Deferred()
        def eap_tls_verify(df):
            tls = TLSAuthTest()
            tls.runTest()
            df.callback(0)
        for i in [0,1]:
            if i == 1:
                log.info('Changing cluster master %s to %s'%(master, standbys[0]))
                self.change_master_current_cluster(new_master = standbys[0])
                log.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
            else:
                log.info('Verifying tls authentication before cluster master change')
            reactor.callLater(0, eap_tls_verify, df)
        return df

    @deferred(TLS_TIMEOUT)
    def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.tls.setUp()
        df = defer.Deferred()
        def eap_tls_verify(df):
            tls = TLSAuthTest()
            tls.runTest()
            df.callback(0)
        for i in [0,1]:
            if i == 1:
                log.info('Killing cluster current master %s'%master)
                cord_test_onos_shutdown(node = master_onos_name)
                time.sleep(20)
                status = self.verify_cluster_status(controller = standbys[0], onos_instances = onos_instances-1, verify = True)
                assert_equal(status, True)
                log.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
                log.info('Verifying tls authentication after killing cluster master')
            reactor.callLater(0, eap_tls_verify, df)
        return df

    @deferred(TLS_TIMEOUT)
    def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        self.tls.setUp()
        df = defer.Deferred()
        def eap_tls_no_cert(df):
            def tls_no_cert_cb():
                log.info('TLS authentication failed with no certificate')
            tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
            tls.runTest()
            assert_equal(tls.failTest, True)
            df.callback(0)
        for i in [0,1]:
            if i == 1:
                log.info('Restart cluster member %s'%standbys[0])
                Container(member_onos_name, Onos.IMAGE).restart()
                time.sleep(20)
                status = self.verify_cluster_status(onos_instances = onos_instances)
                assert_equal(status, True)
                log.info('Cluster came up with %d instances after member restart'%(onos_instances))
                log.info('Verifying tls authentication after member restart')
            reactor.callLater(0, eap_tls_no_cert, df)
        return df

    #pass
    def test_cluster_proxyarp_master_change_and_app_deactivation(self, onos_instances = ONOS_INSTANCES, hosts = 3):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        self.proxyarp.setUpClass()
        ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller = master)
        ingress = hosts+1
        for hostip, hostmac in hosts_config:
            self.proxyarp.proxyarp_arpreply_verify(ingress, hostip, hostmac, PositiveTest = True)
            time.sleep(1)
        log.info('changing cluster current master from %s to %s'%(master, standbys[0]))
        self.change_master_current_cluster(new_master = standbys[0])
        log.info('verifying proxyarp after master change')
        for hostip, hostmac in hosts_config:
            self.proxyarp.proxyarp_arpreply_verify(ingress, hostip, hostmac, PositiveTest = True)
            time.sleep(1)
        log.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
        self.proxyarp.proxyarp_activate(deactivate = True, controller = standbys[0])
        time.sleep(3)
        for hostip, hostmac in hosts_config:
            self.proxyarp.proxyarp_arpreply_verify(ingress, hostip, hostmac, PositiveTest = False)
            time.sleep(1)
        log.info('activating proxyarp app and expecting to get arp reply from ONOS')
        self.proxyarp.proxyarp_activate(deactivate = False, controller = standbys[0])
        time.sleep(3)
        for hostip, hostmac in hosts_config:
            self.proxyarp.proxyarp_arpreply_verify(ingress, hostip, hostmac, PositiveTest = True)
            time.sleep(1)

    #pass
    def test_cluster_with_proxyarp_and_one_member_down(self, hosts = 3, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[1]]
        self.proxyarp.setUpClass()
        ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller = master)
        ingress = hosts+1
        for hostip, hostmac in hosts_config:
            self.proxyarp.proxyarp_arpreply_verify(ingress, hostip, hostmac, PositiveTest = True)
            time.sleep(1)
        log.info('killing cluster member %s'%standbys[1])
        Container(member_onos_name, Onos.IMAGE).kill()
        time.sleep(20)
        status = self.verify_cluster_status(onos_instances = onos_instances-1, controller = master, verify = True)
        assert_equal(status, True)
        log.info('cluster came up with %d instances after member down'%(onos_instances-1))
        log.info('verifying proxy arp functionality after cluster member down')
        for hostip, hostmac in hosts_config:
            self.proxyarp.proxyarp_arpreply_verify(ingress, hostip, hostmac, PositiveTest = True)
            time.sleep(1)

    #pass
    def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self, hosts = 10, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.proxyarp.setUpClass()
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller = master)
        self.success = True
        ingress = hosts+1
        ports = range(ingress, ingress+10)
        hostmac = []
        hostip = []
        for ip, mac in hosts_config:
            hostmac.append(mac)
            hostip.append(ip)
        success_dir = {}
        def verify_proxyarp(*r):
            ingress, hostmac, hostip = r[0], r[1], r[2]
            def mac_recv_task():
                def recv_cb(pkt):
                    log.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
                    success_dir[current_thread().name] = True
                sniff(count=1, timeout=5, lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
                      prn = recv_cb, iface = self.proxyarp.port_map[ingress])
            t = threading.Thread(target = mac_recv_task)
            t.start()
            pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1, pdst = hostip))
            log.info('Sending arp request for dest ip %s on interface %s' %
                     (hostip, self.proxyarp.port_map[ingress]))
            sendp(pkt, count = 10, iface = self.proxyarp.port_map[ingress])
            t.join()
        t = []
        for i in range(10):
            t.append(threading.Thread(target = verify_proxyarp, args = [ports[i], hostmac[i], hostip[i]]))
        for i in range(10):
            t[i].start()
        time.sleep(2)
        for i in range(10):
            t[i].join()
        if len(success_dir) != 10:
            self.success = False
        assert_equal(self.success, True)

    #pass
    def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances-1))
        self.acl.setUp()
        acl_rule = ACLTest()
        status, code = acl_rule.adding_acl_rule('v4', srcIp = self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP, action = 'allow', controller = master)
        if status is False:
            log.info('JSON request returned status %d' %code)
            assert_equal(status, True)
        result = acl_rule.get_acl_rules(controller = master)
        aclRules1 = result.json()['aclRules']
        log.info('Added acl rules is %s'%aclRules1)
        acl_Id = map(lambda d: d['id'], aclRules1)
        log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
        self.change_master_current_cluster(new_master = standbys[0])
        status, code = acl_rule.remove_acl_rule(acl_Id[0], controller = standbys[0])
        if status is False:
            log.info('JSON request returned status %d' %code)
            assert_equal(status, True)

1125 #pass
1126 def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
1127 status = self.verify_cluster_status(onos_instances=onos_instances)
1128 assert_equal(status, True)
1129 master,standbys = self.get_cluster_current_master_standbys()
1130 assert_equal(len(standbys),(onos_instances-1))
1131 onos_names_ips = self.get_cluster_container_names_ips()
1132 master_onos_name = onos_names_ips[master]
1133 self.acl.setUp()
1134 acl_rule = ACLTest()
1135 status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
1136 if status is False:
1137 log.info('JSON request returned status %d' %code)
1138 assert_equal(status, True)
1139 result1 = acl_rule.get_acl_rules(controller=master)
1140 aclRules1 = result1.json()['aclRules']
1141 log.info('Added acl rules is %s'%aclRules1)
1142 acl_Id1 = map(lambda d: d['id'], aclRules1)
1143 log.info('Killing cluster current master %s'%master)
1144 Container(master_onos_name,Onos.IMAGE).kill()
1145 time.sleep(45)
1146 status = self.verify_cluster_status(onos_instances=onos_instances,controller=standbys[0])
1147 assert_equal(status, True)
1148 new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
1149 assert_equal(len(standbys),(onos_instances-2))
1150 assert_not_equal(new_master,master)
1151 result2 = acl_rule.get_acl_rules(controller=new_master)
1152 aclRules2 = result2.json()['aclRules']
1153 acl_Id2 = map(lambda d: d['id'], aclRules2)
1154 log.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
1155 assert_equal(acl_Id2,acl_Id1)
1156
    #acl traffic scenario not working as the acl rule is not getting added to onos
    def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master,standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        ingress = self.acl.ingress_iface
        egress = self.acl.CURRENT_PORT_NUM
        acl_rule = ACLTest()
        status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num=self.acl.CURRENT_PORT_NUM, iface_name='b1', iface_count=1, iface_ip=self.acl.HOST_DST_IP)
        self.acl.CURRENT_PORT_NUM += 1
        time.sleep(5)
        if status is False:
            log.info('JSON request returned status %d' %code)
        assert_equal(status, True)
        srcMac = '00:00:00:00:00:11'
        dstMac = host_ip_mac[0][1]
        self.acl.acl_hosts_add(dstHostIpMac=host_ip_mac, egress_iface_count=1, egress_iface_num=egress)
        status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp=self.acl.ACL_DST_IP, action='deny', controller=master)
        time.sleep(10)
        if status is False:
            log.info('JSON request returned status %d' %code)
        assert_equal(status, True)
        self.acl.acl_rule_traffic_send_recv(srcMac=srcMac, dstMac=dstMac, srcIp=self.acl.ACL_SRC_IP, dstIp=self.acl.ACL_DST_IP, ingress=ingress, egress=egress, ip_proto='UDP', positive_test=False)
        log.info('Killing cluster members %s and %s'%(standbys[0],standbys[1]))
        Container(member1_onos_name, Onos.IMAGE).kill()
        Container(member2_onos_name, Onos.IMAGE).kill()
        time.sleep(40)
        status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
        assert_equal(status, True)
        self.acl.acl_rule_traffic_send_recv(srcMac=srcMac, dstMac=dstMac, srcIp=self.acl.ACL_SRC_IP, dstIp=self.acl.ACL_DST_IP, ingress=ingress, egress=egress, ip_proto='UDP', positive_test=False)
        self.acl.acl_hosts_remove(egress_iface_count=1, egress_iface_num=egress)

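    # Since the scenario above is marked as not working because the acl rule
    # does not get added, a retry sketch (assuming adding_acl_rule() keeps the
    # signature used above; the helper name is illustrative, not part of the
    # original suite) could help separate transient REST failures from a real
    # ACL app problem.
    def add_acl_rule_with_retry(self, acl_rule, controller=None, retries=3, delay=5):
        for _ in range(retries):
            status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP,
                                                    dstIp=self.acl.ACL_DST_IP,
                                                    action='deny', controller=controller)
            if status is True:
                return status, code
            time.sleep(delay)
        return status, code
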
    #pass
    def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface='veth0', onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master,standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        self.dhcprelay.setUpClass(controller=master)
        mac = self.dhcprelay.get_mac(iface)
        self.dhcprelay.host_load(iface)
        ## We use the defaults for this test, which serves as an example for others.
        ## You don't need to restart the dhcpd server if you retain the default config.
        config = self.dhcprelay.default_config
        options = self.dhcprelay.default_options
        subnet = self.dhcprelay.default_subnet_config
        dhcpd_interface_list = self.dhcprelay.relay_interfaces
        self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
                                   config = config,
                                   options = options,
                                   subnet = subnet,
                                   controller=master)
        self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
        cip, sip = self.dhcprelay.send_recv(mac)
        log.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
        self.change_master_current_cluster(new_master=standbys[0])
        log.info('Releasing ip %s to server %s' %(cip, sip))
        assert_equal(self.dhcprelay.dhcp.release(cip), True)
        log.info('Triggering DHCP discover again after release')
        cip2, sip2 = self.dhcprelay.send_recv(mac)
        log.info('Verifying that the released IP was given back on rediscover')
        assert_equal(cip, cip2)
        log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
        assert_equal(self.dhcprelay.dhcp.release(cip2), True)
        self.dhcprelay.tearDownClass(controller=standbys[0])

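    # A minimal reusable sketch of the release/rediscover cycle exercised
    # above, assuming the DHCPTest send_recv()/release() API as used in this
    # file; the helper name is illustrative, not part of the original suite.
    def dhcp_release_and_rediscover(self, mac):
        cip, sip = self.dhcprelay.send_recv(mac)
        assert_equal(self.dhcprelay.dhcp.release(cip), True)
        new_cip, new_sip = self.dhcprelay.send_recv(mac)
        return cip, new_cip
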

    def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface='veth0', onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master,standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.dhcprelay.setUpClass(controller=master)
        mac = self.dhcprelay.get_mac(iface)
        self.dhcprelay.host_load(iface)
        ## We use the defaults for this test, which serves as an example for others.
        ## You don't need to restart the dhcpd server if you retain the default config.
        config = self.dhcprelay.default_config
        options = self.dhcprelay.default_options
        subnet = self.dhcprelay.default_subnet_config
        dhcpd_interface_list = self.dhcprelay.relay_interfaces
        self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
                                   config = config,
                                   options = options,
                                   subnet = subnet,
                                   controller=master)
        self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
        log.info('Initiating dhcp process from client %s'%mac)
        cip, sip = self.dhcprelay.send_recv(mac)
        log.info('Killing cluster current master %s'%master)
        Container(master_onos_name, Onos.IMAGE).kill()
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
        assert_equal(status, True)
        mac = self.dhcprelay.dhcp.get_mac(cip)[0]
        log.info("Verifying that the dhcp client gets the same IP after the cluster master goes down")
        new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
        assert_equal(new_cip, cip)
        self.dhcprelay.tearDownClass(controller=standbys[0])

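    # Note: only_request() above re-sends a DHCP request for the existing
    # lease (cip, mac) rather than running a full discover, so the client is
    # expected to get the same IP back as long as the lease state survived
    # the master going down.
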
    #pass
    def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface='veth0', onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master,standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        self.dhcprelay.setUpClass(controller=master)
        macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
        self.dhcprelay.host_load(iface)
        ## We use the defaults for this test, which serves as an example for others.
        ## You don't need to restart the dhcpd server if you retain the default config.
        config = self.dhcprelay.default_config
        options = self.dhcprelay.default_options
        subnet = self.dhcprelay.default_subnet_config
        dhcpd_interface_list = self.dhcprelay.relay_interfaces
        self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
                                   config = config,
                                   options = options,
                                   subnet = subnet,
                                   controller=master)
        self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
        cip1, sip1 = self.dhcprelay.send_recv(macs[0])
        assert_not_equal(cip1,None)
        log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
        log.info('Changing cluster master from %s to %s'%(master, standbys[0]))
        self.change_master_current_cluster(new_master=standbys[0])
        cip2, sip2 = self.dhcprelay.send_recv(macs[1])
        assert_not_equal(cip2,None)
        log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
        log.info('Changing cluster master from %s to %s'%(standbys[0],master))
        self.change_master_current_cluster(new_master=master)
        cip3, sip3 = self.dhcprelay.send_recv(macs[2])
        assert_not_equal(cip3,None)
        log.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
        self.dhcprelay.tearDownClass(controller=standbys[0])

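    # A minimal sketch factoring out the change-master-then-lease step the
    # test above repeats three times; it assumes the same
    # change_master_current_cluster() and send_recv() helpers used above, and
    # the helper name is illustrative, not part of the original suite.
    def dhcp_lease_with_master(self, mac, new_master):
        self.change_master_current_cluster(new_master=new_master)
        cip, sip = self.dhcprelay.send_recv(mac)
        assert_not_equal(cip, None)
        return cip, sip
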
    def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master,standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        self.subscriber.setUpClass(controller=master)
        self.subscriber.num_subscribers = 5
        self.subscriber.num_channels = 10
        for i in [0,1]:
            if i == 1:
                cord_test_onos_restart()
                time.sleep(45)
                status = self.verify_cluster_status(onos_instances=onos_instances)
                assert_equal(status, True)
                log.info('Verifying cord subscriber functionality after cluster restart')
            else:
                log.info('Verifying cord subscriber functionality before cluster restart')
            test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
                                                                 num_channels = self.subscriber.num_channels,
                                                                 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
                                                                        self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
                                                                 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
                                                                                                                self.subscriber.num_channels))
            assert_equal(test_status, True)
        self.subscriber.tearDownClass(controller=master)

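    # Note: the 'for i in [0,1]' pattern used by these subscriber tests runs
    # the same verification twice, once before and once after the disruptive
    # event; the i == 1 branch injects the fault (here a full cluster restart)
    # and re-verifies cluster health before repeating the subscriber checks.
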
    #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
    def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master,standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        self.subscriber.setUpClass(controller=master)
        self.subscriber.num_subscribers = 5
        self.subscriber.num_channels = 10
        for i in [0,1]:
            if i == 1:
                status = self.withdraw_cluster_current_mastership(master_ip=master)
                assert_equal(status, True)
                master,standbys = self.get_cluster_current_master_standbys()
                log.info('Verifying cord subscriber functionality after the cluster current master withdraws mastership')
            else:
                log.info('Verifying cord subscriber functionality before the cluster master withdraws mastership')
            test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
                                                                 num_channels = self.subscriber.num_channels,
                                                                 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
                                                                        self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
                                                                 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
                                                                                                                self.subscriber.num_channels),controller=master)
            assert_equal(test_status, True)
        self.subscriber.tearDownClass(controller=master)

    #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
    def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        self.subscriber.setUpClass(controller=master)
        num_subscribers = 1
        num_channels = 10
        for i in [0,1]:
            if i == 1:
                cord_test_onos_shutdown(node = member_onos_name)
                time.sleep(30)
                status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
                assert_equal(status, True)
                log.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
            else:
                log.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
            test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
                                                                 num_channels = num_channels,
                                                                 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
                                                                        self.subscriber.igmp_verify, self.subscriber.traffic_verify),
                                                                 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
                                                                 negative_subscriber_auth = 'all',controller=master)
            assert_equal(test_status, True)
        self.subscriber.tearDownClass(controller=master)

    def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys),(onos_instances-1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        self.subscriber.setUpClass(controller=master)
        num_subscribers = 1
        num_channels = 10
        for i in [0,1]:
            if i == 1:
                cord_test_onos_shutdown(node = member1_onos_name)
                cord_test_onos_shutdown(node = member2_onos_name)
                time.sleep(60)
                status = self.verify_cluster_status(onos_instances=onos_instances-2)
                assert_equal(status, True)
                log.info('Verifying cord subscriber functionality after cluster members %s and %s are down'%(standbys[0],standbys[1]))
            else:
                log.info('Verifying cord subscriber functionality before cluster members %s and %s are down'%(standbys[0],standbys[1]))
            test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
                                                                 num_channels = num_channels,
                                                                 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
                                                                        self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
                                                                 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
                                                                 negative_subscriber_auth = 'all')
            assert_equal(test_status, True)
        self.subscriber.tearDownClass(controller=master)

    #pass
    def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
        for device in device_dict.keys():
            log.info('Master and standby info for device %s is %s'%(device,device_dict[device]))
            assert_not_equal(device_dict[device]['master'],'none')
            log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
            assert_equal(len(device_dict[device]['standbys']), onos_instances-1)

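    # Note on the assumed data shape: from its use in these multi-switch
    # tests, get_cluster_current_master_standbys_of_connected_devices()
    # returns a dict keyed by device id, e.g.
    # { 'of:0000000000000001': {'master': <ip>, 'standbys': [<ip>, ...]} },
    # which is the shape the assertions above and below rely on.
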
    #pass
    def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
        cluster_ips = self.get_cluster_current_member_ips()
        for ip in cluster_ips:
            device_dict = self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
            assert_equal(len(device_dict.keys()), onos_instances)
            for device in device_dict.keys():
                log.info('Master and standby info for device %s is %s'%(device,device_dict[device]))
                assert_not_equal(device_dict[device]['master'],'none')
                log.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
                assert_equal(len(device_dict[device]['standbys']), onos_instances-1)

    #pass
    def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        onos_names_ips = self.get_cluster_container_names_ips()
        master_count = self.get_number_of_devices_of_master()
        log.info('Master count information is %s'%master_count)
        total_devices = 0
        for master in master_count.keys():
            total_devices += master_count[master]['size']
            if master_count[master]['size'] != 0:
                restart_ip = master
        assert_equal(total_devices,onos_instances)
        member_onos_name = onos_names_ips[restart_ip]
        log.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
        Container(member_onos_name, Onos.IMAGE).restart()
        time.sleep(40)
        master_count = self.get_number_of_devices_of_master()
        log.info('Master count information after restart is %s'%master_count)
        total_devices = 0
        for master in master_count.keys():
            total_devices += master_count[master]['size']
            if master == restart_ip:
                assert_equal(master_count[master]['size'], 0)
        assert_equal(total_devices,onos_instances)

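    # Note on the assumed data shape: from its use in these tests,
    # get_number_of_devices_of_master() returns a dict keyed by controller ip,
    # e.g. { '172.17.0.2': {'size': <device count>, 'devices': [<device ids>]} },
    # so summing 'size' across controllers yields the total switch count.
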
    #pass
    def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        onos_names_ips = self.get_cluster_container_names_ips()
        master_count = self.get_number_of_devices_of_master()
        log.info('Master count information is %s'%master_count)
        total_devices = 0
        for master in master_count.keys():
            total_devices += master_count[master]['size']
            if master_count[master]['size'] != 0:
                restart_ip = master
        assert_equal(total_devices,onos_instances)
        master_onos_name = onos_names_ips[restart_ip]
        log.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
        Container(master_onos_name, Onos.IMAGE).kill()
        time.sleep(40)
        for ip in onos_names_ips.keys():
            if ip != restart_ip:
                controller_ip = ip
        status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
        assert_equal(status, True)
        master_count = self.get_number_of_devices_of_master(controller=controller_ip)
        log.info('Master count information after master is down is %s'%master_count)
        total_devices = 0
        for master in master_count.keys():
            total_devices += master_count[master]['size']
            if master == restart_ip:
                assert_equal(master_count[master]['size'], 0)
        assert_equal(total_devices,onos_instances)

    #pass
    def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master_count = self.get_number_of_devices_of_master()
        log.info('Master count information is %s'%master_count)
        total_devices = 0
        for master in master_count.keys():
            total_devices += int(master_count[master]['size'])
            if master_count[master]['size'] != 0:
                master_ip = master
                log.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
                device_id = str(master_count[master]['devices'][0])
                device_count = master_count[master]['size']
        assert_equal(total_devices,onos_instances)
        log.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
        status = self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
        assert_equal(status, True)
        master_count = self.get_number_of_devices_of_master()
        log.info('Master count information after cluster mastership withdraw is %s'%master_count)
        total_devices = 0
        for master in master_count.keys():
            total_devices += int(master_count[master]['size'])
            if master == master_ip:
                assert_equal(master_count[master]['size'], device_count-1)
        assert_equal(total_devices,onos_instances)

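    # A small helper sketch (assuming the same master_count shape returned by
    # get_number_of_devices_of_master() as used in the tests above) factoring
    # out the per-controller device-count summation these tests repeat inline;
    # the helper name is illustrative, not part of the original suite.
    def total_master_devices(self, master_count):
        # Sum of per-controller device counts, matching the inline loops above.
        return sum(int(master_count[master]['size']) for master in master_count.keys())
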
    #pass
    def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances=onos_instances)
        assert_equal(status, True)
        master_count = self.get_number_of_devices_of_master()
        log.info('Master count information is %s'%master_count)
        total_devices = 0
        for master in master_count.keys():
            total_devices += master_count[master]['size']
        assert_equal(total_devices,onos_instances)
        log.info('Restarting cluster')
        cord_test_onos_restart()
        time.sleep(60)
        master_count = self.get_number_of_devices_of_master()
        log.info('Master count information after restart is %s'%master_count)
        total_devices = 0
        for master in master_count.keys():
            total_devices += master_count[master]['size']
        assert_equal(total_devices,onos_instances)