# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl, get_mac
from OltConfig import OltConfig
from socket import socket
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart
from portmaps import g_subscriber_port_map
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
from DHCP import DHCPTest  # used by the DHCP relay tests below (assumed available alongside the other test utils)
import os
import json
import random
import collections
log.setLevel('INFO')

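# ONOS cluster test suite: verifies cluster formation, adding and removing
# members, master failover and restarts, and the behavior of the vrouter,
# flow, EAP-TLS, DHCP relay and CORD subscriber applications across cluster
# events. The tests are nose-driven (note the nose.tools/nose.twistedtools
# imports); the exact invocation depends on the cord-tester harness.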
class cluster_exchange(unittest.TestCase):
    test_path = os.path.dirname(os.path.realpath(__file__))
    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
    mac = RandMAC()._fix()
    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
    igmp_ip = IP(dst = '224.0.0.22')
    ONOS_INSTANCES = 3
    V_INF1 = 'veth0'
    TLS_TIMEOUT = 100
    device_id = 'of:' + get_mac()
    igmp = cluster_igmp()
    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10', end_ip = '224.1.10.49')
    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35', end_ip = '38.24.35.56')
    tls = cluster_tls()
    flows = cluster_flows()
    proxyarp = cluster_proxyarp()
    vrouter = cluster_vrouter()
    acl = cluster_acl()
    dhcprelay = cluster_dhcprelay()
    subscriber = cluster_subscriber()

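    # Returns the IP of the first controller listed in the ONOS_CONTROLLER_IP
    # environment variable, defaulting to localhost when it is unset.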
    def get_controller(self):
        controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
        controller = controller.split(',')[0]
        return controller

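    # Opens an ONOS CLI session, retrying up to 3 times before giving up.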
    def cliEnter(self, controller = None):
        retries = 0
        while retries < 3:
            self.cli = OnosCliDriver(controller = controller, connect = True)
            if self.cli.handle:
                break
            else:
                retries += 1
                time.sleep(2)

    def cliExit(self):
        self.cli.disconnect()

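    # Polls the ONOS 'summary' command (up to 10 tries) for the cluster node
    # count. With verify=True the count must equal onos_instances exactly;
    # otherwise at least onos_instances nodes must be present.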
    def verify_cluster_status(self, controller = None, onos_instances = ONOS_INSTANCES, verify = False):
        tries = 0
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
                if cluster_summary:
                    log.info("cluster 'summary' command output is %s" %cluster_summary)
                    nodes = cluster_summary['nodes']
                    if verify:
                        if nodes == onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                    else:
                        if nodes >= onos_instances:
                            self.cliExit()
                            return True
                        else:
                            tries += 1
                            time.sleep(1)
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return False
        except:
            raise

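    # Returns the member IPs reported by the ONOS 'nodes' command, sorted by
    # the last octet of the address.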
    def get_cluster_current_member_ips(self, controller = None):
        tries = 0
        cluster_ips = []
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
                if cluster_nodes:
                    log.info("cluster 'nodes' output is %s" %cluster_nodes)
                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
                    self.cliExit()
                    cluster_ips.sort(lambda i1, i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
                    return cluster_ips
                else:
                    tries += 1
            self.cliExit()
            return cluster_ips
        except:
            raise Exception('Failed to get cluster members')

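    # Maps each member IP to its container name: the first member uses
    # Onos.NAME, later members Onos.NAME-2, Onos.NAME-3 and so on.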
    def get_cluster_container_names_ips(self):
        onos_names_ips = {}
        onos_ips = self.get_cluster_current_member_ips()
        onos_names_ips[onos_ips[0]] = Onos.NAME
        for i in range(1, len(onos_ips)):
            name = '{0}-{1}'.format(Onos.NAME, i + 1)
            onos_names_ips[onos_ips[i]] = name

        return onos_names_ips

    #identifies the current master of a connected device; not tested
    def get_cluster_current_master_standbys(self, controller = None, device_id = device_id):
        master = None
        standbys = []
        tries = 0
        try:
            self.cliEnter(controller = controller)
            while tries <= 10:
                roles = json.loads(self.cli.roles(jsonFormat = True))
                log.info("cluster 'roles' command output is %s" %roles)
                if roles:
                    for device in roles:
                        log.info('Verifying device info in line %s' %device)
                        if device['id'] == device_id:
                            master = str(device['master'])
                            standbys = map(lambda d: str(d), device['standbys'])
                            log.info('Master and standbys for device %s are %s and %s' %(device_id, master, standbys))
                            self.cliExit()
                            return master, standbys
                    self.cliExit()
                    return master, standbys
                else:
                    tries += 1
                    time.sleep(1)
            self.cliExit()
            return master, standbys
        except:
            raise Exception('Cannot get cluster master and standbys')

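    # Changes device mastership through the ONOS 'device-role' CLI command,
    # waits 60 seconds for mastership to settle, then asserts the new master.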
    def change_master_current_cluster(self, new_master = None, device_id = device_id, controller = None):
        if new_master is None: return False
        self.cliEnter()
        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
        command = self.cli.command(cmd = cmd, jsonFormat = False)
        self.cliExit()
        time.sleep(60)
        master, standbys = self.get_cluster_current_master_standbys(controller = controller, device_id = device_id)
        assert_equal(master, new_master)
        log.info('Cluster master changed to %s successfully' %new_master)

    ############# Cluster Test cases ###########################
    #pass
    def test_onos_cluster_formation_verify(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster exists with %d ONOS instances' %onos_instances)

    #nottest cluster not coming up properly if member goes down
    def test_onos_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips) + add
        log.info('Adding %d nodes to the ONOS cluster' %add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    def test_onos_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Removing cluster current master %s' %(master))
        cord_test_onos_shutdown(node = master_onos_name)
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances, controller = standbys[0])
        assert_equal(status, True)
        new_master, standbys = self.get_cluster_current_master_standbys(controller = standbys[0])
        assert_not_equal(master, new_master)
        log.info('Successfully removed cluster master instance')

    def test_onos_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Removing cluster member %s' %standbys[0])
        cord_test_onos_shutdown(node = member_onos_name)
        time.sleep(60)
        onos_instances -= 1
        status = self.verify_cluster_status(onos_instances = onos_instances, controller = master)
        assert_equal(status, True)

    def test_onos_cluster_removing_two_members(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Removing cluster member %s' %standbys[0])
        cord_test_onos_shutdown(node = member1_onos_name)
        log.info('Removing cluster member %s' %standbys[1])
        cord_test_onos_shutdown(node = member2_onos_name)
        time.sleep(60)
        onos_instances = onos_instances - 2
        status = self.verify_cluster_status(onos_instances = onos_instances, controller = master)
        assert_equal(status, True)

    def test_onos_cluster_removing_N_members(self, remove = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(remove):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Removing onos container with name %s' %member_onos_name)
            cord_test_onos_shutdown(node = member_onos_name)
        time.sleep(60)
        onos_instances = onos_instances - remove
        status = self.verify_cluster_status(onos_instances = onos_instances, controller = master)
        assert_equal(status, True)

    #nottest cluster not coming up properly if member goes down
    def test_onos_cluster_adding_and_removing_members(self, onos_instances = ONOS_INSTANCES, add = 2, remove = 2):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = len(onos_ips) + add
        log.info('Adding %d ONOS instances to the cluster' %add)
        cord_test_onos_add_cluster(count = add)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Removing %d ONOS instances from the cluster' %remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, onos_instances - i)
            log.info('Removing onos container with name %s' %name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        onos_instances = onos_instances - remove
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    #nottest cluster not coming up properly if member goes down
    def test_onos_cluster_removing_and_adding_member(self, onos_instances = ONOS_INSTANCES, add = 1, remove = 1):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        onos_instances = onos_instances - remove
        log.info('Removing %d ONOS instances from the cluster' %remove)
        for i in range(remove):
            name = '{}-{}'.format(Onos.NAME, len(onos_ips) - i)
            log.info('Removing onos container with name %s' %name)
            cord_test_onos_shutdown(node = name)
        time.sleep(60)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Adding %d ONOS instances to the cluster' %add)
        cord_test_onos_add_cluster(count = add)
        onos_instances = onos_instances + add
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    def test_onos_cluster_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Restarting cluster')
        cord_test_onos_restart()
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)

    def test_onos_cluster_master_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        log.info('Restarting cluster master %s' %master)
        cord_test_onos_restart(node = master_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up after master restart as expected')

    #test fails; master changes after restart. Need to check correct behavior.
    def test_onos_cluster_master_ip_after_master_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master1, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master1]
        log.info('Restarting cluster master %s' %master1)
        cord_test_onos_restart(node = master_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master2, standbys = self.get_cluster_current_master_standbys()
        assert_equal(master1, master2)
        log.info('Cluster master is same before and after cluster master restart as expected')

    def test_onos_cluster_one_member_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        log.info('Restarting cluster member %s' %standbys[0])
        cord_test_onos_restart(node = member_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting one member')

    def test_onos_cluster_two_members_restart(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        log.info('Restarting cluster members %s and %s' %(standbys[0], standbys[1]))
        cord_test_onos_restart(node = member1_onos_name)
        cord_test_onos_restart(node = member2_onos_name)
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting two members')

    def test_onos_cluster_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        onos_names_ips = self.get_cluster_container_names_ips()
        for i in range(members):
            member_onos_name = onos_names_ips[standbys[i]]
            log.info('Restarting cluster member %s' %standbys[i])
            cord_test_onos_restart(node = member_onos_name)

        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        log.info('Cluster came up as expected after restarting %d members' %members)

    def test_onos_cluster_master_change(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        log.info('Cluster current master of device is %s' %master)
        self.change_master_current_cluster(new_master = standbys[0])
        log.info('Cluster master changed successfully')

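    ############# Cluster vRouter Test cases ###########################
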
    #tested on single onos setup.
    def test_onos_cluster_vrouter_routes_in_cluster_members(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        for onos_ip in onos_ips:
            tries = 0
            flag = False
            try:
                self.cliEnter(controller = onos_ip)
                while tries <= 5:
                    routes = json.loads(self.cli.routes(jsonFormat = True))
                    if routes:
                        assert_equal(len(routes['routes4']), networks)
                        self.cliExit()
                        flag = True
                        break
                    else:
                        tries += 1
                        time.sleep(1)
                assert_equal(flag, True)
            except:
                log.info('Exception occurred while checking routes in onos instance %s' %onos_ip)
                raise

    #tested on single onos setup.
    def test_onos_cluster_vrouter_master_down(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        cord_test_onos_shutdown(node = master_onos_name)
        time.sleep(60)
        log.info('Verifying vrouter traffic after cluster master down')
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_onos_cluster_with_vrouter_and_restarting_master(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        onos_ips = self.get_cluster_current_member_ips()
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        cord_test_onos_restart()
        self.vrouter.vrouter_traffic_verify()

    #tested on single onos setup.
    def test_onos_cluster_deactivating_vrouter_app(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate = True)
        time.sleep(15)
        self.vrouter.vrouter_traffic_verify(positive_test = False)
        self.vrouter.vrouter_activate(deactivate = False)

    #tested on single onos setup.
    def test_onos_cluster_deactivating_vrouter_app_and_making_master_down(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before master down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)
        self.vrouter.vrouter_activate(deactivate = True)
        log.info('Verifying vrouter traffic after app deactivated')
        time.sleep(15)  ## vrouter traffic is expected to fail while the app is deactivated
        self.vrouter.vrouter_traffic_verify(positive_test = False)
        log.info('Verifying vrouter traffic after master down')
        cord_test_onos_shutdown(node = master_onos_name)
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify(positive_test = False)
        self.vrouter.vrouter_activate(deactivate = False)

    #tested on single onos setup.
    def test_onos_cluster_for_vrouter_app_and_making_member_down(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter before cluster member down')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)  # vrouter is expected to work properly
        log.info('Verifying vrouter after cluster member down')
        cord_test_onos_shutdown(node = member_onos_name)
        time.sleep(60)
        self.vrouter.vrouter_traffic_verify()  # vrouter is expected to keep working when a cluster member goes down

    #tested on single onos setup.
    def test_onos_cluster_for_vrouter_app_and_restarting_member(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[1]]
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster member restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)  # vrouter is expected to work properly
        cord_test_onos_restart(node = member_onos_name)
        log.info('Verifying vrouter traffic after cluster member restart')
        self.vrouter.vrouter_traffic_verify()  # vrouter is expected to keep working when a cluster member restarts

    #tested on single onos setup.
    def test_onos_cluster_for_vrouter_app_restarting_cluster(self, networks = 5, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.vrouter.setUpClass()
        log.info('Verifying vrouter traffic before cluster restart')
        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
        assert_equal(res, True)  # vrouter is expected to work properly
        cord_test_onos_restart()
        log.info('Verifying vrouter traffic after cluster restart')
        self.vrouter.vrouter_traffic_verify()  # vrouter is expected to keep working after the cluster restarts

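    ############# Cluster Flow Test cases ###########################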
    #test fails because flow state is in pending_add in onos
    def test_onos_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
        ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            udpSrc = ingress_map['udp_port'],
                            udpDst = egress_map['udp_port'],
                            controller = master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
                self.success = True
            sniff(timeout = 2,
                  lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
                  and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])

        for i in [0, 1]:
            if i == 1:
                cord_test_onos_shutdown(node = master_onos_name)
                log.info('Verifying flows traffic after master killed')
                time.sleep(45)
            else:
                log.info('Verifying flows traffic before master killed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth  #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
            L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
            pkt = L2/L3/L4
            log.info('Sending packets to verify if flows are correct')
            sendp(pkt, count = 50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

    def test_onos_cluster_making_master_change_and_flows_of_ecn(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        self.flows.setUpClass()
        egress = 1
        ingress = 2
        egress_map = { 'ip': '192.168.30.1' }
        ingress_map = { 'ip': '192.168.40.1' }
        flow = OnosFlowCtrl(deviceId = self.device_id,
                            egressPort = egress,
                            ingressPort = ingress,
                            ecn = 1,
                            controller = master
                            )
        result = flow.addFlow()
        assert_equal(result, True)
        ##wait for flows to be added to ONOS
        time.sleep(1)
        self.success = False
        def mac_recv_task():
            def recv_cb(pkt):
                log.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
                self.success = True
            sniff(count = 2, timeout = 5,
                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
                  and int(bin(p[IP].tos).split('b')[1][-2:], 2) == 1, prn = recv_cb,
                  iface = self.flows.port_map[egress])
        for i in [0, 1]:
            if i == 1:
                log.info('Changing cluster master to %s' %standbys[0])
                self.change_master_current_cluster(new_master = standbys[0])
                log.info('Verifying flow traffic after cluster master changed')
            else:
                log.info('Verifying flow traffic before cluster master changed')
            t = threading.Thread(target = mac_recv_task)
            t.start()
            L2 = self.flows_eth  # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
            pkt = L2/L3
            log.info('Sending a packet to verify if flows are correct')
            sendp(pkt, count = 50, iface = self.flows.port_map[ingress])
            t.join()
            assert_equal(self.success, True)

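    ############# Cluster EAP-TLS Test cases ###########################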
    @deferred(TLS_TIMEOUT)
    def test_onos_cluster_with_eap_tls_traffic(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        self.tls.setUp(controller = master)
        df = defer.Deferred()
        def eap_tls_verify(df):
            tls = TLSAuthTest()
            tls.runTest()
            df.callback(0)
        reactor.callLater(0, eap_tls_verify, df)
        return df

    @deferred(120)
    def test_onos_cluster_for_eap_tls_traffic_before_and_after_master_change(self, onos_instances = ONOS_INSTANCES):
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        self.tls.setUp()
        df = defer.Deferred()
        def eap_tls_verify(df):
            tls = TLSAuthTest()
            tls.runTest()
            df.callback(0)
        for i in [0, 1]:
            if i == 1:
                log.info('Changing cluster master %s to %s' %(master, standbys[0]))
                self.change_master_current_cluster(new_master = standbys[0])
                log.info('Verifying tls authentication after cluster master changed to %s' %standbys[0])
            else:
                log.info('Verifying tls authentication before cluster master change')
            reactor.callLater(0, eap_tls_verify, df)
        return df

    @deferred(TLS_TIMEOUT)
    def test_onos_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        onos_names_ips = self.get_cluster_container_names_ips()
        master_onos_name = onos_names_ips[master]
        self.tls.setUp()
        df = defer.Deferred()
        def eap_tls_verify(df):
            tls = TLSAuthTest()
            tls.runTest()
            df.callback(0)
        for i in [0, 1]:
            if i == 1:
                log.info('Killing cluster current master %s' %master)
                cord_test_onos_shutdown(node = master_onos_name)
                time.sleep(20)
                status = self.verify_cluster_status(controller = standbys[0], onos_instances = onos_instances - 1, verify = True)
                assert_equal(status, True)
                log.info('Cluster came up with %d instances after killing master' %(onos_instances - 1))
                log.info('Verifying tls authentication after killing cluster master')
            reactor.callLater(0, eap_tls_verify, df)
        return df

    @deferred(TLS_TIMEOUT)
    def test_onos_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        self.tls.setUp()
        df = defer.Deferred()
        def eap_tls_no_cert(df):
            def tls_no_cert_cb():
                log.info('TLS authentication failed with no certificate')
            tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
            tls.runTest()
            assert_equal(tls.failTest, True)
            df.callback(0)
        for i in [0, 1]:
            if i == 1:
                log.info('Restarting cluster member %s' %standbys[0])
                Container(member_onos_name, Onos.IMAGE).restart()
                time.sleep(20)
                status = self.verify_cluster_status(onos_instances = onos_instances)
                assert_equal(status, True)
                log.info('Cluster came up with %d instances after member restart' %(onos_instances))
                log.info('Verifying tls authentication after member restart')
            reactor.callLater(0, eap_tls_no_cert, df)
        return df

    ###### Dhcp Relay Test cases ######################################

    def test_onos_cluster_with_dhcpRelay_app_releasing_dhcp_ip_after_master_change(self, iface = 'veth0', onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        self.dhcprelay.setUpClass()
        mac = self.dhcprelay.get_mac(iface)
        self.dhcprelay.host_load(iface)
        ##we use the defaults for this test; it serves as an example for others
        ##the dhcpd server does not need a restart when retaining the default config
        config = self.dhcprelay.default_config
        options = self.dhcprelay.default_options
        subnet = self.dhcprelay.default_subnet_config
        dhcpd_interface_list = self.dhcprelay.relay_interfaces
        self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
                                   config = config,
                                   options = options,
                                   subnet = subnet)
        self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
        cip, sip = self.dhcprelay.send_recv(mac)
        log.info('Changing cluster current master from %s to %s' %(master, standbys[0]))
        self.change_master_current_cluster(new_master = standbys[0])
        log.info('Releasing ip %s to server %s' %(cip, sip))
        assert_equal(self.dhcprelay.dhcp.release(cip), True)
        log.info('Triggering DHCP discover again after release')
        cip2, sip2 = self.dhcprelay.send_recv(mac)
        log.info('Verifying released IP was given back on rediscover')
        assert_equal(cip, cip2)
        log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
        assert_equal(self.dhcprelay.dhcp.release(cip2), True)
        self.dhcprelay.tearDownClass()

    def test_onos_cluster_with_dhcpRelay_app_simulating_client_by_changing_master(self, iface = 'veth0', onos_instances = ONOS_INSTANCES):
        #status = self.verify_cluster_status(onos_instances = onos_instances)
        #assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        self.dhcprelay.setUpClass()
        macs = ['e4:90:5e:a3:82:c1', 'e4:90:5e:a3:82:c2', 'e4:90:5e:a3:82:c3']
        self.dhcprelay.host_load(iface)
        ##we use the defaults for this test; it serves as an example for others
        ##the dhcpd server does not need a restart when retaining the default config
        config = self.dhcprelay.default_config
        options = self.dhcprelay.default_options
        subnet = self.dhcprelay.default_subnet_config
        dhcpd_interface_list = self.dhcprelay.relay_interfaces
        self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
                                   config = config,
                                   options = options,
                                   subnet = subnet)
        self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
        cip1, sip1 = self.dhcprelay.send_recv(macs[0])
        assert_not_equal(cip1, None)
        log.info('Got dhcp client IP %s for mac %s when cluster master is %s' %(cip1, macs[0], master))
        log.info('Changing cluster master from %s to %s' %(master, standbys[0]))
        self.change_master_current_cluster(new_master = standbys[0])
        cip2, sip2 = self.dhcprelay.send_recv(macs[1])
        assert_not_equal(cip2, None)
        log.info('Got dhcp client IP %s for mac %s when cluster master is %s' %(cip2, macs[1], standbys[0]))
        log.info('Changing cluster master from %s to %s' %(standbys[0], master))
        self.change_master_current_cluster(new_master = master)
        cip3, sip3 = self.dhcprelay.send_recv(macs[2])
        assert_not_equal(cip3, None)
        log.info('Got dhcp client IP %s for mac %s when cluster master is %s' %(cip3, macs[2], master))
        self.dhcprelay.tearDownClass()


    ############ Cord Subscriber Test cases ##################

    def test_onos_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self, onos_instances = ONOS_INSTANCES):
        """Test subscriber join next for channel surfing"""
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        self.subscriber.setUpClass()
        self.subscriber.num_subscribers = 5
        self.subscriber.num_channels = 10
        for i in [0, 1]:
            if i == 1:
                cord_test_onos_restart()
                time.sleep(45)
                status = self.verify_cluster_status(onos_instances = onos_instances)
                assert_equal(status, True)
                log.info('Verifying cord subscriber functionality after cluster restart')
            else:
                log.info('Verifying cord subscriber functionality before cluster restart')
            test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
                                                                 num_channels = self.subscriber.num_channels,
                                                                 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
                                                                        self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
                                                                 port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
                                                                                                                self.subscriber.num_channels))
            assert_equal(test_status, True)
        self.subscriber.tearDownClass()

    def test_onos_cluster_with_cord_subscriber_joining_10channels_making_one_cluster_member_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member_onos_name = onos_names_ips[standbys[0]]
        self.subscriber.setUpClass()
        num_subscribers = 1
        num_channels = 10
        for i in [0, 1]:
            if i == 1:
                cord_test_onos_shutdown(node = member_onos_name)
                time.sleep(30)
                status = self.verify_cluster_status(onos_instances = onos_instances - 1, verify = True)
                assert_equal(status, True)
                log.info('Verifying cord subscriber functionality after cluster member %s is down' %standbys[0])
            else:
                log.info('Verifying cord subscriber functionality before cluster member %s is down' %standbys[0])
            test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
                                                                 num_channels = num_channels,
                                                                 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
                                                                        self.subscriber.igmp_verify, self.subscriber.traffic_verify),
                                                                 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
                                                                 negative_subscriber_auth = 'all')
            assert_equal(test_status, True)
        self.subscriber.tearDownClass()

    def test_onos_cluster_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self, onos_instances = ONOS_INSTANCES):
        status = self.verify_cluster_status(onos_instances = onos_instances)
        assert_equal(status, True)
        master, standbys = self.get_cluster_current_master_standbys()
        assert_equal(len(standbys), (onos_instances - 1))
        onos_names_ips = self.get_cluster_container_names_ips()
        member1_onos_name = onos_names_ips[standbys[0]]
        member2_onos_name = onos_names_ips[standbys[1]]
        self.subscriber.setUpClass()
        num_subscribers = 1
        num_channels = 10
        for i in [0, 1]:
            if i == 1:
                cord_test_onos_shutdown(node = member1_onos_name)
                cord_test_onos_shutdown(node = member2_onos_name)
                time.sleep(60)
                status = self.verify_cluster_status(onos_instances = onos_instances - 2)
                assert_equal(status, True)
                log.info('Verifying cord subscriber functionality after two cluster members %s and %s are down' %(standbys[0], standbys[1]))
            else:
                log.info('Verifying cord subscriber functionality before two cluster members %s and %s are down' %(standbys[0], standbys[1]))
            test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
                                                                 num_channels = num_channels,
                                                                 cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
                                                                        self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
                                                                 port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
                                                                 negative_subscriber_auth = 'all')
            assert_equal(test_status, True)
        self.subscriber.tearDownClass()